Dataset schema (one row per source file; within each row the cells appear in column order):

  source            string, length 3 - 92       file name of the sample
  original_c        string, length 26 - 2.25M   original C/C++ source
  no_omp_formatted  string, length 0 - 2.25M    formatted copy with the OpenMP pragmas removed
  omp_formatted     string, length 0 - 2.25M    formatted copy with the OpenMP pragmas kept
source: raytracer.h
#pragma once #include <algorithm> #include <memory> #include <set> #include <chrono> #include <glm/glm.hpp> #include "camera.h" #include "entities.h" #include "image.h" #include "octree.h" #include "omp.h" #include <ctime> #include "util.h" #include "halton_enum.h" #include "halton_sampler.h" #include "photonMap.h" //#define DEBUG_OCTREE class RayTracer { public: int p = 0; int width, height; RayTracer() = delete; RayTracer(const Camera& camera) : _camera(camera), _image(std::make_shared<Image>(0, 0)) { }; void setScene(Octree* scene) { _scene = scene; _photon_map = new PhotonMap(_scene->_root._bbox.min, _scene->_root._bbox.max); } void run(int w, int h) { std::cout << "starting raytracer with frame size: " << w << ", " << h << "\n"; srand(std::time(0)); Halton_sampler sampler; sampler.init_faure(); Halton_enum halton_enum(w, h); _image = std::make_shared<Image>(w, h); width = w; height = h; if (!_scene->valid) { _scene->rebuild(); } if (!_photon_map->valid) { auto start = std::chrono::high_resolution_clock::now(); std::cout << "emitting photons...\n"; tracePhotons(5, photons, sampler, halton_enum); auto end = std::chrono::high_resolution_clock::now(); std::cout << "photon time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count()/1000.0 << " s\n"; _photon_map->rebuild(); //_scene->rebuild(); } double sensorHalfWidth = (_camera.sensorDiag*w)/(sqrt((double)w*w + h*h)); double sensorHalfHeight = sensorHalfWidth * ((double)h/w); glm::dvec3 screenCenter = _camera.pos + _camera.focalDist*_camera.forward; glm::dvec3 cameraRight = glm::normalize(glm::cross(_camera.forward, _camera.up)); std::cout << cameraRight.x << ", " << cameraRight.y << ", " << cameraRight.z << ", " << "\n"; double avgTests = 0; std::vector<double> vars; vars.reserve(w*h); std::vector<double> xrand; subrand(xrand, 25000); std::vector<double> yrand; subrand(yrand, 25000); // The structure of the for loop should remain for incremental rendering. 
#pragma omp parallel for schedule(dynamic, 10) //OpenMP for (int y = 0; y < h; ++y) { srand(std::time(0) + rand()); if(_running) { for (int x = 0; x < w; ++x) { glm::dvec3 color(0.5, 0.5, 0.5); glm::dvec3 lastCol(0, 0, 0); double var = 0; int samps = 0; int s = 0; while (s < max_samples && samps < min_samples) { lastCol = color; int idx = halton_enum.get_index(s, x, y); double xr = sampler.sample(0, idx); double yr = sampler.sample(1, idx); //xr = .2*fmod(halton_enum.scale_x(xr), 1.0) + .8*1*xrand[((x + w*y)*max_samples + s) % xrand.size()];// + 0*drand(); //yr = .2*fmod(halton_enum.scale_y(yr), 1.0) + .8*1*yrand[((x + w*y)*max_samples + s) % yrand.size()];// + 0*drand(); //std::cout << (xr - yr) << "\n"; double dx = halton_enum.scale_x(xr);// (double)x + AA_JITTER*xrand[((x + w*y)*max_samples + s) % xrand.size()]; double dy = halton_enum.scale_y(yr);// (double)y + AA_JITTER*yrand[((x + w*y)*max_samples + s) % yrand.size()]; glm::dvec3 pixelPos = screenCenter + (sensorHalfWidth*(dx / w - .5))*cameraRight - (sensorHalfHeight*(dy / h - .5))*_camera.up; glm::dvec3 eyePos = _camera.pos + FOCAL_BLUR*(xr - .5)*cameraRight + FOCAL_BLUR*(yr - .5) *_camera.up; Ray ray(eyePos, glm::normalize(pixelPos - eyePos)); if (s == 0) color = radiance(ray, 0, sampler, halton_enum, /*(x + w*y)*SAMPLES + s*/ idx, glm::dvec3(1, 1, 1)); else color = (1.0*s*color + radiance(ray, 0, sampler, halton_enum, /*(x + w*y)*SAMPLES + s*/ idx, glm::dvec3(1, 1, 1)))*(1.0 / (s + 1));// (1.0 / SAMPLES)*radiance(ray, 0); if (s > 0) { var = (1.0*5*var + glm::length(color - lastCol))*(1.0 / (5 + 1)); //var = .5*var + .5*vars[clamp(0, w, x - 1) + w*clamp(0, h, y)]; } if (s > 0 && var > noise_thresh) samps-=2; s++; samps++; } color = gamma(color, 2.2); //vars[x + w*y] = var; #pragma omp critical (im_update) { _image->setPixel(x, y, glm::clamp(color/*1.0*(double)s / max_samples*glm::dvec3(1, 1, 1)*/, 0.0, 1.0)); } } } } avgTests /= w*h; std::cout << "average intersection tests: " << avgTests << "\n"; } glm::dvec3 radiance(const Ray& ray, int depth, const Halton_sampler& halton_sampler, const Halton_enum& halton_enum, int sample, glm::dvec3 contrib) { if (depth > MAX_DEPTH) return glm::dvec3(0, 0, 0); float sx = halton_sampler.sample(2 + 2*depth, sample); float sy = halton_sampler.sample(3 + 2*depth, sample); double offset = SHADOW_BIAS; glm::dvec3 minHit, minNorm; glm::dvec2 minUV; Entity* current; bool backface = false; texture debugTex(glm::dvec3(.5, .5, .5)); texture debugTexEm(glm::dvec3(0, 0, 0)); Material dummyMat(&debugTex, &debugTexEm, 1, 1); sphere debugSphere(glm::dvec3(0, 0, 0), 1, dummyMat); bool intersected = trace(ray, minHit, minNorm, minUV, current); if (intersected) { if (!current) current = &debugSphere; glm::dvec3 i(0, 0, 0); glm::dvec3 refDir; glm::dvec3 color = current->material.diffuse->get(minUV); double roughness = current->material.roughness; //type of secondary ray, 0 for reflection, 1 for refraction, 2 for glossy // int type = rayType(current, ray, minNorm, minUV); glm::dvec3 f(1, 1, 1); secondaryRay(ray, current, minNorm, minUV, sx, sy, refDir, f, roughness, contrib, offset); double tmin = 0; double tmax = glm::length(minHit - ray.origin); if (_scene->atmosphereBounds(ray, tmin, tmax)) { glm::dvec3 hit, col; //std::cout << "atmosphere bounds hit: " << tmin << ", " << tmax << "\n"; if (raymarch(ray, hit, col, tmin, tmax)) { minHit = hit; refDir = randomUnitVec(sx, sy); f = 1.0*col; color = col; contrib = col; roughness = 1; //std::cout << "atmosphere hit\n"; } } for (Light* light : _scene->lights) { bool 
shadow = false; glm::dvec3 lightDir = light->getPoint(drand(), drand()) - (minHit + SHADOW_BIAS*minNorm); double maxt = vecLengthSquared(lightDir); //double cos_alpha = (light->rad / sqrt(vecLengthSquared(lightDir) + std::pow(light->rad, 2))); double hfrac = 1 / (M_PI*vecLengthSquared(light->pos - minHit)); //fraction of the hemisphere Ray shadow_ray(minHit + SHADOW_BIAS*minNorm, lightDir); shadow = !visible(shadow_ray, maxt); if (!shadow) { double d = glm::dot(minNorm, glm::normalize(light->pos - minHit)); if (d < 0) d = 0; double l = pow(d, (1.0 / roughness)); i = light->col*l*hfrac; } } glm::dvec3 caustic = depth <= 10 ? samplePhotons(minHit, refDir, 32) : glm::dvec3(0, 0, 0); //return color*caustic; // continuation probability double q = compMax(contrib); if (depth <= MIN_DEPTH || drand() < q) { f *= depth <= MIN_DEPTH ? 1.0 : (1.0 / q); //diffuse*direct_light + diffuse*brdf*radiance + emmissive return color*i + f*radiance(Ray(minHit + offset*minNorm, refDir), ++depth, halton_sampler, halton_enum, sample, contrib) + current->material.emissive->get(minUV) + color*caustic; } else return color*i;// glm::dvec3(0, 0, 0);///*1.0*depth / MAX_DEPTH * glm::dvec3(1, 1, 1);//*/ //current->material.diffuse->get(minUV)*i + current->material.emissive->get(minUV); } else return /*1.0*depth / MAX_DEPTH * glm::dvec3(1, 1, 1);//*/ ambient; } //checks if the given ray is visible, meaning that nothing overlaps it bool visible(const Ray& ray, double mt) { bool hit = false; std::vector<Entity*> shadow_objects = _scene->intersect(ray, 0, sqrt(mt)-SHADOW_BIAS); std::vector<Entity*>::iterator shadow_it = shadow_objects.begin(); #ifdef DEBUG_OCTREE return shadow_it == shadow_objects.end(); #endif while (!hit && shadow_it != shadow_objects.end()) { Entity* t = *shadow_it; glm::dvec3 pos, norm; glm::dvec2 uv; if (t->intersect(ray, pos, norm, uv) && (drand() < t->material.getAlpha(uv) || t->material.IOR != 1)) { double t_shadow = vecLengthSquared(pos - ray.origin); hit = (t_shadow < mt)&&(t_shadow > 0); } ++shadow_it; } if (hit) return false; double tmin = 0; double tmax = mt; if (_scene->atmosphereBounds(ray, tmin, tmax)) { glm::dvec3 hit, col; if (raymarch(ray, hit, col, tmin, tmax)) return false; } return true; } void secondaryRay(const Ray& ray, const Entity* current, glm::dvec3& norm, glm::dvec2& UV, double sx, double sy, glm::dvec3& refDir, glm::dvec3& f, double& roughness, glm::dvec3& contrib, double& offset) { bool backface = false; if (glm::dot(norm, ray.dir) > 0) { norm *= -1.0; backface = true; } glm::dvec3 color = current->material.diffuse->get(UV); roughness = current->material.roughness; int type = rayType(current, ray, norm, UV); if (type == 1) { if (backface) { refDir = refr(ray.dir, norm, current->material.IOR); } else { refDir = refr(ray.dir, norm, 1.0 / current->material.IOR); } offset *= -1; contrib = glm::dvec3(1, 1, 1); f = 1.0*color; } else if (type == 0) { refDir = glm::reflect(ray.dir, norm); contrib = glm::dvec3(1, 1, 1); f = 1.0*color; // glm::dot(refDir, minNorm); } else { refDir = hemisphereSample_cos(norm, sx, sy, 2); if (current->material.roughness < .9) { refDir = sample_phong(glm::reflect(ray.dir, norm), norm, (1.0 / (current->material.roughness)) + 1, sx, sy); if (glm::dot(refDir, norm) < 0) refDir = glm::reflect(refDir, norm); } f = 1.0*color; glm::dvec3 inf = color;// *pow(dot, 1 / current->material.roughness); contrib *= inf; contrib = glm::mix(contrib, inf, 0.5); } } //traces a ray against the scene geometry, returns true on intersection bool trace(const Ray& ray, glm::dvec3& 
minHit, glm::dvec3& minNorm, glm::dvec2& minUV, Entity*& obj) { glm::dvec3 hit, norm; glm::dvec2 uv; bool intersected = false; std::vector<std::pair<const Octree::Node*, double>> nodes = _scene->intersectSorted(ray, 0, INFINITY); std::vector<std::pair<const Octree::Node*, double>>::iterator nd = nodes.begin(); #ifdef DEBUG_OCTREE if(nd != nodes.end()) { const Octree::Node* curNode = nd->first; double tmin, tmax; auto snap = [](glm::dvec3& in) {return in / std::max(in[0], std::max(in[1], in[2])); }; curNode->_bbox.intersect(ray, 0, INFINITY, tmin, tmax); minHit = ray.origin + tmin * ray.dir; glm::dvec3 p = minHit - curNode->_bbox.center(); glm::dvec3 d = .5 * curNode->_bbox.size(); glm::dvec3 n = (p / d) * (1.0001); auto absMax = [](double a, double b){ return (std::max(std::abs(a), std::abs(b)) == std::abs(a)) ? a : b; }; double max = absMax(n[0], absMax(n[1], n[2])); glm::dvec3 n_ = glm::dvec3(max == n[0], max == n[1], max == n[2]); if (glm::length(n_) < .1 || glm::length(n_) > 2) { std::cout << "normal length out of tolerance: " << n_[0] << ", " << n_[1] << ", " << n_[2] << ", " << n[0] << ", " << n[1] << ", " << n[2] << ", " << "\n"; n_ = n; } minNorm = glm::normalize(n_); minUV = glm::dvec2(0, 0); obj = NULL; return true; } return false; #endif //avgTests += objects.size(); if (drand() < .5 && nodes.size() > 500) { std::cout << nodes.size() << ", ray: " << ray.origin.x << ", " << ray.origin.y << ", " << ray.origin.z << "\n"; } Entity* current; bool term = false; while (nd != nodes.end() && !term) { const Octree::Node* curNode = nd->first; std::vector<Entity*>::const_iterator it = curNode->_entities.begin(); while (it != curNode->_entities.end()) { Entity* tmp = *it; if (tmp->intersect(ray, hit, norm, uv) && (drand() < tmp->material.getAlpha(uv) || tmp->material.IOR != 1)) { if ((!intersected || vecLengthSquared(hit - ray.origin) < vecLengthSquared(minHit - ray.origin))) { current = tmp; minHit = hit; minNorm = norm; minUV = uv; intersected = true; if (curNode->_bbox.contains(hit)) term = true; } } ++it; } ++nd; } if(intersected) obj = current; return intersected; } //returns the type of the secondary ray, 0 for reflection, 1 for refraction, 2 for diffuse/glossy int rayType(const Entity* entity, const Ray& ray, glm::dvec3& norm, glm::dvec2& minUV) { int type = 2; double IOR = entity->material.IOR; double opacity = entity->material.diffuse->getAlpha(minUV) * entity->material.opacity; double r0 = std::pow((1 - IOR) / (1 + IOR), 2); //Schlicks approximation of the fresnel term double fs = r0 + (1 - r0)*std::pow(1 - glm::dot(glm::reflect(ray.dir, norm), norm), 5); if (entity->material.roughness < .001) { type = 0; } if (drand() > opacity) { if (drand() < fs) type = 0; else type = 1; } return type; } //uses raymarching to determine the intersection point of a ray with the atmosphere bool raymarch(const Ray& r, glm::dvec3& hit, glm::dvec3& col, double mint, double maxt) { double t = mint + SHADOW_BIAS; double scatter; glm::dvec3 current = r.origin + mint*r.dir; while (t < maxt) { if (drand() < _scene->atmosphereDensity(current, col, scatter)) { hit = current; return true; } current += RAYMARCH_STEPSIZE * r.dir; t += RAYMARCH_STEPSIZE; } return false; } //computes a radiance estimate from the photons surrounding the hit point glm::dvec3 samplePhotons(glm::dvec3 pos, glm::dvec3 dir, int count) { double dist = .0; glm::dvec3 res(0, 0, 0); double scale = 0; std::vector<Photon*> photons = _photon_map->getInRange(pos, scale, dist); /*if(photons.size() > 9*MAX_PHOTONS_PER_LEAF && drand() < .01) { 
std::cout << "photons: " << photons.size() << "\n"; }*/ count = std::min(count, (int)photons.size()); std::partial_sort(photons.begin(), photons.begin() + count, photons.end(), [pos](const Photon* lhs, const Photon* rhs) {return vecLengthSquared(lhs->origin - pos) < vecLengthSquared(rhs->origin - pos); }); double maxDist = 0; //std::cout << photons.size() << "\n"; /*double lastDist = 0; if(photons.size() > 0) lastDist = vecLengthSquared(photons[0]->origin - pos);*/ for (int i = 0; i < count; i++) { Photon* p = photons[i]; /*if((vecLengthSquared(p->origin - pos) - lastDist) < 0) { std::cout << "non-ascending photon distance: " << lastDist << ", " << vecLengthSquared(p->origin - pos) << "\n"; } lastDist = vecLengthSquared(p->origin - pos);*/ //if (vecLengthSquared(p->origin - pos) < .01 && visible(Ray(pos + SHADOW_BIAS*dir, p->origin - pos - SHADOW_BIAS*dir), glm::length(p->origin - pos - SHADOW_BIAS*dir))) res += p->col*glm::dot(p->dir, dir); } if(photons.size() > 0) { maxDist = vecLengthSquared(photons[count - 1]->origin - pos); res /= (M_PI*maxDist); } return res; } //traces caustic photons from every light source void tracePhotons(int maxDepth, int count, Halton_sampler& halton_sampler, Halton_enum& halton_enum) { _photon_map->reserve(count); int total = 0; #pragma omp parallel { std::vector<Photon*> tmp; int tmpCount = 0; #pragma omp for for (int i = 0; i < count; i++) { for (Light* l : _scene->lights) { int tries = 0; bool stored = false; //srand(i); while (!stored && tries < 500) { float sx = halton_sampler.sample(0, i * 500 + tries); float sy = halton_sampler.sample(1, i * 500 + tries); //sx = fmod(halton_enum.scale_x(sx), 1.0); //sy = fmod(halton_enum.scale_y(sy), 1.0); //std::cout << i * 500 + tries << ": " << sx << ", " << sy << "\n"; glm::dvec3 pos = l->getPointInRange(sx, sy); glm::dvec3 dir = sphereCapSample_cos(glm::normalize(pos - l->pos), fmod(drand() + 5 * i, 1), fmod(drand() + 13 * i, 1), 2, l->angle); Ray r(pos, dir); glm::dvec3 hit, norm; glm::dvec3 col = (1.0/count)*.5*l->angle*l->col; glm::dvec2 UV; Entity* current; int depth = 0; bool term = false; bool isCaustic = false; if (!trace(r, hit, norm, UV, current)) { tries++; continue; } while (depth < maxDepth && !term) { double roughness = current->material.roughness; //std::cout << "tracing photon, depth: " << depth << "\n"; if (roughness < 0.1) { //std::cout << "tracing caustics photon at depth: " << depth << "\n"; if (!trace(r, hit, norm, UV, current)) { term = true; continue; } roughness = current->material.roughness; glm::dvec3 refDir, f, contrib; double offset = SHADOW_BIAS; sx = halton_sampler.sample(0, maxDepth*(i * 500 + tries) + depth); sy = halton_sampler.sample(1, maxDepth*(i * 500 + tries) + depth); //sx = fmod(halton_enum.scale_x(sx), 1.0); //sy = fmod(halton_enum.scale_y(sy), 1.0); secondaryRay(r, current, norm, UV, fmod(drand() + 5 * i, 1), fmod(drand() + 13 * i, 1), refDir, f, roughness, contrib, offset); double tmin = 0; double tmax = glm::length(hit - r.origin); if (_scene->atmosphereBounds(r, tmin, tmax)) { glm::dvec3 ahit, color; //std::cout << "atmosphere bounds hit: " << tmin << ", " << tmax << "\n"; if (raymarch(r, ahit, color, tmin, tmax)) { hit = ahit; refDir = randomUnitVec(fmod(drand() + 13 * i, 1), fmod(drand() + 7 * i, 1)); f = 1.0*color; roughness = 1; //std::cout << "atmosphere hit by photon\n"; } } col *= f; r.origin = hit + offset*norm; r.setDir(refDir); isCaustic = true; } if (depth > 0 && isCaustic && roughness >= 0.1) { //std::cout << "photon stored\n"; tmp.push_back(new Photon(hit, 
r.dir, col)); term = true; stored = true; } depth++; } tries++; } tmpCount += tries; } } #pragma omp critical { for (Photon* p : tmp) { _photon_map->push_back(p); //_scene->push_back(new sphere(p->origin, 0.01, Material(new texture(p->col), new texture(glm::dvec3(0, 0, 0)), 1, 1))); } total += tmpCount; } } std::cout << "total photon tests: " << total << "\n"; } bool running() const { return _running; } void stop() { _running = false; } void start() { _running = true; } int photons = PHOTONS; int photon_depth = PHOTON_DEPTH; int min_samples = MIN_SAMPLES; int max_samples = SAMPLES; double noise_thresh = NOISE_THRESH; glm::dvec3 ambient = glm::dvec3(0, 0, 0); std::shared_ptr<Image> getImage() const { return _image; } Camera _camera; private: bool _running = false; Octree* _scene; PhotonMap* _photon_map; std::shared_ptr<Image> _image;};
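The original_c cell above calls srand(std::time(0) + rand()) at the top of each scanline iteration, inside the OpenMP-parallel region, and draws samples through rand()-style helpers such as drand() (defined in util.h, not shown here). rand() mutates a single hidden state shared by all threads and is not guaranteed thread-safe by the C standard. A minimal sketch of one common alternative, a thread-local <random> engine; the helper name thread_drand and the seeding constants are illustrative, not part of the project:

#include <cstdio>
#include <random>
#include <omp.h>

// One generator per thread: no shared state to race on and no locking,
// unlike rand(), whose single hidden state is shared by every thread.
static double thread_drand()
{
    // Illustrative seeding: a fixed constant mixed with the OpenMP thread id,
    // so each thread gets a distinct, reproducible stream.
    thread_local std::mt19937_64 engine(0x9E3779B97F4A7C15ull +
                                        (unsigned long long)omp_get_thread_num());
    thread_local std::uniform_real_distribution<double> dist(0.0, 1.0);
    return dist(engine);
}

int main()
{
    double total = 0.0;
    #pragma omp parallel for reduction(+ : total)
    for (int i = 0; i < 1000000; ++i)
        total += thread_drand();
    std::printf("mean = %f (should be near 0.5)\n", total / 1000000.0);
    return 0;
}

The thread_local initializer runs on each thread's first call, which here happens inside the parallel loop, so omp_get_thread_num() returns the correct id.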
#pragma once #include <algorithm> #include <memory> #include <set> #include <chrono> #include <glm/glm.hpp> #include "camera.h" #include "entities.h" #include "image.h" #include "octree.h" #include "omp.h" #include <ctime> #include "util.h" #include "halton_enum.h" #include "halton_sampler.h" #include "photonMap.h" //#define DEBUG_OCTREE class RayTracer { public: int p = 0; int width, height; RayTracer() = delete; RayTracer(const Camera& camera) : _camera(camera), _image(std::make_shared<Image>(0, 0)) { }; void setScene(Octree* scene) { _scene = scene; _photon_map = new PhotonMap(_scene->_root._bbox.min, _scene->_root._bbox.max); } void run(int w, int h) { std::cout << "starting raytracer with frame size: " << w << ", " << h << "\n"; srand(std::time(0)); Halton_sampler sampler; sampler.init_faure(); Halton_enum halton_enum(w, h); _image = std::make_shared<Image>(w, h); width = w; height = h; if (!_scene->valid) { _scene->rebuild(); } if (!_photon_map->valid) { auto start = std::chrono::high_resolution_clock::now(); std::cout << "emitting photons...\n"; tracePhotons(5, photons, sampler, halton_enum); auto end = std::chrono::high_resolution_clock::now(); std::cout << "photon time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count()/1000.0 << " s\n"; _photon_map->rebuild(); //_scene->rebuild(); } double sensorHalfWidth = (_camera.sensorDiag*w)/(sqrt((double)w*w + h*h)); double sensorHalfHeight = sensorHalfWidth * ((double)h/w); glm::dvec3 screenCenter = _camera.pos + _camera.focalDist*_camera.forward; glm::dvec3 cameraRight = glm::normalize(glm::cross(_camera.forward, _camera.up)); std::cout << cameraRight.x << ", " << cameraRight.y << ", " << cameraRight.z << ", " << "\n"; double avgTests = 0; std::vector<double> vars; vars.reserve(w*h); std::vector<double> xrand; subrand(xrand, 25000); std::vector<double> yrand; subrand(yrand, 25000); // The structure of the for loop should remain for incremental rendering. 
for (int y = 0; y < h; ++y) { srand(std::time(0) + rand()); if(_running) { for (int x = 0; x < w; ++x) { glm::dvec3 color(0.5, 0.5, 0.5); glm::dvec3 lastCol(0, 0, 0); double var = 0; int samps = 0; int s = 0; while (s < max_samples && samps < min_samples) { lastCol = color; int idx = halton_enum.get_index(s, x, y); double xr = sampler.sample(0, idx); double yr = sampler.sample(1, idx); //xr = .2*fmod(halton_enum.scale_x(xr), 1.0) + .8*1*xrand[((x + w*y)*max_samples + s) % xrand.size()];// + 0*drand(); //yr = .2*fmod(halton_enum.scale_y(yr), 1.0) + .8*1*yrand[((x + w*y)*max_samples + s) % yrand.size()];// + 0*drand(); //std::cout << (xr - yr) << "\n"; double dx = halton_enum.scale_x(xr);// (double)x + AA_JITTER*xrand[((x + w*y)*max_samples + s) % xrand.size()]; double dy = halton_enum.scale_y(yr);// (double)y + AA_JITTER*yrand[((x + w*y)*max_samples + s) % yrand.size()]; glm::dvec3 pixelPos = screenCenter + (sensorHalfWidth*(dx / w - .5))*cameraRight - (sensorHalfHeight*(dy / h - .5))*_camera.up; glm::dvec3 eyePos = _camera.pos + FOCAL_BLUR*(xr - .5)*cameraRight + FOCAL_BLUR*(yr - .5) *_camera.up; Ray ray(eyePos, glm::normalize(pixelPos - eyePos)); if (s == 0) color = radiance(ray, 0, sampler, halton_enum, /*(x + w*y)*SAMPLES + s*/ idx, glm::dvec3(1, 1, 1)); else color = (1.0*s*color + radiance(ray, 0, sampler, halton_enum, /*(x + w*y)*SAMPLES + s*/ idx, glm::dvec3(1, 1, 1)))*(1.0 / (s + 1));// (1.0 / SAMPLES)*radiance(ray, 0); if (s > 0) { var = (1.0*5*var + glm::length(color - lastCol))*(1.0 / (5 + 1)); //var = .5*var + .5*vars[clamp(0, w, x - 1) + w*clamp(0, h, y)]; } if (s > 0 && var > noise_thresh) samps-=2; s++; samps++; } color = gamma(color, 2.2); //vars[x + w*y] = var; _image->setPixel(x, y, glm::clamp(color/*1.0*(double)s / max_samples*glm::dvec3(1, 1, 1)*/, 0.0, 1.0)); } } } avgTests /= w*h; std::cout << "average intersection tests: " << avgTests << "\n"; } glm::dvec3 radiance(const Ray& ray, int depth, const Halton_sampler& halton_sampler, const Halton_enum& halton_enum, int sample, glm::dvec3 contrib) { if (depth > MAX_DEPTH) return glm::dvec3(0, 0, 0); float sx = halton_sampler.sample(2 + 2*depth, sample); float sy = halton_sampler.sample(3 + 2*depth, sample); double offset = SHADOW_BIAS; glm::dvec3 minHit, minNorm; glm::dvec2 minUV; Entity* current; bool backface = false; texture debugTex(glm::dvec3(.5, .5, .5)); texture debugTexEm(glm::dvec3(0, 0, 0)); Material dummyMat(&debugTex, &debugTexEm, 1, 1); sphere debugSphere(glm::dvec3(0, 0, 0), 1, dummyMat); bool intersected = trace(ray, minHit, minNorm, minUV, current); if (intersected) { if (!current) current = &debugSphere; glm::dvec3 i(0, 0, 0); glm::dvec3 refDir; glm::dvec3 color = current->material.diffuse->get(minUV); double roughness = current->material.roughness; //type of secondary ray, 0 for reflection, 1 for refraction, 2 for glossy // int type = rayType(current, ray, minNorm, minUV); glm::dvec3 f(1, 1, 1); secondaryRay(ray, current, minNorm, minUV, sx, sy, refDir, f, roughness, contrib, offset); double tmin = 0; double tmax = glm::length(minHit - ray.origin); if (_scene->atmosphereBounds(ray, tmin, tmax)) { glm::dvec3 hit, col; //std::cout << "atmosphere bounds hit: " << tmin << ", " << tmax << "\n"; if (raymarch(ray, hit, col, tmin, tmax)) { minHit = hit; refDir = randomUnitVec(sx, sy); f = 1.0*col; color = col; contrib = col; roughness = 1; //std::cout << "atmosphere hit\n"; } } for (Light* light : _scene->lights) { bool shadow = false; glm::dvec3 lightDir = light->getPoint(drand(), drand()) - (minHit + 
SHADOW_BIAS*minNorm); double maxt = vecLengthSquared(lightDir); //double cos_alpha = (light->rad / sqrt(vecLengthSquared(lightDir) + std::pow(light->rad, 2))); double hfrac = 1 / (M_PI*vecLengthSquared(light->pos - minHit)); //fraction of the hemisphere Ray shadow_ray(minHit + SHADOW_BIAS*minNorm, lightDir); shadow = !visible(shadow_ray, maxt); if (!shadow) { double d = glm::dot(minNorm, glm::normalize(light->pos - minHit)); if (d < 0) d = 0; double l = pow(d, (1.0 / roughness)); i = light->col*l*hfrac; } } glm::dvec3 caustic = depth <= 10 ? samplePhotons(minHit, refDir, 32) : glm::dvec3(0, 0, 0); //return color*caustic; // continuation probability double q = compMax(contrib); if (depth <= MIN_DEPTH || drand() < q) { f *= depth <= MIN_DEPTH ? 1.0 : (1.0 / q); //diffuse*direct_light + diffuse*brdf*radiance + emmissive return color*i + f*radiance(Ray(minHit + offset*minNorm, refDir), ++depth, halton_sampler, halton_enum, sample, contrib) + current->material.emissive->get(minUV) + color*caustic; } else return color*i;// glm::dvec3(0, 0, 0);///*1.0*depth / MAX_DEPTH * glm::dvec3(1, 1, 1);//*/ //current->material.diffuse->get(minUV)*i + current->material.emissive->get(minUV); } else return /*1.0*depth / MAX_DEPTH * glm::dvec3(1, 1, 1);//*/ ambient; } //checks if the given ray is visible, meaning that nothing overlaps it bool visible(const Ray& ray, double mt) { bool hit = false; std::vector<Entity*> shadow_objects = _scene->intersect(ray, 0, sqrt(mt)-SHADOW_BIAS); std::vector<Entity*>::iterator shadow_it = shadow_objects.begin(); #ifdef DEBUG_OCTREE return shadow_it == shadow_objects.end(); #endif while (!hit && shadow_it != shadow_objects.end()) { Entity* t = *shadow_it; glm::dvec3 pos, norm; glm::dvec2 uv; if (t->intersect(ray, pos, norm, uv) && (drand() < t->material.getAlpha(uv) || t->material.IOR != 1)) { double t_shadow = vecLengthSquared(pos - ray.origin); hit = (t_shadow < mt)&&(t_shadow > 0); } ++shadow_it; } if (hit) return false; double tmin = 0; double tmax = mt; if (_scene->atmosphereBounds(ray, tmin, tmax)) { glm::dvec3 hit, col; if (raymarch(ray, hit, col, tmin, tmax)) return false; } return true; } void secondaryRay(const Ray& ray, const Entity* current, glm::dvec3& norm, glm::dvec2& UV, double sx, double sy, glm::dvec3& refDir, glm::dvec3& f, double& roughness, glm::dvec3& contrib, double& offset) { bool backface = false; if (glm::dot(norm, ray.dir) > 0) { norm *= -1.0; backface = true; } glm::dvec3 color = current->material.diffuse->get(UV); roughness = current->material.roughness; int type = rayType(current, ray, norm, UV); if (type == 1) { if (backface) { refDir = refr(ray.dir, norm, current->material.IOR); } else { refDir = refr(ray.dir, norm, 1.0 / current->material.IOR); } offset *= -1; contrib = glm::dvec3(1, 1, 1); f = 1.0*color; } else if (type == 0) { refDir = glm::reflect(ray.dir, norm); contrib = glm::dvec3(1, 1, 1); f = 1.0*color; // glm::dot(refDir, minNorm); } else { refDir = hemisphereSample_cos(norm, sx, sy, 2); if (current->material.roughness < .9) { refDir = sample_phong(glm::reflect(ray.dir, norm), norm, (1.0 / (current->material.roughness)) + 1, sx, sy); if (glm::dot(refDir, norm) < 0) refDir = glm::reflect(refDir, norm); } f = 1.0*color; glm::dvec3 inf = color;// *pow(dot, 1 / current->material.roughness); contrib *= inf; contrib = glm::mix(contrib, inf, 0.5); } } //traces a ray against the scene geometry, returns true on intersection bool trace(const Ray& ray, glm::dvec3& minHit, glm::dvec3& minNorm, glm::dvec2& minUV, Entity*& obj) { glm::dvec3 hit, 
norm; glm::dvec2 uv; bool intersected = false; std::vector<std::pair<const Octree::Node*, double>> nodes = _scene->intersectSorted(ray, 0, INFINITY); std::vector<std::pair<const Octree::Node*, double>>::iterator nd = nodes.begin(); #ifdef DEBUG_OCTREE if(nd != nodes.end()) { const Octree::Node* curNode = nd->first; double tmin, tmax; auto snap = [](glm::dvec3& in) {return in / std::max(in[0], std::max(in[1], in[2])); }; curNode->_bbox.intersect(ray, 0, INFINITY, tmin, tmax); minHit = ray.origin + tmin * ray.dir; glm::dvec3 p = minHit - curNode->_bbox.center(); glm::dvec3 d = .5 * curNode->_bbox.size(); glm::dvec3 n = (p / d) * (1.0001); auto absMax = [](double a, double b){ return (std::max(std::abs(a), std::abs(b)) == std::abs(a)) ? a : b; }; double max = absMax(n[0], absMax(n[1], n[2])); glm::dvec3 n_ = glm::dvec3(max == n[0], max == n[1], max == n[2]); if (glm::length(n_) < .1 || glm::length(n_) > 2) { std::cout << "normal length out of tolerance: " << n_[0] << ", " << n_[1] << ", " << n_[2] << ", " << n[0] << ", " << n[1] << ", " << n[2] << ", " << "\n"; n_ = n; } minNorm = glm::normalize(n_); minUV = glm::dvec2(0, 0); obj = NULL; return true; } return false; #endif //avgTests += objects.size(); if (drand() < .5 && nodes.size() > 500) { std::cout << nodes.size() << ", ray: " << ray.origin.x << ", " << ray.origin.y << ", " << ray.origin.z << "\n"; } Entity* current; bool term = false; while (nd != nodes.end() && !term) { const Octree::Node* curNode = nd->first; std::vector<Entity*>::const_iterator it = curNode->_entities.begin(); while (it != curNode->_entities.end()) { Entity* tmp = *it; if (tmp->intersect(ray, hit, norm, uv) && (drand() < tmp->material.getAlpha(uv) || tmp->material.IOR != 1)) { if ((!intersected || vecLengthSquared(hit - ray.origin) < vecLengthSquared(minHit - ray.origin))) { current = tmp; minHit = hit; minNorm = norm; minUV = uv; intersected = true; if (curNode->_bbox.contains(hit)) term = true; } } ++it; } ++nd; } if(intersected) obj = current; return intersected; } //returns the type of the secondary ray, 0 for reflection, 1 for refraction, 2 for diffuse/glossy int rayType(const Entity* entity, const Ray& ray, glm::dvec3& norm, glm::dvec2& minUV) { int type = 2; double IOR = entity->material.IOR; double opacity = entity->material.diffuse->getAlpha(minUV) * entity->material.opacity; double r0 = std::pow((1 - IOR) / (1 + IOR), 2); //Schlicks approximation of the fresnel term double fs = r0 + (1 - r0)*std::pow(1 - glm::dot(glm::reflect(ray.dir, norm), norm), 5); if (entity->material.roughness < .001) { type = 0; } if (drand() > opacity) { if (drand() < fs) type = 0; else type = 1; } return type; } //uses raymarching to determine the intersection point of a ray with the atmosphere bool raymarch(const Ray& r, glm::dvec3& hit, glm::dvec3& col, double mint, double maxt) { double t = mint + SHADOW_BIAS; double scatter; glm::dvec3 current = r.origin + mint*r.dir; while (t < maxt) { if (drand() < _scene->atmosphereDensity(current, col, scatter)) { hit = current; return true; } current += RAYMARCH_STEPSIZE * r.dir; t += RAYMARCH_STEPSIZE; } return false; } //computes a radiance estimate from the photons surrounding the hit point glm::dvec3 samplePhotons(glm::dvec3 pos, glm::dvec3 dir, int count) { double dist = .0; glm::dvec3 res(0, 0, 0); double scale = 0; std::vector<Photon*> photons = _photon_map->getInRange(pos, scale, dist); /*if(photons.size() > 9*MAX_PHOTONS_PER_LEAF && drand() < .01) { std::cout << "photons: " << photons.size() << "\n"; }*/ count = std::min(count, 
(int)photons.size()); std::partial_sort(photons.begin(), photons.begin() + count, photons.end(), [pos](const Photon* lhs, const Photon* rhs) {return vecLengthSquared(lhs->origin - pos) < vecLengthSquared(rhs->origin - pos); }); double maxDist = 0; //std::cout << photons.size() << "\n"; /*double lastDist = 0; if(photons.size() > 0) lastDist = vecLengthSquared(photons[0]->origin - pos);*/ for (int i = 0; i < count; i++) { Photon* p = photons[i]; /*if((vecLengthSquared(p->origin - pos) - lastDist) < 0) { std::cout << "non-ascending photon distance: " << lastDist << ", " << vecLengthSquared(p->origin - pos) << "\n"; } lastDist = vecLengthSquared(p->origin - pos);*/ //if (vecLengthSquared(p->origin - pos) < .01 && visible(Ray(pos + SHADOW_BIAS*dir, p->origin - pos - SHADOW_BIAS*dir), glm::length(p->origin - pos - SHADOW_BIAS*dir))) res += p->col*glm::dot(p->dir, dir); } if(photons.size() > 0) { maxDist = vecLengthSquared(photons[count - 1]->origin - pos); res /= (M_PI*maxDist); } return res; } //traces caustic photons from every light source void tracePhotons(int maxDepth, int count, Halton_sampler& halton_sampler, Halton_enum& halton_enum) { _photon_map->reserve(count); int total = 0; std::vector<Photon*> tmp; int tmpCount = 0; for (int i = 0; i < count; i++) { for (Light* l : _scene->lights) { int tries = 0; bool stored = false; //srand(i); while (!stored && tries < 500) { float sx = halton_sampler.sample(0, i * 500 + tries); float sy = halton_sampler.sample(1, i * 500 + tries); //sx = fmod(halton_enum.scale_x(sx), 1.0); //sy = fmod(halton_enum.scale_y(sy), 1.0); //std::cout << i * 500 + tries << ": " << sx << ", " << sy << "\n"; glm::dvec3 pos = l->getPointInRange(sx, sy); glm::dvec3 dir = sphereCapSample_cos(glm::normalize(pos - l->pos), fmod(drand() + 5 * i, 1), fmod(drand() + 13 * i, 1), 2, l->angle); Ray r(pos, dir); glm::dvec3 hit, norm; glm::dvec3 col = (1.0/count)*.5*l->angle*l->col; glm::dvec2 UV; Entity* current; int depth = 0; bool term = false; bool isCaustic = false; if (!trace(r, hit, norm, UV, current)) { tries++; continue; } while (depth < maxDepth && !term) { double roughness = current->material.roughness; //std::cout << "tracing photon, depth: " << depth << "\n"; if (roughness < 0.1) { //std::cout << "tracing caustics photon at depth: " << depth << "\n"; if (!trace(r, hit, norm, UV, current)) { term = true; continue; } roughness = current->material.roughness; glm::dvec3 refDir, f, contrib; double offset = SHADOW_BIAS; sx = halton_sampler.sample(0, maxDepth*(i * 500 + tries) + depth); sy = halton_sampler.sample(1, maxDepth*(i * 500 + tries) + depth); //sx = fmod(halton_enum.scale_x(sx), 1.0); //sy = fmod(halton_enum.scale_y(sy), 1.0); secondaryRay(r, current, norm, UV, fmod(drand() + 5 * i, 1), fmod(drand() + 13 * i, 1), refDir, f, roughness, contrib, offset); double tmin = 0; double tmax = glm::length(hit - r.origin); if (_scene->atmosphereBounds(r, tmin, tmax)) { glm::dvec3 ahit, color; //std::cout << "atmosphere bounds hit: " << tmin << ", " << tmax << "\n"; if (raymarch(r, ahit, color, tmin, tmax)) { hit = ahit; refDir = randomUnitVec(fmod(drand() + 13 * i, 1), fmod(drand() + 7 * i, 1)); f = 1.0*color; roughness = 1; //std::cout << "atmosphere hit by photon\n"; } } col *= f; r.origin = hit + offset*norm; r.setDir(refDir); isCaustic = true; } if (depth > 0 && isCaustic && roughness >= 0.1) { //std::cout << "photon stored\n"; tmp.push_back(new Photon(hit, r.dir, col)); term = true; stored = true; } depth++; } tries++; } tmpCount += tries; } } for (Photon* p : tmp) { 
_photon_map->push_back(p); //_scene->push_back(new sphere(p->origin, 0.01, Material(new texture(p->col), new texture(glm::dvec3(0, 0, 0)), 1, 1))); } total += tmpCount; std::cout << "total photon tests: " << total << "\n"; } bool running() const { return _running; } void stop() { _running = false; } void start() { _running = true; } int photons = PHOTONS; int photon_depth = PHOTON_DEPTH; int min_samples = MIN_SAMPLES; int max_samples = SAMPLES; double noise_thresh = NOISE_THRESH; glm::dvec3 ambient = glm::dvec3(0, 0, 0); std::shared_ptr<Image> getImage() const { return _image; } Camera _camera; private: bool _running = false; Octree* _scene; PhotonMap* _photon_map; std::shared_ptr<Image> _image;};
#pragma once #include <algorithm> #include <memory> #include <set> #include <chrono> #include <glm/glm.hpp> #include "camera.h" #include "entities.h" #include "image.h" #include "octree.h" #include "omp.h" #include <ctime> #include "util.h" #include "halton_enum.h" #include "halton_sampler.h" #include "photonMap.h" //#define DEBUG_OCTREE class RayTracer { public: int p = 0; int width, height; RayTracer() = delete; RayTracer(const Camera& camera) : _camera(camera), _image(std::make_shared<Image>(0, 0)) { }; void setScene(Octree* scene) { _scene = scene; _photon_map = new PhotonMap(_scene->_root._bbox.min, _scene->_root._bbox.max); } void run(int w, int h) { std::cout << "starting raytracer with frame size: " << w << ", " << h << "\n"; srand(std::time(0)); Halton_sampler sampler; sampler.init_faure(); Halton_enum halton_enum(w, h); _image = std::make_shared<Image>(w, h); width = w; height = h; if (!_scene->valid) { _scene->rebuild(); } if (!_photon_map->valid) { auto start = std::chrono::high_resolution_clock::now(); std::cout << "emitting photons...\n"; tracePhotons(5, photons, sampler, halton_enum); auto end = std::chrono::high_resolution_clock::now(); std::cout << "photon time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count()/1000.0 << " s\n"; _photon_map->rebuild(); //_scene->rebuild(); } double sensorHalfWidth = (_camera.sensorDiag*w)/(sqrt((double)w*w + h*h)); double sensorHalfHeight = sensorHalfWidth * ((double)h/w); glm::dvec3 screenCenter = _camera.pos + _camera.focalDist*_camera.forward; glm::dvec3 cameraRight = glm::normalize(glm::cross(_camera.forward, _camera.up)); std::cout << cameraRight.x << ", " << cameraRight.y << ", " << cameraRight.z << ", " << "\n"; double avgTests = 0; std::vector<double> vars; vars.reserve(w*h); std::vector<double> xrand; subrand(xrand, 25000); std::vector<double> yrand; subrand(yrand, 25000); // The structure of the for loop should remain for incremental rendering. 
#pragma omp parallel for schedule(dynamic, 10) //OpenMP for (int y = 0; y < h; ++y) { srand(std::time(0) + rand()); if(_running) { for (int x = 0; x < w; ++x) { glm::dvec3 color(0.5, 0.5, 0.5); glm::dvec3 lastCol(0, 0, 0); double var = 0; int samps = 0; int s = 0; while (s < max_samples && samps < min_samples) { lastCol = color; int idx = halton_enum.get_index(s, x, y); double xr = sampler.sample(0, idx); double yr = sampler.sample(1, idx); //xr = .2*fmod(halton_enum.scale_x(xr), 1.0) + .8*1*xrand[((x + w*y)*max_samples + s) % xrand.size()];// + 0*drand(); //yr = .2*fmod(halton_enum.scale_y(yr), 1.0) + .8*1*yrand[((x + w*y)*max_samples + s) % yrand.size()];// + 0*drand(); //std::cout << (xr - yr) << "\n"; double dx = halton_enum.scale_x(xr);// (double)x + AA_JITTER*xrand[((x + w*y)*max_samples + s) % xrand.size()]; double dy = halton_enum.scale_y(yr);// (double)y + AA_JITTER*yrand[((x + w*y)*max_samples + s) % yrand.size()]; glm::dvec3 pixelPos = screenCenter + (sensorHalfWidth*(dx / w - .5))*cameraRight - (sensorHalfHeight*(dy / h - .5))*_camera.up; glm::dvec3 eyePos = _camera.pos + FOCAL_BLUR*(xr - .5)*cameraRight + FOCAL_BLUR*(yr - .5) *_camera.up; Ray ray(eyePos, glm::normalize(pixelPos - eyePos)); if (s == 0) color = radiance(ray, 0, sampler, halton_enum, /*(x + w*y)*SAMPLES + s*/ idx, glm::dvec3(1, 1, 1)); else color = (1.0*s*color + radiance(ray, 0, sampler, halton_enum, /*(x + w*y)*SAMPLES + s*/ idx, glm::dvec3(1, 1, 1)))*(1.0 / (s + 1));// (1.0 / SAMPLES)*radiance(ray, 0); if (s > 0) { var = (1.0*5*var + glm::length(color - lastCol))*(1.0 / (5 + 1)); //var = .5*var + .5*vars[clamp(0, w, x - 1) + w*clamp(0, h, y)]; } if (s > 0 && var > noise_thresh) samps-=2; s++; samps++; } color = gamma(color, 2.2); //vars[x + w*y] = var; #pragma omp critical (im_update) { _image->setPixel(x, y, glm::clamp(color/*1.0*(double)s / max_samples*glm::dvec3(1, 1, 1)*/, 0.0, 1.0)); } } } } avgTests /= w*h; std::cout << "average intersection tests: " << avgTests << "\n"; } glm::dvec3 radiance(const Ray& ray, int depth, const Halton_sampler& halton_sampler, const Halton_enum& halton_enum, int sample, glm::dvec3 contrib) { if (depth > MAX_DEPTH) return glm::dvec3(0, 0, 0); float sx = halton_sampler.sample(2 + 2*depth, sample); float sy = halton_sampler.sample(3 + 2*depth, sample); double offset = SHADOW_BIAS; glm::dvec3 minHit, minNorm; glm::dvec2 minUV; Entity* current; bool backface = false; texture debugTex(glm::dvec3(.5, .5, .5)); texture debugTexEm(glm::dvec3(0, 0, 0)); Material dummyMat(&debugTex, &debugTexEm, 1, 1); sphere debugSphere(glm::dvec3(0, 0, 0), 1, dummyMat); bool intersected = trace(ray, minHit, minNorm, minUV, current); if (intersected) { if (!current) current = &debugSphere; glm::dvec3 i(0, 0, 0); glm::dvec3 refDir; glm::dvec3 color = current->material.diffuse->get(minUV); double roughness = current->material.roughness; //type of secondary ray, 0 for reflection, 1 for refraction, 2 for glossy // int type = rayType(current, ray, minNorm, minUV); glm::dvec3 f(1, 1, 1); secondaryRay(ray, current, minNorm, minUV, sx, sy, refDir, f, roughness, contrib, offset); double tmin = 0; double tmax = glm::length(minHit - ray.origin); if (_scene->atmosphereBounds(ray, tmin, tmax)) { glm::dvec3 hit, col; //std::cout << "atmosphere bounds hit: " << tmin << ", " << tmax << "\n"; if (raymarch(ray, hit, col, tmin, tmax)) { minHit = hit; refDir = randomUnitVec(sx, sy); f = 1.0*col; color = col; contrib = col; roughness = 1; //std::cout << "atmosphere hit\n"; } } for (Light* light : _scene->lights) { bool 
shadow = false; glm::dvec3 lightDir = light->getPoint(drand(), drand()) - (minHit + SHADOW_BIAS*minNorm); double maxt = vecLengthSquared(lightDir); //double cos_alpha = (light->rad / sqrt(vecLengthSquared(lightDir) + std::pow(light->rad, 2))); double hfrac = 1 / (M_PI*vecLengthSquared(light->pos - minHit)); //fraction of the hemisphere Ray shadow_ray(minHit + SHADOW_BIAS*minNorm, lightDir); shadow = !visible(shadow_ray, maxt); if (!shadow) { double d = glm::dot(minNorm, glm::normalize(light->pos - minHit)); if (d < 0) d = 0; double l = pow(d, (1.0 / roughness)); i = light->col*l*hfrac; } } glm::dvec3 caustic = depth <= 10 ? samplePhotons(minHit, refDir, 32) : glm::dvec3(0, 0, 0); //return color*caustic; // continuation probability double q = compMax(contrib); if (depth <= MIN_DEPTH || drand() < q) { f *= depth <= MIN_DEPTH ? 1.0 : (1.0 / q); //diffuse*direct_light + diffuse*brdf*radiance + emmissive return color*i + f*radiance(Ray(minHit + offset*minNorm, refDir), ++depth, halton_sampler, halton_enum, sample, contrib) + current->material.emissive->get(minUV) + color*caustic; } else return color*i;// glm::dvec3(0, 0, 0);///*1.0*depth / MAX_DEPTH * glm::dvec3(1, 1, 1);//*/ //current->material.diffuse->get(minUV)*i + current->material.emissive->get(minUV); } else return /*1.0*depth / MAX_DEPTH * glm::dvec3(1, 1, 1);//*/ ambient; } //checks if the given ray is visible, meaning that nothing overlaps it bool visible(const Ray& ray, double mt) { bool hit = false; std::vector<Entity*> shadow_objects = _scene->intersect(ray, 0, sqrt(mt)-SHADOW_BIAS); std::vector<Entity*>::iterator shadow_it = shadow_objects.begin(); #ifdef DEBUG_OCTREE return shadow_it == shadow_objects.end(); #endif while (!hit && shadow_it != shadow_objects.end()) { Entity* t = *shadow_it; glm::dvec3 pos, norm; glm::dvec2 uv; if (t->intersect(ray, pos, norm, uv) && (drand() < t->material.getAlpha(uv) || t->material.IOR != 1)) { double t_shadow = vecLengthSquared(pos - ray.origin); hit = (t_shadow < mt)&&(t_shadow > 0); } ++shadow_it; } if (hit) return false; double tmin = 0; double tmax = mt; if (_scene->atmosphereBounds(ray, tmin, tmax)) { glm::dvec3 hit, col; if (raymarch(ray, hit, col, tmin, tmax)) return false; } return true; } void secondaryRay(const Ray& ray, const Entity* current, glm::dvec3& norm, glm::dvec2& UV, double sx, double sy, glm::dvec3& refDir, glm::dvec3& f, double& roughness, glm::dvec3& contrib, double& offset) { bool backface = false; if (glm::dot(norm, ray.dir) > 0) { norm *= -1.0; backface = true; } glm::dvec3 color = current->material.diffuse->get(UV); roughness = current->material.roughness; int type = rayType(current, ray, norm, UV); if (type == 1) { if (backface) { refDir = refr(ray.dir, norm, current->material.IOR); } else { refDir = refr(ray.dir, norm, 1.0 / current->material.IOR); } offset *= -1; contrib = glm::dvec3(1, 1, 1); f = 1.0*color; } else if (type == 0) { refDir = glm::reflect(ray.dir, norm); contrib = glm::dvec3(1, 1, 1); f = 1.0*color; // glm::dot(refDir, minNorm); } else { refDir = hemisphereSample_cos(norm, sx, sy, 2); if (current->material.roughness < .9) { refDir = sample_phong(glm::reflect(ray.dir, norm), norm, (1.0 / (current->material.roughness)) + 1, sx, sy); if (glm::dot(refDir, norm) < 0) refDir = glm::reflect(refDir, norm); } f = 1.0*color; glm::dvec3 inf = color;// *pow(dot, 1 / current->material.roughness); contrib *= inf; contrib = glm::mix(contrib, inf, 0.5); } } //traces a ray against the scene geometry, returns true on intersection bool trace(const Ray& ray, glm::dvec3& 
minHit, glm::dvec3& minNorm, glm::dvec2& minUV, Entity*& obj) { glm::dvec3 hit, norm; glm::dvec2 uv; bool intersected = false; std::vector<std::pair<const Octree::Node*, double>> nodes = _scene->intersectSorted(ray, 0, INFINITY); std::vector<std::pair<const Octree::Node*, double>>::iterator nd = nodes.begin(); #ifdef DEBUG_OCTREE if(nd != nodes.end()) { const Octree::Node* curNode = nd->first; double tmin, tmax; auto snap = [](glm::dvec3& in) {return in / std::max(in[0], std::max(in[1], in[2])); }; curNode->_bbox.intersect(ray, 0, INFINITY, tmin, tmax); minHit = ray.origin + tmin * ray.dir; glm::dvec3 p = minHit - curNode->_bbox.center(); glm::dvec3 d = .5 * curNode->_bbox.size(); glm::dvec3 n = (p / d) * (1.0001); auto absMax = [](double a, double b){ return (std::max(std::abs(a), std::abs(b)) == std::abs(a)) ? a : b; }; double max = absMax(n[0], absMax(n[1], n[2])); glm::dvec3 n_ = glm::dvec3(max == n[0], max == n[1], max == n[2]); if (glm::length(n_) < .1 || glm::length(n_) > 2) { std::cout << "normal length out of tolerance: " << n_[0] << ", " << n_[1] << ", " << n_[2] << ", " << n[0] << ", " << n[1] << ", " << n[2] << ", " << "\n"; n_ = n; } minNorm = glm::normalize(n_); minUV = glm::dvec2(0, 0); obj = NULL; return true; } return false; #endif //avgTests += objects.size(); if (drand() < .5 && nodes.size() > 500) { std::cout << nodes.size() << ", ray: " << ray.origin.x << ", " << ray.origin.y << ", " << ray.origin.z << "\n"; } Entity* current; bool term = false; while (nd != nodes.end() && !term) { const Octree::Node* curNode = nd->first; std::vector<Entity*>::const_iterator it = curNode->_entities.begin(); while (it != curNode->_entities.end()) { Entity* tmp = *it; if (tmp->intersect(ray, hit, norm, uv) && (drand() < tmp->material.getAlpha(uv) || tmp->material.IOR != 1)) { if ((!intersected || vecLengthSquared(hit - ray.origin) < vecLengthSquared(minHit - ray.origin))) { current = tmp; minHit = hit; minNorm = norm; minUV = uv; intersected = true; if (curNode->_bbox.contains(hit)) term = true; } } ++it; } ++nd; } if(intersected) obj = current; return intersected; } //returns the type of the secondary ray, 0 for reflection, 1 for refraction, 2 for diffuse/glossy int rayType(const Entity* entity, const Ray& ray, glm::dvec3& norm, glm::dvec2& minUV) { int type = 2; double IOR = entity->material.IOR; double opacity = entity->material.diffuse->getAlpha(minUV) * entity->material.opacity; double r0 = std::pow((1 - IOR) / (1 + IOR), 2); //Schlicks approximation of the fresnel term double fs = r0 + (1 - r0)*std::pow(1 - glm::dot(glm::reflect(ray.dir, norm), norm), 5); if (entity->material.roughness < .001) { type = 0; } if (drand() > opacity) { if (drand() < fs) type = 0; else type = 1; } return type; } //uses raymarching to determine the intersection point of a ray with the atmosphere bool raymarch(const Ray& r, glm::dvec3& hit, glm::dvec3& col, double mint, double maxt) { double t = mint + SHADOW_BIAS; double scatter; glm::dvec3 current = r.origin + mint*r.dir; while (t < maxt) { if (drand() < _scene->atmosphereDensity(current, col, scatter)) { hit = current; return true; } current += RAYMARCH_STEPSIZE * r.dir; t += RAYMARCH_STEPSIZE; } return false; } //computes a radiance estimate from the photons surrounding the hit point glm::dvec3 samplePhotons(glm::dvec3 pos, glm::dvec3 dir, int count) { double dist = .0; glm::dvec3 res(0, 0, 0); double scale = 0; std::vector<Photon*> photons = _photon_map->getInRange(pos, scale, dist); /*if(photons.size() > 9*MAX_PHOTONS_PER_LEAF && drand() < .01) { 
std::cout << "photons: " << photons.size() << "\n"; }*/ count = std::min(count, (int)photons.size()); std::partial_sort(photons.begin(), photons.begin() + count, photons.end(), [pos](const Photon* lhs, const Photon* rhs) {return vecLengthSquared(lhs->origin - pos) < vecLengthSquared(rhs->origin - pos); }); double maxDist = 0; //std::cout << photons.size() << "\n"; /*double lastDist = 0; if(photons.size() > 0) lastDist = vecLengthSquared(photons[0]->origin - pos);*/ for (int i = 0; i < count; i++) { Photon* p = photons[i]; /*if((vecLengthSquared(p->origin - pos) - lastDist) < 0) { std::cout << "non-ascending photon distance: " << lastDist << ", " << vecLengthSquared(p->origin - pos) << "\n"; } lastDist = vecLengthSquared(p->origin - pos);*/ //if (vecLengthSquared(p->origin - pos) < .01 && visible(Ray(pos + SHADOW_BIAS*dir, p->origin - pos - SHADOW_BIAS*dir), glm::length(p->origin - pos - SHADOW_BIAS*dir))) res += p->col*glm::dot(p->dir, dir); } if(photons.size() > 0) { maxDist = vecLengthSquared(photons[count - 1]->origin - pos); res /= (M_PI*maxDist); } return res; } //traces caustic photons from every light source void tracePhotons(int maxDepth, int count, Halton_sampler& halton_sampler, Halton_enum& halton_enum) { _photon_map->reserve(count); int total = 0; #pragma omp parallel { std::vector<Photon*> tmp; int tmpCount = 0; #pragma omp for for (int i = 0; i < count; i++) { for (Light* l : _scene->lights) { int tries = 0; bool stored = false; //srand(i); while (!stored && tries < 500) { float sx = halton_sampler.sample(0, i * 500 + tries); float sy = halton_sampler.sample(1, i * 500 + tries); //sx = fmod(halton_enum.scale_x(sx), 1.0); //sy = fmod(halton_enum.scale_y(sy), 1.0); //std::cout << i * 500 + tries << ": " << sx << ", " << sy << "\n"; glm::dvec3 pos = l->getPointInRange(sx, sy); glm::dvec3 dir = sphereCapSample_cos(glm::normalize(pos - l->pos), fmod(drand() + 5 * i, 1), fmod(drand() + 13 * i, 1), 2, l->angle); Ray r(pos, dir); glm::dvec3 hit, norm; glm::dvec3 col = (1.0/count)*.5*l->angle*l->col; glm::dvec2 UV; Entity* current; int depth = 0; bool term = false; bool isCaustic = false; if (!trace(r, hit, norm, UV, current)) { tries++; continue; } while (depth < maxDepth && !term) { double roughness = current->material.roughness; //std::cout << "tracing photon, depth: " << depth << "\n"; if (roughness < 0.1) { //std::cout << "tracing caustics photon at depth: " << depth << "\n"; if (!trace(r, hit, norm, UV, current)) { term = true; continue; } roughness = current->material.roughness; glm::dvec3 refDir, f, contrib; double offset = SHADOW_BIAS; sx = halton_sampler.sample(0, maxDepth*(i * 500 + tries) + depth); sy = halton_sampler.sample(1, maxDepth*(i * 500 + tries) + depth); //sx = fmod(halton_enum.scale_x(sx), 1.0); //sy = fmod(halton_enum.scale_y(sy), 1.0); secondaryRay(r, current, norm, UV, fmod(drand() + 5 * i, 1), fmod(drand() + 13 * i, 1), refDir, f, roughness, contrib, offset); double tmin = 0; double tmax = glm::length(hit - r.origin); if (_scene->atmosphereBounds(r, tmin, tmax)) { glm::dvec3 ahit, color; //std::cout << "atmosphere bounds hit: " << tmin << ", " << tmax << "\n"; if (raymarch(r, ahit, color, tmin, tmax)) { hit = ahit; refDir = randomUnitVec(fmod(drand() + 13 * i, 1), fmod(drand() + 7 * i, 1)); f = 1.0*color; roughness = 1; //std::cout << "atmosphere hit by photon\n"; } } col *= f; r.origin = hit + offset*norm; r.setDir(refDir); isCaustic = true; } if (depth > 0 && isCaustic && roughness >= 0.1) { //std::cout << "photon stored\n"; tmp.push_back(new Photon(hit, 
r.dir, col)); term = true; stored = true; } depth++; } tries++; } tmpCount += tries; } } #pragma omp critical { for (Photon* p : tmp) { _photon_map->push_back(p); //_scene->push_back(new sphere(p->origin, 0.01, Material(new texture(p->col), new texture(glm::dvec3(0, 0, 0)), 1, 1))); } total += tmpCount; } } std::cout << "total photon tests: " << total << "\n"; } bool running() const { return _running; } void stop() { _running = false; } void start() { _running = true; } int photons = PHOTONS; int photon_depth = PHOTON_DEPTH; int min_samples = MIN_SAMPLES; int max_samples = SAMPLES; double noise_thresh = NOISE_THRESH; glm::dvec3 ambient = glm::dvec3(0, 0, 0); std::shared_ptr<Image> getImage() const { return _image; } Camera _camera; private: bool _running = false; Octree* _scene; PhotonMap* _photon_map; std::shared_ptr<Image> _image;};
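The omp_formatted variant parallelizes over scanlines with #pragma omp parallel for schedule(dynamic, 10) and serializes the framebuffer write with the named section #pragma omp critical (im_update). A minimal standalone sketch of those two constructs with a toy image buffer standing in for Image::setPixel; the buffer layout and the color stand-in are assumptions:

#include <cstdio>
#include <vector>

int main()
{
    const int w = 320, h = 240;
    std::vector<double> image((size_t)w * h, 0.0);

    // schedule(dynamic, 10): rows are handed out in chunks of 10 as threads
    // become free, which balances load when per-row cost varies (as it does
    // when some scanlines hit expensive geometry and others hit empty sky).
    #pragma omp parallel for schedule(dynamic, 10)
    for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
            double color = (double)(x ^ y) / 255.0; // stand-in for radiance()

            // Named critical section, mirroring the im_update block in
            // raytracer.h. Each (x, y) is written by exactly one thread here,
            // so the critical section only pays off if the pixel store itself
            // is not safe for concurrent callers.
            #pragma omp critical (im_update)
            {
                image[(size_t)y * w + x] = color;
            }
        }
    }
    std::printf("done, %zu pixels\n", image.size());
    return 0;
}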
source: ex07.c
original_c:

#include <stdio.h>
#include <omp.h>

static long num_steps = 1000000;
double step;

int main(int argc, char* argv[])
{
    int i;
    double pi, sum = 0.0;
    step = 1.0 / (double) num_steps;

    double startTime = omp_get_wtime();
    #pragma omp parallel
    {
        double x; /* private to each thread */
        #pragma omp for reduction(+:sum)
        for (i = 0; i < num_steps; i++) {
            x = (i + 0.5) * step;
            sum += 4.0 / (1.0 + x * x);
        }
    }
    pi = step * sum;
    double endTime = omp_get_wtime();

    printf("Computed integral: %f\n", pi);
    printf("Time elapsed: %f secs\n", (endTime - startTime));
    return 0;
}
no_omp_formatted:

#include <stdio.h>
#include <omp.h>

static long num_steps = 1000000;
double step;

int main(int argc, char* argv[])
{
    int i;
    double x, pi, sum = 0.0;
    step = 1.0 / (double) num_steps;

    double startTime = omp_get_wtime();
    for (i = 0; i < num_steps; i++) {
        x = (i + 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }
    pi = step * sum;
    double endTime = omp_get_wtime();

    printf("Computed integral: %f\n", pi);
    printf("Time elapsed: %f secs\n", (endTime - startTime));
    return 0;
}
omp_formatted:

#include <stdio.h>
#include <omp.h>

static long num_steps = 1000000;
double step;

int main(int argc, char* argv[])
{
    int i;
    double pi, sum = 0.0;
    step = 1.0 / (double) num_steps;

    double startTime = omp_get_wtime();
    #pragma omp parallel
    {
        double x; /* private to each thread */
        #pragma omp for reduction(+:sum)
        for (i = 0; i < num_steps; i++) {
            x = (i + 0.5) * step;
            sum += 4.0 / (1.0 + x * x);
        }
    }
    pi = step * sum;
    double endTime = omp_get_wtime();

    printf("Computed integral: %f\n", pi);
    printf("Time elapsed: %f secs\n", (endTime - startTime));
    return 0;
}
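The only OpenMP difference between the two ex07.c variants is the parallel region with reduction(+:sum): each thread accumulates into a private, zero-initialized copy of sum, and OpenMP adds the copies into the shared variable when the loop ends, which is what makes the concurrent sum += safe. A minimal sketch of the same idiom using the combined parallel-for form, with the variable names kept from ex07.c:

#include <cstdio>
#include <omp.h>

int main()
{
    const long num_steps = 1000000;
    const double step = 1.0 / (double)num_steps;
    double sum = 0.0;

    // Without reduction(+:sum) the threads would race on the shared sum;
    // with it, each thread works on a private copy that is combined with +
    // at the end of the worksharing loop.
    #pragma omp parallel for reduction(+:sum)
    for (long i = 0; i < num_steps; i++) {
        double x = (i + 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }
    std::printf("pi ~= %.6f\n", step * sum);
    return 0;
}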
source: line_search_contact_strategy.h
// KRATOS ______ __ __ _____ __ __ __ // / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ / // / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ / // / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / / // \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS // // License: BSD License // license: ContactStructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_LINE_SEARCH_CONTACT_STRATEGY) #define KRATOS_LINE_SEARCH_CONTACT_STRATEGY /* System Includes */ /* External Includes */ /* Project includes */ #include "includes/kratos_parameters.h" #include "includes/define.h" #include "includes/model_part.h" #include "includes/variables.h" #include "solving_strategies/strategies/solving_strategy.h" #include "solving_strategies/strategies/line_search_strategy.h" #include "utilities/openmp_utils.h" #include "utilities/variable_utils.h" #include "utilities/atomic_utilities.h" // Convergence criterias #include "solving_strategies/convergencecriterias/convergence_criteria.h" // Default builder and solver #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" // TODO: Extend the descriptions namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** \brief Short class definition. This class */ template<class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class LineSearchContactStrategy : public LineSearchStrategy< TSparseSpace, TDenseSpace, TLinearSolver > { public: typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION( LineSearchContactStrategy ); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType; typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> NRBaseType; typedef LineSearchStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef LineSearchContactStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef std::size_t IndexType; /** * @brief Default constructor */ explicit LineSearchContactStrategy() { } /** * @brief Default constructor. 
(with parameters) * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ explicit LineSearchContactStrategy(ModelPart& rModelPart, Parameters ThisParameters) : BaseType(rModelPart, BaseType::GetDefaultParameters()) { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } /** * Default constructor * @param rModelPart: The model part of the problem * @param pScheme: The integration scheme * @param pNewLinearSolver: The linear solver employed * @param pNewConvergenceCriteria: The convergence criteria employed * @param MaxIterationNumber: The maximum number of iterations * @param CalculateReactions: The flag for the reaction calculation * @param ReformDofSetAtEachStep: The flag that allows to compute the modification of the DOF * @param MoveMeshFlag: The flag that allows to move the mesh */ LineSearchContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag) { KRATOS_TRY; Parameters default_parameters = this->GetDefaultParameters(); ThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * Default constructor * @param rModelPart: The model part of the problem * @param pScheme: The integration scheme * @param pNewLinearSolver: The linear solver employed * @param pNewConvergenceCriteria: The convergence criteria employed * @param MaxIterationNumber: The maximum number of iterations * @param CalculateReactions: The flag for the reaction calculation * @param ReformDofSetAtEachStep: The flag that allows to compute the modification of the DOF * @param MoveMeshFlag: The flag that allows to move the mesh */ LineSearchContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ) { KRATOS_TRY; Parameters default_parameters = this->GetDefaultParameters(); ThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * Destructor. 
*/ ~LineSearchContactStrategy() override = default; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Create method * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ typename StrategyBaseType::Pointer Create( ModelPart& rModelPart, Parameters ThisParameters ) const override { return Kratos::make_shared<ClassType>(rModelPart, ThisParameters); } /** * @brief This method returns the defaulr parameters in order to avoid code duplication * @return Returns the default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "line_search_contact_strategy" })" ); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "line_search_contact_strategy"; } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "LineSearchContactStrategy"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ bool mRecalculateFactor; // To check if we recalculate or not the scale factor ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * Performs all the required operations that should be done (for each step) * before solving the solution step. * A member variable should be used as a flag to make sure this function is called only once per step. 
*/ void InitializeSolutionStep() override { BaseType::InitializeSolutionStep(); // TODO: Add something if necessary } /** * Here the database is updated */ void UpdateDatabase( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b, const bool MoveMesh ) override { typename TSchemeType::Pointer pScheme = this->GetScheme(); typename TBuilderAndSolverType::Pointer pBuilderAndSolver = this->GetBuilderAndSolver(); // FIXME: Separate in the parts of LM and displacement TSystemVectorType aux(b.size()); //TODO: do it by using the space TSparseSpace::Assign(aux, 0.5, Dx); TSystemVectorType DxDisp(b.size()); TSystemVectorType DxLM(b.size()); ComputeSplitDx(Dx, DxDisp, DxLM); // Compute residual without update TSparseSpace::SetToZero(b); pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b ); double roDisp; double roLM; ComputeMixedResidual(b, roDisp, roLM); // Compute half step residual NRBaseType::UpdateDatabase(A,aux,b,MoveMesh); TSparseSpace::SetToZero(b); pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b ); double rhDisp; double rhLM; ComputeMixedResidual(b, rhDisp, rhLM); // Compute full step residual (add another half Dx to the previous half) NRBaseType::UpdateDatabase(A,aux,b,MoveMesh); TSparseSpace::SetToZero(b); pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b ); double rfDisp; double rfLM; ComputeMixedResidual(b, rfDisp, rfLM); // We compute the parabola double XminDisp = 1e-3; double XmaxDisp = 1.0; double XminLM = 1e-3; double XmaxLM = 1.0; ComputeParabola(XminDisp, XmaxDisp, rfDisp, roDisp, rhDisp); ComputeParabola(XminLM, XmaxLM, rfLM, roLM, rhLM); // Perform final update TSparseSpace::Assign(aux,-(1.0 - XmaxDisp), DxDisp); TSparseSpace::UnaliasedAdd(aux,-(1.0 - XmaxLM), DxLM); NRBaseType::UpdateDatabase(A,aux,b,MoveMesh); } /** * This method split the vector of increment of DoF in displacement and LM * @param Dx The increment of displacements and LM * @param DxDisp The increment of displacements * @param DxLM The increment of LM */ void ComputeSplitDx( TSystemVectorType& Dx, TSystemVectorType& DxDisp, TSystemVectorType& DxLM ) { // Now we iterate over all the nodes NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes(); const int num_nodes = static_cast<int>(nodes_array.size()); #pragma omp parallel for for(int i = 0; i < num_nodes; ++i) { auto it_node = nodes_array.begin() + i; for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++) { const int j = (**itDoF).EquationId(); const std::size_t CurrVar = (**itDoF).GetVariable().Key(); if ((CurrVar == DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z)) { DxDisp[j] = Dx[j]; DxLM[j] = 0.0; } else // Corresponding with contact { DxDisp[j] = 0.0; DxLM[j] = Dx[j]; } } } } /** * This method calculates the norm considering one norm for the displacement and other norm for the LM * @param b The residual vector * @param normDisp normDisp: The norm of the displacement * @param normLM The norm of the LM */ void ComputeMixedResidual( TSystemVectorType& b, double& normDisp, double& normLM ) { // Now we iterate over all the nodes NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes(); const int num_nodes = static_cast<int>(nodes_array.size()); #pragma omp parallel for for(int i = 0; i < num_nodes; ++i) { auto it_node = nodes_array.begin() + i; for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++) { const int j = (**itDoF).EquationId(); const std::size_t CurrVar = 
(**itDoF).GetVariable().Key(); if ((CurrVar == DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z)) { AtomicAdd(normDisp, b[j] * b[j]); } else { // Corresponding with contact AtomicAdd(normLM, b[j] * b[j]); } } } normDisp = std::sqrt(normDisp); normLM = std::sqrt(normLM); } /** * This method computes the parabola necessary for the line search * @param Xmax The maximal abscissa * @param Xmin The norm of the LM * @param rf The residual norm of the full step * @param ro The residual norm without step * @param rh The residual norm of the half step */ void ComputeParabola( double& Xmax, double& Xmin, const double rf, const double ro, const double rh ) { // Compute optimal (limited to the range 0-1) // Parabola is y = a*x^2 + b*x + c -> min/max for // x=0 --> r=ro // x=1/2 --> r=rh // x=1 --> r = // c= ro, b= 4*rh -rf -3*ro, a= 2*rf - 4*rh + 2*ro // max found if a>0 at the position Xmax = (rf/4 - rh)/(rf - 2*rh); const double parabole_a = 2 * rf + 2 * ro - 4 * rh; const double parabole_b = 4 * rh - rf - 3 * ro; if( parabole_a > 0.0) // If parabola has a local minima { Xmax = -0.5 * parabole_b/parabole_a; // -b / 2a if( Xmax > 1.0) Xmax = 1.0; else if(Xmax < -1.0) Xmax = -1.0; } else // Parabola degenerates to either a line or to have a local max. best solution on either extreme { if(rf < ro) Xmax = 1.0; else Xmax = Xmin; // Should be zero, but otherwise it will stagnate } } /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@{ /** * Copy constructor. */ LineSearchContactStrategy(const LineSearchContactStrategy& Other) { }; private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class LineSearchContactStrategy */ ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } // namespace Kratos #endif /* KRATOS_LINE_SEARCH_CONTACT_STRATEGY */
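The quadratic line search in UpdateDatabase/ComputeParabola above samples the residual norm at no step (ro), half step (rh) and full step (rf), fits a parabola through the three values, and steps to its minimiser when the fit is convex. Note the in-code closed form Xmax = (rf/4 - rh)/(rf - 2*rh) corresponds to the special case ro = 0; the code itself uses the general -b/(2a). A standalone sketch of that fit (not Kratos code; plain doubles stand in for the system vectors, requires C++17 for std::clamp):

// Fit r(x) = a*x^2 + b*x + c through r(0) = ro, r(1/2) = rh, r(1) = rf,
// then step to the minimiser when the parabola is convex.
#include <algorithm>
#include <cstdio>

double OptimalStep(double ro, double rh, double rf, double xmin = 1e-3)
{
    const double a = 2.0 * rf + 2.0 * ro - 4.0 * rh;  // curvature
    const double b = 4.0 * rh - rf - 3.0 * ro;        // slope at x = 0
    if (a > 0.0)                                      // convex: interior minimum
        return std::clamp(-0.5 * b / a, -1.0, 1.0);   // same clamp as the strategy
    return (rf < ro) ? 1.0 : xmin;  // degenerate: take the better endpoint
}

int main()
{
    // Residual 1.0 with no step, 0.4 at half step, 0.9 at full step:
    std::printf("optimal step = %g\n", OptimalStep(1.0, 0.4, 0.9));  // ~0.523
}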
// KRATOS ______ __ __ _____ __ __ __ // / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ / // / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ / // / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / / // \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS // // License: BSD License // license: ContactStructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_LINE_SEARCH_CONTACT_STRATEGY) #define KRATOS_LINE_SEARCH_CONTACT_STRATEGY /* System Includes */ /* External Includes */ /* Project includes */ #include "includes/kratos_parameters.h" #include "includes/define.h" #include "includes/model_part.h" #include "includes/variables.h" #include "solving_strategies/strategies/solving_strategy.h" #include "solving_strategies/strategies/line_search_strategy.h" #include "utilities/openmp_utils.h" #include "utilities/variable_utils.h" #include "utilities/atomic_utilities.h" // Convergence criterias #include "solving_strategies/convergencecriterias/convergence_criteria.h" // Default builder and solver #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" // TODO: Extend the descriptions namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** \brief Short class definition. This class */ template<class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class LineSearchContactStrategy : public LineSearchStrategy< TSparseSpace, TDenseSpace, TLinearSolver > { public: typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION( LineSearchContactStrategy ); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType; typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> NRBaseType; typedef LineSearchStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef LineSearchContactStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef std::size_t IndexType; /** * @brief Default constructor */ explicit LineSearchContactStrategy() { } /** * @brief Default constructor. 
(with parameters) * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ explicit LineSearchContactStrategy(ModelPart& rModelPart, Parameters ThisParameters) : BaseType(rModelPart, BaseType::GetDefaultParameters()) { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } /** * Default constructor * @param rModelPart: The model part of the problem * @param pScheme: The integration scheme * @param pNewLinearSolver: The linear solver employed * @param pNewConvergenceCriteria: The convergence criteria employed * @param MaxIterationNumber: The maximum number of iterations * @param CalculateReactions: The flag for the reaction calculation * @param ReformDofSetAtEachStep: The flag that allows to compute the modification of the DOF * @param MoveMeshFlag: The flag that allows to move the mesh */ LineSearchContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag) { KRATOS_TRY; Parameters default_parameters = this->GetDefaultParameters(); ThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * Default constructor * @param rModelPart: The model part of the problem * @param pScheme: The integration scheme * @param pNewLinearSolver: The linear solver employed * @param pNewConvergenceCriteria: The convergence criteria employed * @param MaxIterationNumber: The maximum number of iterations * @param CalculateReactions: The flag for the reaction calculation * @param ReformDofSetAtEachStep: The flag that allows to compute the modification of the DOF * @param MoveMeshFlag: The flag that allows to move the mesh */ LineSearchContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ) { KRATOS_TRY; Parameters default_parameters = this->GetDefaultParameters(); ThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * Destructor. 
*/ ~LineSearchContactStrategy() override = default; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Create method * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ typename StrategyBaseType::Pointer Create( ModelPart& rModelPart, Parameters ThisParameters ) const override { return Kratos::make_shared<ClassType>(rModelPart, ThisParameters); } /** * @brief This method returns the defaulr parameters in order to avoid code duplication * @return Returns the default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "line_search_contact_strategy" })" ); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "line_search_contact_strategy"; } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "LineSearchContactStrategy"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ bool mRecalculateFactor; // To check if we recalculate or not the scale factor ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * Performs all the required operations that should be done (for each step) * before solving the solution step. * A member variable should be used as a flag to make sure this function is called only once per step. 
*/ void InitializeSolutionStep() override { BaseType::InitializeSolutionStep(); // TODO: Add something if necessary } /** * Here the database is updated */ void UpdateDatabase( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b, const bool MoveMesh ) override { typename TSchemeType::Pointer pScheme = this->GetScheme(); typename TBuilderAndSolverType::Pointer pBuilderAndSolver = this->GetBuilderAndSolver(); // FIXME: Separate in the parts of LM and displacement TSystemVectorType aux(b.size()); //TODO: do it by using the space TSparseSpace::Assign(aux, 0.5, Dx); TSystemVectorType DxDisp(b.size()); TSystemVectorType DxLM(b.size()); ComputeSplitDx(Dx, DxDisp, DxLM); // Compute residual without update TSparseSpace::SetToZero(b); pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b ); double roDisp; double roLM; ComputeMixedResidual(b, roDisp, roLM); // Compute half step residual NRBaseType::UpdateDatabase(A,aux,b,MoveMesh); TSparseSpace::SetToZero(b); pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b ); double rhDisp; double rhLM; ComputeMixedResidual(b, rhDisp, rhLM); // Compute full step residual (add another half Dx to the previous half) NRBaseType::UpdateDatabase(A,aux,b,MoveMesh); TSparseSpace::SetToZero(b); pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b ); double rfDisp; double rfLM; ComputeMixedResidual(b, rfDisp, rfLM); // We compute the parabola double XminDisp = 1e-3; double XmaxDisp = 1.0; double XminLM = 1e-3; double XmaxLM = 1.0; ComputeParabola(XminDisp, XmaxDisp, rfDisp, roDisp, rhDisp); ComputeParabola(XminLM, XmaxLM, rfLM, roLM, rhLM); // Perform final update TSparseSpace::Assign(aux,-(1.0 - XmaxDisp), DxDisp); TSparseSpace::UnaliasedAdd(aux,-(1.0 - XmaxLM), DxLM); NRBaseType::UpdateDatabase(A,aux,b,MoveMesh); } /** * This method split the vector of increment of DoF in displacement and LM * @param Dx The increment of displacements and LM * @param DxDisp The increment of displacements * @param DxLM The increment of LM */ void ComputeSplitDx( TSystemVectorType& Dx, TSystemVectorType& DxDisp, TSystemVectorType& DxLM ) { // Now we iterate over all the nodes NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes(); const int num_nodes = static_cast<int>(nodes_array.size()); for(int i = 0; i < num_nodes; ++i) { auto it_node = nodes_array.begin() + i; for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++) { const int j = (**itDoF).EquationId(); const std::size_t CurrVar = (**itDoF).GetVariable().Key(); if ((CurrVar == DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z)) { DxDisp[j] = Dx[j]; DxLM[j] = 0.0; } else // Corresponding with contact { DxDisp[j] = 0.0; DxLM[j] = Dx[j]; } } } } /** * This method calculates the norm considering one norm for the displacement and other norm for the LM * @param b The residual vector * @param normDisp normDisp: The norm of the displacement * @param normLM The norm of the LM */ void ComputeMixedResidual( TSystemVectorType& b, double& normDisp, double& normLM ) { // Now we iterate over all the nodes NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes(); const int num_nodes = static_cast<int>(nodes_array.size()); for(int i = 0; i < num_nodes; ++i) { auto it_node = nodes_array.begin() + i; for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++) { const int j = (**itDoF).EquationId(); const std::size_t CurrVar = (**itDoF).GetVariable().Key(); if ((CurrVar == 
DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z)) { AtomicAdd(normDisp, b[j] * b[j]); } else { // Corresponding with contact AtomicAdd(normLM, b[j] * b[j]); } } } normDisp = std::sqrt(normDisp); normLM = std::sqrt(normLM); } /** * This method computes the parabola necessary for the line search * @param Xmax The maximal abscissa * @param Xmin The norm of the LM * @param rf The residual norm of the full step * @param ro The residual norm without step * @param rh The residual norm of the half step */ void ComputeParabola( double& Xmax, double& Xmin, const double rf, const double ro, const double rh ) { // Compute optimal (limited to the range 0-1) // Parabola is y = a*x^2 + b*x + c -> min/max for // x=0 --> r=ro // x=1/2 --> r=rh // x=1 --> r = // c= ro, b= 4*rh -rf -3*ro, a= 2*rf - 4*rh + 2*ro // max found if a>0 at the position Xmax = (rf/4 - rh)/(rf - 2*rh); const double parabole_a = 2 * rf + 2 * ro - 4 * rh; const double parabole_b = 4 * rh - rf - 3 * ro; if( parabole_a > 0.0) // If parabola has a local minima { Xmax = -0.5 * parabole_b/parabole_a; // -b / 2a if( Xmax > 1.0) Xmax = 1.0; else if(Xmax < -1.0) Xmax = -1.0; } else // Parabola degenerates to either a line or to have a local max. best solution on either extreme { if(rf < ro) Xmax = 1.0; else Xmax = Xmin; // Should be zero, but otherwise it will stagnate } } /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@{ /** * Copy constructor. */ LineSearchContactStrategy(const LineSearchContactStrategy& Other) { }; private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class LineSearchContactStrategy */ ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } // namespace Kratos #endif /* KRATOS_LINE_SEARCH_CONTACT_STRATEGY */
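ComputeSplitDx above routes each DOF increment into either the displacement vector or the Lagrange-multiplier vector based on the DOF's variable key. A compact sketch of the same split, under the assumption that a boolean mask replaces the Kratos DOF-variable check:

#include <cstdio>

void SplitDx(const double* Dx, const bool* is_disp_dof, int n,
             double* DxDisp, double* DxLM)
{
    // Displacement DOFs keep their increment in DxDisp; Lagrange-multiplier
    // (contact) DOFs keep theirs in DxLM; the other vector gets a zero.
    #pragma omp parallel for
    for (int j = 0; j < n; ++j) {
        DxDisp[j] = is_disp_dof[j] ? Dx[j] : 0.0;
        DxLM[j]   = is_disp_dof[j] ? 0.0   : Dx[j];
    }
}

int main()
{
    const double Dx[]   = {0.1, 0.2, -0.3};
    const bool   mask[] = {true, false, true};  // middle entry is an LM DOF
    double DxDisp[3], DxLM[3];
    SplitDx(Dx, mask, 3, DxDisp, DxLM);
    std::printf("disp: %g %g %g | lm: %g %g %g\n",
                DxDisp[0], DxDisp[1], DxDisp[2], DxLM[0], DxLM[1], DxLM[2]);
}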
// KRATOS ______ __ __ _____ __ __ __ // / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ / // / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ / // / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / / // \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS // // License: BSD License // license: ContactStructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_LINE_SEARCH_CONTACT_STRATEGY) #define KRATOS_LINE_SEARCH_CONTACT_STRATEGY /* System Includes */ /* External Includes */ /* Project includes */ #include "includes/kratos_parameters.h" #include "includes/define.h" #include "includes/model_part.h" #include "includes/variables.h" #include "solving_strategies/strategies/solving_strategy.h" #include "solving_strategies/strategies/line_search_strategy.h" #include "utilities/openmp_utils.h" #include "utilities/variable_utils.h" #include "utilities/atomic_utilities.h" // Convergence criterias #include "solving_strategies/convergencecriterias/convergence_criteria.h" // Default builder and solver #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" // TODO: Extend the descriptions namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** \brief Short class definition. This class */ template<class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class LineSearchContactStrategy : public LineSearchStrategy< TSparseSpace, TDenseSpace, TLinearSolver > { public: typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION( LineSearchContactStrategy ); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType; typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> NRBaseType; typedef LineSearchStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef LineSearchContactStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef std::size_t IndexType; /** * @brief Default constructor */ explicit LineSearchContactStrategy() { } /** * @brief Default constructor. 
(with parameters) * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ explicit LineSearchContactStrategy(ModelPart& rModelPart, Parameters ThisParameters) : BaseType(rModelPart, BaseType::GetDefaultParameters()) { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } /** * Default constructor * @param rModelPart: The model part of the problem * @param pScheme: The integration scheme * @param pNewLinearSolver: The linear solver employed * @param pNewConvergenceCriteria: The convergence criteria employed * @param MaxIterationNumber: The maximum number of iterations * @param CalculateReactions: The flag for the reaction calculation * @param ReformDofSetAtEachStep: The flag that allows to compute the modification of the DOF * @param MoveMeshFlag: The flag that allows to move the mesh */ LineSearchContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag) { KRATOS_TRY; Parameters default_parameters = this->GetDefaultParameters(); ThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * Default constructor * @param rModelPart: The model part of the problem * @param pScheme: The integration scheme * @param pNewLinearSolver: The linear solver employed * @param pNewConvergenceCriteria: The convergence criteria employed * @param MaxIterationNumber: The maximum number of iterations * @param CalculateReactions: The flag for the reaction calculation * @param ReformDofSetAtEachStep: The flag that allows to compute the modification of the DOF * @param MoveMeshFlag: The flag that allows to move the mesh */ LineSearchContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ) { KRATOS_TRY; Parameters default_parameters = this->GetDefaultParameters(); ThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * Destructor. 
*/ ~LineSearchContactStrategy() override = default; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Create method * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ typename StrategyBaseType::Pointer Create( ModelPart& rModelPart, Parameters ThisParameters ) const override { return Kratos::make_shared<ClassType>(rModelPart, ThisParameters); } /** * @brief This method returns the defaulr parameters in order to avoid code duplication * @return Returns the default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "line_search_contact_strategy" })" ); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "line_search_contact_strategy"; } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "LineSearchContactStrategy"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ bool mRecalculateFactor; // To check if we recalculate or not the scale factor ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * Performs all the required operations that should be done (for each step) * before solving the solution step. * A member variable should be used as a flag to make sure this function is called only once per step. 
*/ void InitializeSolutionStep() override { BaseType::InitializeSolutionStep(); // TODO: Add something if necessary } /** * Here the database is updated */ void UpdateDatabase( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b, const bool MoveMesh ) override { typename TSchemeType::Pointer pScheme = this->GetScheme(); typename TBuilderAndSolverType::Pointer pBuilderAndSolver = this->GetBuilderAndSolver(); // FIXME: Separate in the parts of LM and displacement TSystemVectorType aux(b.size()); //TODO: do it by using the space TSparseSpace::Assign(aux, 0.5, Dx); TSystemVectorType DxDisp(b.size()); TSystemVectorType DxLM(b.size()); ComputeSplitDx(Dx, DxDisp, DxLM); // Compute residual without update TSparseSpace::SetToZero(b); pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b ); double roDisp; double roLM; ComputeMixedResidual(b, roDisp, roLM); // Compute half step residual NRBaseType::UpdateDatabase(A,aux,b,MoveMesh); TSparseSpace::SetToZero(b); pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b ); double rhDisp; double rhLM; ComputeMixedResidual(b, rhDisp, rhLM); // Compute full step residual (add another half Dx to the previous half) NRBaseType::UpdateDatabase(A,aux,b,MoveMesh); TSparseSpace::SetToZero(b); pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b ); double rfDisp; double rfLM; ComputeMixedResidual(b, rfDisp, rfLM); // We compute the parabola double XminDisp = 1e-3; double XmaxDisp = 1.0; double XminLM = 1e-3; double XmaxLM = 1.0; ComputeParabola(XminDisp, XmaxDisp, rfDisp, roDisp, rhDisp); ComputeParabola(XminLM, XmaxLM, rfLM, roLM, rhLM); // Perform final update TSparseSpace::Assign(aux,-(1.0 - XmaxDisp), DxDisp); TSparseSpace::UnaliasedAdd(aux,-(1.0 - XmaxLM), DxLM); NRBaseType::UpdateDatabase(A,aux,b,MoveMesh); } /** * This method split the vector of increment of DoF in displacement and LM * @param Dx The increment of displacements and LM * @param DxDisp The increment of displacements * @param DxLM The increment of LM */ void ComputeSplitDx( TSystemVectorType& Dx, TSystemVectorType& DxDisp, TSystemVectorType& DxLM ) { // Now we iterate over all the nodes NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes(); const int num_nodes = static_cast<int>(nodes_array.size()); #pragma omp parallel for for(int i = 0; i < num_nodes; ++i) { auto it_node = nodes_array.begin() + i; for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++) { const int j = (**itDoF).EquationId(); const std::size_t CurrVar = (**itDoF).GetVariable().Key(); if ((CurrVar == DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z)) { DxDisp[j] = Dx[j]; DxLM[j] = 0.0; } else // Corresponding with contact { DxDisp[j] = 0.0; DxLM[j] = Dx[j]; } } } } /** * This method calculates the norm considering one norm for the displacement and other norm for the LM * @param b The residual vector * @param normDisp normDisp: The norm of the displacement * @param normLM The norm of the LM */ void ComputeMixedResidual( TSystemVectorType& b, double& normDisp, double& normLM ) { // Now we iterate over all the nodes NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes(); const int num_nodes = static_cast<int>(nodes_array.size()); #pragma omp parallel for for(int i = 0; i < num_nodes; ++i) { auto it_node = nodes_array.begin() + i; for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++) { const int j = (**itDoF).EquationId(); const std::size_t CurrVar = 
(**itDoF).GetVariable().Key(); if ((CurrVar == DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z)) { AtomicAdd(normDisp, b[j] * b[j]); } else { // Corresponding with contact AtomicAdd(normLM, b[j] * b[j]); } } } normDisp = std::sqrt(normDisp); normLM = std::sqrt(normLM); } /** * This method computes the parabola necessary for the line search * @param Xmax The maximal abscissa * @param Xmin The norm of the LM * @param rf The residual norm of the full step * @param ro The residual norm without step * @param rh The residual norm of the half step */ void ComputeParabola( double& Xmax, double& Xmin, const double rf, const double ro, const double rh ) { // Compute optimal (limited to the range 0-1) // Parabola is y = a*x^2 + b*x + c -> min/max for // x=0 --> r=ro // x=1/2 --> r=rh // x=1 --> r = // c= ro, b= 4*rh -rf -3*ro, a= 2*rf - 4*rh + 2*ro // max found if a>0 at the position Xmax = (rf/4 - rh)/(rf - 2*rh); const double parabole_a = 2 * rf + 2 * ro - 4 * rh; const double parabole_b = 4 * rh - rf - 3 * ro; if( parabole_a > 0.0) // If parabola has a local minima { Xmax = -0.5 * parabole_b/parabole_a; // -b / 2a if( Xmax > 1.0) Xmax = 1.0; else if(Xmax < -1.0) Xmax = -1.0; } else // Parabola degenerates to either a line or to have a local max. best solution on either extreme { if(rf < ro) Xmax = 1.0; else Xmax = Xmin; // Should be zero, but otherwise it will stagnate } } /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@{ /** * Copy constructor. */ LineSearchContactStrategy(const LineSearchContactStrategy& Other) { }; private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class LineSearchContactStrategy */ ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } // namespace Kratos #endif /* KRATOS_LINE_SEARCH_CONTACT_STRATEGY */
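ComputeMixedResidual accumulates the two squared norms with AtomicAdd inside the parallel loop, but the accumulators are never zeroed: the locals passed in from UpdateDatabase are uninitialised. An equivalent sketch using an OpenMP reduction clause, which makes the zero-initialisation explicit and avoids atomic contention (plain arrays stand in for the Kratos containers):

#include <cmath>
#include <cstdio>

void MixedNorms(const double* b, const bool* is_disp_dof, int n,
                double& normDisp, double& normLM)
{
    double sumDisp = 0.0, sumLM = 0.0;  // explicit zero initialisation
    #pragma omp parallel for reduction(+ : sumDisp, sumLM)
    for (int j = 0; j < n; ++j) {
        if (is_disp_dof[j]) sumDisp += b[j] * b[j];
        else                sumLM   += b[j] * b[j];
    }
    normDisp = std::sqrt(sumDisp);
    normLM   = std::sqrt(sumLM);
}

int main()
{
    const double b[]    = {3.0, 4.0, 1.0};
    const bool   mask[] = {true, true, false};
    double nd, nl;
    MixedNorms(b, mask, 3, nd, nl);
    std::printf("disp = %g, lm = %g\n", nd, nl);  // disp = 5, lm = 1
}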
omp-axpy3.c
//
// omp-axpy.c
//
//
// Created by Yaying Shi on 10/2/19.
//
#include "omp-axpy.h"
#include <stdio.h>   // printf
#include <stdlib.h>  // rand

void axpy(int N, float *Y, float *X, float a) {
    int i;
    //#pragma omp target map(to:X[0:N]) map(tofrom:Y[0:N])
    #pragma omp parallel for
    for (i = 0; i < N; ++i) {
        Y[i] += a * X[i];
        printf("this is a test: %f %f\n", X[i], Y[i]);
    }
}

int main(int argc, char *argv[]) {
    int N = 100;
    float Y[N], X[N];
    float x = 5.0;
    for (int i = 0; i < N; i++) {
        Y[i] = (((float)rand() / (float)(10)) * x);
        X[i] = (((float)rand() / (float)(10)) * x);
        printf("this is Y: %f\n", Y[i]);
    }
    float a = 0.5;
    axpy(N, &Y[0], &X[0], a);
    return 0;
}
//
// omp-axpy.c
//
//
// Created by Yaying Shi on 10/2/19.
//
#include "omp-axpy.h"
#include <stdio.h>   // printf
#include <stdlib.h>  // rand

void axpy(int N, float *Y, float *X, float a) {
    int i;
    //
    for (i = 0; i < N; ++i) {
        Y[i] += a * X[i];
        printf("this is a test: %f %f\n", X[i], Y[i]);
    }
}

int main(int argc, char *argv[]) {
    int N = 100;
    float Y[N], X[N];
    float x = 5.0;
    for (int i = 0; i < N; i++) {
        Y[i] = (((float)rand() / (float)(10)) * x);
        X[i] = (((float)rand() / (float)(10)) * x);
        printf("this is Y: %f\n", Y[i]);
    }
    float a = 0.5;
    axpy(N, &Y[0], &X[0], a);
    return 0;
}
//
// omp-axpy.c
//
//
// Created by Yaying Shi on 10/2/19.
//
#include "omp-axpy.h"
#include <stdio.h>   // printf
#include <stdlib.h>  // rand

void axpy(int N, float *Y, float *X, float a) {
    int i;
    // #pragma omp target map(to:X[0:N]) map(tofrom:Y[0:N])
    #pragma omp parallel for
    for (i = 0; i < N; ++i) {
        Y[i] += a * X[i];
        printf("this is a test: %f %f\n", X[i], Y[i]);
    }
}

int main(int argc, char *argv[]) {
    int N = 100;
    float Y[N], X[N];
    float x = 5.0;
    for (int i = 0; i < N; i++) {
        Y[i] = (((float)rand() / (float)(10)) * x);
        X[i] = (((float)rand() / (float)(10)) * x);
        printf("this is Y: %f\n", Y[i]);
    }
    float a = 0.5;
    axpy(N, &Y[0], &X[0], a);
    return 0;
}
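The "#pragma omp target" line kept commented out above hints at a device-offloaded variant. A sketch of how that could look with a combined offload construct (assumes a compiler built with OpenMP target offload support; the loop falls back to the host otherwise):

#include <cstdio>

void axpy_offload(int N, float* Y, const float* X, float a)
{
    // Map X to the device, Y both ways, and run the loop there.
    #pragma omp target teams distribute parallel for map(to: X[0:N]) map(tofrom: Y[0:N])
    for (int i = 0; i < N; ++i)
        Y[i] += a * X[i];
}

int main()
{
    constexpr int N = 8;
    float X[N], Y[N];
    for (int i = 0; i < N; ++i) { X[i] = float(i); Y[i] = 1.0f; }
    axpy_offload(N, Y, X, 0.5f);
    for (int i = 0; i < N; ++i) std::printf("%g ", Y[i]);
    std::printf("\n");
}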
GB_binop__isgt_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isgt_int32 // A.*B function (eWiseMult): GB_AemultB__isgt_int32 // A*D function (colscale): GB_AxD__isgt_int32 // D*A function (rowscale): GB_DxB__isgt_int32 // C+=B function (dense accum): GB_Cdense_accumB__isgt_int32 // C+=b function (dense accum): GB_Cdense_accumb__isgt_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_int32 // C=scalar+B GB_bind1st__isgt_int32 // C=scalar+B' GB_bind1st_tran__isgt_int32 // C=A+scalar GB_bind2nd__isgt_int32 // C=A'+scalar GB_bind2nd_tran__isgt_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_INT32 || GxB_NO_ISGT_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isgt_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isgt_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isgt_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isgt_int32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isgt_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isgt_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isgt_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isgt_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isgt_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__isgt_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__isgt_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
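The generated GB_bind1st__isgt_int32 / GB_bind2nd__isgt_int32 kernels above bind one operand of z = (x > y) to a scalar and apply the operator elementwise with a statically scheduled parallel loop. A self-contained sketch of that pattern, simplified to drop the GB_DISABLE guard and the GB_void casts:

#include <cstdint>
#include <cstdio>

// bind1st: cij = (x > bij), scalar bound to the first operand
void bind1st_isgt(int32_t* Cx, int32_t x, const int32_t* Bx,
                  int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++)
        Cx[p] = (x > Bx[p]);
}

// bind2nd: cij = (aij > y), scalar bound to the second operand
void bind2nd_isgt(int32_t* Cx, const int32_t* Ax, int32_t y,
                  int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++)
        Cx[p] = (Ax[p] > y);
}

int main()
{
    int32_t B[] = {1, 5, 3}, C[3];
    bind1st_isgt(C, 4, B, 3, 2);
    std::printf("%d %d %d\n", C[0], C[1], C[2]);  // 1 0 1
}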
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isgt_int32 // A.*B function (eWiseMult): GB_AemultB__isgt_int32 // A*D function (colscale): GB_AxD__isgt_int32 // D*A function (rowscale): GB_DxB__isgt_int32 // C+=B function (dense accum): GB_Cdense_accumB__isgt_int32 // C+=b function (dense accum): GB_Cdense_accumb__isgt_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_int32 // C=scalar+B GB_bind1st__isgt_int32 // C=scalar+B' GB_bind1st_tran__isgt_int32 // C=A+scalar GB_bind2nd__isgt_int32 // C=A'+scalar GB_bind2nd_tran__isgt_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_INT32 || GxB_NO_ISGT_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isgt_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isgt_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isgt_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isgt_int32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isgt_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isgt_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isgt_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isgt_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { int32_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isgt_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__isgt_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__isgt_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isgt_int32 // A.*B function (eWiseMult): GB_AemultB__isgt_int32 // A*D function (colscale): GB_AxD__isgt_int32 // D*A function (rowscale): GB_DxB__isgt_int32 // C+=B function (dense accum): GB_Cdense_accumB__isgt_int32 // C+=b function (dense accum): GB_Cdense_accumb__isgt_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_int32 // C=scalar+B GB_bind1st__isgt_int32 // C=scalar+B' GB_bind1st_tran__isgt_int32 // C=A+scalar GB_bind2nd__isgt_int32 // C=A'+scalar GB_bind2nd_tran__isgt_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_INT32 || GxB_NO_ISGT_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isgt_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isgt_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isgt_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isgt_int32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isgt_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isgt_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isgt_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isgt_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isgt_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__isgt_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__isgt_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
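All of the generated isgt_int32 kernels above reduce to the same elementwise pattern: evaluate the ISGT operator z = (x > y) over a dense array, splitting the loop statically across nthreads OpenMP threads. The standalone sketch below (not part of GraphBLAS; the function name is illustrative) mirrors GB_bind2nd__isgt_int32 on that pattern:

#include <stdio.h>
#include <stdint.h>

/* apply the ISGT binary op z = (x > y) with a scalar bound to the 2nd argument */
static void bind2nd_isgt_int32 (int32_t *Cx, const int32_t *Ax, int32_t y,
                                int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (Ax [p] > y) ;
    }
}

int main (void)
{
    int32_t Ax [5] = { -2, 0, 3, 7, 9 } ;
    int32_t Cx [5] ;
    bind2nd_isgt_int32 (Cx, Ax, 3, 5, 2) ;
    for (int p = 0 ; p < 5 ; p++) printf ("%d ", (int) Cx [p]) ;  /* 0 0 0 1 1 */
    printf ("\n") ;
    return (0) ;
}

Compiled with -fopenmp the loop runs in parallel; without it the pragma is ignored and the result is identical, which is exactly why the OpenMP and no-OpenMP variants of this file differ only in their pragma lines.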
GB_subassign_zombie.c
//------------------------------------------------------------------------------ // GB_subassign_zombie: C(I,J)<!,repl> = empty ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 00: C(I,J)<!,repl> = empty ; using S // M: NULL // Mask_comp: true // C_replace: true // accum: any (present or not; result is the same) // A: any (scalar or matrix; result is the same) // S: constructed // C: not bitmap #include "GB_subassign_methods.h" #undef GB_FREE_ALL #define GB_FREE_ALL GB_Matrix_free (&S) ; GrB_Info GB_subassign_zombie ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t ni, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nj, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ; //-------------------------------------------------------------------------- // S = C(I,J) //-------------------------------------------------------------------------- GrB_Info info ; GrB_Matrix S = NULL ; GB_OK (GB_subassign_symbolic (&S, C, I, ni, J, nj, false, Context)) ; ASSERT (GB_JUMBLED_OK (S)) ; // S can be returned as jumbled //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- const int64_t *GB_RESTRICT Sx = (int64_t *) S->x ; int64_t *GB_RESTRICT Ci = C->i ; //-------------------------------------------------------------------------- // Method 00: C(I,J)<!,repl> = empty ; using S //-------------------------------------------------------------------------- // Time: Optimal, O(nnz(S)), assuming S has already been constructed. //-------------------------------------------------------------------------- // Parallel: all entries in S can be processed entirely in parallel. //-------------------------------------------------------------------------- // All entries in C(I,J) are deleted. The result does not depend on A or // the scalar. int64_t snz = GB_NNZ (S) ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (snz, chunk, nthreads_max) ; int64_t nzombies = C->nzombies ; int64_t pS ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:nzombies) for (pS = 0 ; pS < snz ; pS++) { // S (inew,jnew) is a pointer back into C (I(inew), J(jnew)) int64_t pC = Sx [pS] ; int64_t i = Ci [pC] ; // ----[X A 0] or [X . 0]----------------------------------------------- // action: ( X ): still a zombie // ----[C A 0] or [C . 0]----------------------------------------------- // action: C_repl: ( delete ): becomes a zombie if (!GB_IS_ZOMBIE (i)) { nzombies++ ; Ci [pC] = GB_FLIP (i) ; } } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- C->nzombies = nzombies ; GB_FREE_ALL ; return (GrB_SUCCESS) ; }
//------------------------------------------------------------------------------ // GB_subassign_zombie: C(I,J)<!,repl> = empty ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 00: C(I,J)<!,repl> = empty ; using S // M: NULL // Mask_comp: true // C_replace: true // accum: any (present or not; result is the same) // A: any (scalar or matrix; result is the same) // S: constructed // C: not bitmap #include "GB_subassign_methods.h" #undef GB_FREE_ALL #define GB_FREE_ALL GB_Matrix_free (&S) ; GrB_Info GB_subassign_zombie ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t ni, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nj, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ; //-------------------------------------------------------------------------- // S = C(I,J) //-------------------------------------------------------------------------- GrB_Info info ; GrB_Matrix S = NULL ; GB_OK (GB_subassign_symbolic (&S, C, I, ni, J, nj, false, Context)) ; ASSERT (GB_JUMBLED_OK (S)) ; // S can be returned as jumbled //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- const int64_t *GB_RESTRICT Sx = (int64_t *) S->x ; int64_t *GB_RESTRICT Ci = C->i ; //-------------------------------------------------------------------------- // Method 00: C(I,J)<!,repl> = empty ; using S //-------------------------------------------------------------------------- // Time: Optimal, O(nnz(S)), assuming S has already been constructed. //-------------------------------------------------------------------------- // Parallel: all entries in S can be processed entirely in parallel. //-------------------------------------------------------------------------- // All entries in C(I,J) are deleted. The result does not depend on A or // the scalar. int64_t snz = GB_NNZ (S) ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (snz, chunk, nthreads_max) ; int64_t nzombies = C->nzombies ; int64_t pS ; for (pS = 0 ; pS < snz ; pS++) { // S (inew,jnew) is a pointer back into C (I(inew), J(jnew)) int64_t pC = Sx [pS] ; int64_t i = Ci [pC] ; // ----[X A 0] or [X . 0]----------------------------------------------- // action: ( X ): still a zombie // ----[C A 0] or [C . 0]----------------------------------------------- // action: C_repl: ( delete ): becomes a zombie if (!GB_IS_ZOMBIE (i)) { nzombies++ ; Ci [pC] = GB_FLIP (i) ; } } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- C->nzombies = nzombies ; GB_FREE_ALL ; return (GrB_SUCCESS) ; }
//------------------------------------------------------------------------------ // GB_subassign_zombie: C(I,J)<!,repl> = empty ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 00: C(I,J)<!,repl> = empty ; using S // M: NULL // Mask_comp: true // C_replace: true // accum: any (present or not; result is the same) // A: any (scalar or matrix; result is the same) // S: constructed // C: not bitmap #include "GB_subassign_methods.h" #undef GB_FREE_ALL #define GB_FREE_ALL GB_Matrix_free (&S) ; GrB_Info GB_subassign_zombie ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t ni, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nj, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ; //-------------------------------------------------------------------------- // S = C(I,J) //-------------------------------------------------------------------------- GrB_Info info ; GrB_Matrix S = NULL ; GB_OK (GB_subassign_symbolic (&S, C, I, ni, J, nj, false, Context)) ; ASSERT (GB_JUMBLED_OK (S)) ; // S can be returned as jumbled //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- const int64_t *GB_RESTRICT Sx = (int64_t *) S->x ; int64_t *GB_RESTRICT Ci = C->i ; //-------------------------------------------------------------------------- // Method 00: C(I,J)<!,repl> = empty ; using S //-------------------------------------------------------------------------- // Time: Optimal, O(nnz(S)), assuming S has already been constructed. //-------------------------------------------------------------------------- // Parallel: all entries in S can be processed entirely in parallel. //-------------------------------------------------------------------------- // All entries in C(I,J) are deleted. The result does not depend on A or // the scalar. int64_t snz = GB_NNZ (S) ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (snz, chunk, nthreads_max) ; int64_t nzombies = C->nzombies ; int64_t pS ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:nzombies) for (pS = 0 ; pS < snz ; pS++) { // S (inew,jnew) is a pointer back into C (I(inew), J(jnew)) int64_t pC = Sx [pS] ; int64_t i = Ci [pC] ; // ----[X A 0] or [X . 0]----------------------------------------------- // action: ( X ): still a zombie // ----[C A 0] or [C . 0]----------------------------------------------- // action: C_repl: ( delete ): becomes a zombie if (!GB_IS_ZOMBIE (i)) { nzombies++ ; Ci [pC] = GB_FLIP (i) ; } } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- C->nzombies = nzombies ; GB_FREE_ALL ; return (GrB_SUCCESS) ; }
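GB_subassign_zombie relies on the zombie convention visible in its loop: a deleted entry is marked in place by storing GB_FLIP (i) instead of the row index i, so each deletion is O(1) and moves no data. Below is a self-contained sketch of that encoding; the macro bodies are simplified assumptions modeled on GB.h, chosen so that every valid index maps to a negative value and flipping twice restores the original:

#include <stdio.h>
#include <stdint.h>

/* assumed encoding, modeled on GB.h: a negative stored index marks a zombie */
#define GB_FLIP(i)      (-(i)-2)
#define GB_IS_ZOMBIE(i) ((i) < 0)

int main (void)
{
    int64_t i = 42 ;
    int64_t z = GB_FLIP (i) ;   /* stored index becomes -44: entry is deleted */
    printf ("is zombie: %d\n", (int) GB_IS_ZOMBIE (z)) ;      /* 1 */
    printf ("unflipped: %lld\n", (long long) GB_FLIP (z)) ;   /* 42 */
    return (0) ;
}

Because the flip is its own inverse, a later pass can resurrect an entry or compact all zombies out in one sweep; the reduction(+:nzombies) clause in the parallel loop merely keeps the running zombie count consistent across threads.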
hello3.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> int main (int argc, char *argv[]) { int nthreads, tid; omp_set_num_threads(4); int k =0; /* Fork a team of threads giving them their own copies of variables; NB: the loop variable of a parallel for is predetermined private and must not appear in a shared clause */ #pragma omp parallel for private(nthreads, tid) for(k=0;k<8;k++) { /* Obtain thread number */ tid = omp_get_thread_num(); printf("Hello World from thread = %d and %d\n", tid,k); } /* All threads join master thread and disband */ }
#include <omp.h> #include <stdio.h> #include <stdlib.h> int main(int argc, char *argv[]) { int nthreads, tid; omp_set_num_threads(4); int k = 0; /* Fork a team of threads giving them their own copies of variables */ for (k = 0; k < 8; k++) { /* Obtain thread number */ tid = omp_get_thread_num(); printf("Hello World from thread = %d and %d\n", tid, k); } /* All threads join master thread and disband */ }
#include <omp.h> #include <stdio.h> #include <stdlib.h> int main(int argc, char *argv[]) { int nthreads, tid; omp_set_num_threads(4); int k = 0; /* Fork a team of threads giving them their own copies of variables; NB: the loop variable of a parallel for is predetermined private and must not appear in a shared clause */ #pragma omp parallel for private(nthreads, tid) for (k = 0; k < 8; k++) { /* Obtain thread number */ tid = omp_get_thread_num(); printf("Hello World from thread = %d and %d\n", tid, k); } /* All threads join master thread and disband */ }
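Note that hello3.c declares nthreads but never assigns it. A minimal variant (a sketch, not part of the sample above) that actually reports the team size queries it from inside the parallel region; build with, e.g., gcc -fopenmp:

#include <omp.h>
#include <stdio.h>

int main(void)
{
    omp_set_num_threads(4);
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();       /* this thread's id, 0..3 */
        int nthreads = omp_get_num_threads(); /* actual team size */
        printf("Hello World from thread = %d of %d\n", tid, nthreads);
    }
    return 0;
}

Declaring tid and nthreads inside the parallel region makes them private to each thread automatically, so no private(...) clause is needed at all.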
nco_rgr.c
/* $Header$ */ /* Purpose: NCO regridding utilities */ /* Copyright (C) 2015--present Charlie Zender This file is part of NCO, the netCDF Operators. NCO is free software. You may redistribute and/or modify NCO under the terms of the 3-Clause BSD License with exceptions described in the LICENSE file */ #include "nco_rgr.h" /* Regridding */ extern double min_dbl(double a, double b); extern double max_dbl(double a, double b); inline double min_dbl(double a, double b){return (a < b) ? a : b;} inline double max_dbl(double a, double b){return (a > b) ? a : b;} int /* O [enm] Return code */ nco_rgr_ctl /* [fnc] Control regridding logic */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Control regridding logic */ int rcd=NCO_NOERR; const char fnc_nm[]="nco_rgr_ctl()"; nco_bool flg_grd=False; /* [flg] Create SCRIP-format grid file */ nco_bool flg_map=False; /* [flg] Create ESMF-format mapfile */ nco_bool flg_nfr=False; /* [flg] Infer SCRIP-format grid file */ nco_bool flg_smf=False; /* [flg] ESMF regridding (unused) */ nco_bool flg_s1d=False; /* [flg] Unpack sparse-1D CLM/ELM variables */ nco_bool flg_tps=False; /* [flg] Tempest regridding (unused) */ nco_bool flg_vrt=False; /* [flg] Interpolate to new vertical grid */ nco_bool flg_wgt=False; /* [flg] Regrid with external weights */ /* Main control branching occurs here Branching complexity and utility will increase as regridding features are added */ if(rgr->flg_grd) flg_grd=True; if(rgr->flg_grd_src && rgr->flg_grd_dst && rgr->flg_wgt) flg_map=True; if(rgr->flg_nfr) flg_nfr=True; if(rgr->flg_wgt && !(rgr->flg_grd_src && rgr->flg_grd_dst)) flg_wgt=True; if(rgr->flg_s1d) flg_s1d=True; if(rgr->fl_vrt) flg_vrt=True; assert(!flg_smf); assert(!flg_tps); /* Create SCRIP-format grid file */ if(flg_grd) rcd=nco_grd_mk(rgr); /* Create ESMF-format map file */ if(flg_map) rcd=nco_map_mk(rgr); /* Infer SCRIP-format grid file from data file */ if(flg_nfr) rcd=nco_grd_nfr(rgr); /* Interpolate data file to new vertical grid */ if(flg_vrt) rcd=nco_ntp_vrt(rgr,trv_tbl); /* Unpack sparse-1D CLM/ELM variables into full file */ if(flg_s1d) rcd=nco_s1d_unpack(rgr,trv_tbl); /* Regrid data horizontally using weights from mapping file */ if(flg_wgt) rcd=nco_rgr_wgt(rgr,trv_tbl); /* Regrid using ESMF library 20150701: On-line weight generation with ESMF never worked well and was abandoned */ if(flg_smf){ #ifdef ENABLE_ESMF (void)fprintf(stderr,"%s: %s calling nco_rgr_esmf() to generate and apply regridding map\n",nco_prg_nm_get(),fnc_nm); rcd=nco_rgr_esmf(rgr); /* Close output and free dynamic memory */ (void)nco_fl_out_cls(rgr->fl_out,rgr->fl_out_tmp,rgr->out_id); #else /* !ENABLE_ESMF */ (void)fprintf(stderr,"%s: ERROR %s reports attempt to use ESMF regridding without built-in support. 
Re-configure with --enable_esmf.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); #endif /* !ENABLE_ESMF */ } /* !flg_smf */ /* Regrid using TempestRemap regridding 20180314: Weight generation with Tempest is implemented off-line via ncremap, not internally on-line However, do not deprecate this since TempestRemap2 has a library that could be accessed on-line */ if(flg_tps) rcd=nco_rgr_tps(rgr); return rcd; } /* end nco_rgr_ctl() */ rgr_sct * /* O [sct] Pointer to free'd regridding structure */ nco_rgr_free /* [fnc] Deallocate regridding structure */ (rgr_sct *rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Free all dynamic memory in regridding structure */ /* free() standalone command-line arguments */ if(rgr->cmd_ln) rgr->cmd_ln=(char *)nco_free(rgr->cmd_ln); if(rgr->grd_ttl) rgr->grd_ttl=(char *)nco_free(rgr->grd_ttl); if(rgr->fl_grd_src) rgr->fl_grd_src=(char *)nco_free(rgr->fl_grd_src); if(rgr->fl_grd_dst) rgr->fl_grd_dst=(char *)nco_free(rgr->fl_grd_dst); if(rgr->fl_hrz) rgr->fl_hrz=(char *)nco_free(rgr->fl_hrz); if(rgr->fl_in) rgr->fl_in=(char *)nco_free(rgr->fl_in); if(rgr->fl_map) rgr->fl_map=(char *)nco_free(rgr->fl_map); if(rgr->fl_msh) rgr->fl_msh=(char *)nco_free(rgr->fl_msh); if(rgr->fl_out) rgr->fl_out=(char *)nco_free(rgr->fl_out); if(rgr->fl_out_tmp) rgr->fl_out_tmp=(char *)nco_free(rgr->fl_out_tmp); if(rgr->fl_vrt) rgr->fl_vrt=(char *)nco_free(rgr->fl_vrt); if(rgr->var_nm) rgr->var_nm=(char *)nco_free(rgr->var_nm); if(rgr->xtn_var) rgr->xtn_var=(char **)nco_sng_lst_free(rgr->xtn_var,rgr->xtn_nbr); /* free() strings associated with grid properties */ if(rgr->fl_grd) rgr->fl_grd=(char *)nco_free(rgr->fl_grd); if(rgr->fl_hnt_dst) rgr->fl_hnt_dst=(char *)nco_free(rgr->fl_hnt_dst); if(rgr->fl_hnt_src) rgr->fl_hnt_src=(char *)nco_free(rgr->fl_hnt_src); if(rgr->fl_skl) rgr->fl_skl=(char *)nco_free(rgr->fl_skl); if(rgr->fl_ugrid) rgr->fl_ugrid=(char *)nco_free(rgr->fl_ugrid); /* Tempest */ if(rgr->drc_tps) rgr->drc_tps=(char *)nco_free(rgr->drc_tps); /* free() memory used to construct KVMs */ if(rgr->rgr_nbr > 0) rgr->rgr_arg=nco_sng_lst_free(rgr->rgr_arg,rgr->rgr_nbr); /* free() memory copied from KVMs */ if(rgr->area_nm) rgr->area_nm=(char *)nco_free(rgr->area_nm); if(rgr->bnd_nm) rgr->bnd_nm=(char *)nco_free(rgr->bnd_nm); if(rgr->bnd_tm_nm) rgr->bnd_tm_nm=(char *)nco_free(rgr->bnd_tm_nm); if(rgr->col_nm_in) rgr->col_nm_in=(char *)nco_free(rgr->col_nm_in); if(rgr->col_nm_out) rgr->col_nm_out=(char *)nco_free(rgr->col_nm_out); if(rgr->frc_nm) rgr->frc_nm=(char *)nco_free(rgr->frc_nm); if(rgr->ilev_nm_in) rgr->ilev_nm_in=(char *)nco_free(rgr->ilev_nm_in); if(rgr->ilev_nm_out) rgr->ilev_nm_out=(char *)nco_free(rgr->ilev_nm_out); if(rgr->lat_bnd_nm) rgr->lat_bnd_nm=(char *)nco_free(rgr->lat_bnd_nm); if(rgr->lat_nm_in) rgr->lat_nm_in=(char *)nco_free(rgr->lat_nm_in); if(rgr->lat_nm_out) rgr->lat_nm_out=(char *)nco_free(rgr->lat_nm_out); if(rgr->lat_vrt_nm) rgr->lat_vrt_nm=(char *)nco_free(rgr->lat_vrt_nm); if(rgr->lat_wgt_nm) rgr->lat_wgt_nm=(char *)nco_free(rgr->lat_wgt_nm); if(rgr->lev_nm_in) rgr->lev_nm_in=(char *)nco_free(rgr->lev_nm_in); if(rgr->lev_nm_out) rgr->lev_nm_out=(char *)nco_free(rgr->lev_nm_out); if(rgr->lon_bnd_nm) rgr->lon_bnd_nm=(char *)nco_free(rgr->lon_bnd_nm); if(rgr->lon_nm_in) rgr->lon_nm_in=(char *)nco_free(rgr->lon_nm_in); if(rgr->lon_nm_out) rgr->lon_nm_out=(char *)nco_free(rgr->lon_nm_out); if(rgr->lon_vrt_nm) rgr->lon_vrt_nm=(char *)nco_free(rgr->lon_vrt_nm); if(rgr->msk_nm) rgr->msk_nm=(char *)nco_free(rgr->msk_nm); if(rgr->plev_nm_in) 
rgr->plev_nm_in=(char *)nco_free(rgr->plev_nm_in); if(rgr->vrt_nm) rgr->vrt_nm=(char *)nco_free(rgr->vrt_nm); /* Lastly, free() regrid structure itself */ if(rgr) rgr=(rgr_sct *)nco_free(rgr); return rgr; } /* end nco_rgr_free() */ rgr_sct * /* O [sct] Regridding structure */ nco_rgr_ini /* [fnc] Initialize regridding structure */ (const char * const cmd_ln, /* I [sng] Command-line */ const int in_id, /* I [id] Input netCDF file ID */ char **rgr_arg, /* [sng] Regridding arguments */ const int rgr_arg_nbr, /* [nbr] Number of regridding arguments */ char * const rgr_in, /* I [sng] File containing fields to be regridded */ char * const rgr_out, /* I [sng] File containing regridded fields */ char * const rgr_grd_src, /* I [sng] File containing input grid */ char * const rgr_grd_dst, /* I [sng] File containing destination grid */ char * const rgr_hrz, /* I [sng] File containing horizontal coordinate grid */ char * const rgr_map, /* I [sng] File containing mapping weights from source to destination grid */ char * const rgr_var, /* I [sng] Variable for special regridding treatment */ char * const rgr_vrt, /* I [sng] File containing vertical coordinate grid */ const double wgt_vld_thr, /* I [frc] Weight threshold for valid destination value */ char **xtn_var, /* [sng] I Extensive variables */ const int xtn_nbr) /* [nbr] I Number of extensive variables */ { /* Purpose: Initialize regridding structure */ const char fnc_nm[]="nco_rgr_ini()"; rgr_sct *rgr; /* Allocate */ rgr=(rgr_sct *)nco_malloc(sizeof(rgr_sct)); /* Initialize variables directly or indirectly set via command-line (except for key-value arguments) */ rgr->cmd_ln=strdup(cmd_ln); /* [sng] Command-line */ rgr->flg_usr_rqs=False; /* [flg] User requested regridding */ rgr->out_id=int_CEWI; /* [id] Output netCDF file ID */ rgr->in_id=in_id; /* [id] Input netCDF file ID */ rgr->rgr_arg=rgr_arg; /* [sng] Regridding arguments */ rgr->rgr_nbr=rgr_arg_nbr; /* [nbr] Number of regridding arguments */ rgr->drc_tps=NULL; /* [sng] Directory where Tempest grids, meshes, and weights are stored */ rgr->flg_grd_src= rgr_grd_src ? True : False; /* [flg] User-specified input grid */ rgr->fl_grd_src=rgr_grd_src; /* [sng] File containing input grid */ rgr->flg_grd_dst= rgr_grd_dst ? True : False; /* [flg] User-specified destination grid */ rgr->fl_grd_dst=rgr_grd_dst; /* [sng] File containing destination grid */ rgr->fl_in=rgr_in; /* [sng] File containing fields to be regridded */ rgr->fl_out=rgr_out; /* [sng] File containing regridded fields */ rgr->fl_out_tmp=NULL_CEWI; /* [sng] Temporary file containing regridded fields */ rgr->flg_wgt= rgr_map ? True : False; /* [flg] User-specified mapping weights */ rgr->fl_map=rgr_map; /* [sng] File containing mapping weights from source to destination grid */ rgr->fl_hrz=rgr_hrz; /* [sng] [sng] File containing horizontal coordinate grid (for S1D) */ rgr->fl_vrt=rgr_vrt; /* [sng] [sng] File containing vertical coordinate grid */ rgr->var_nm=rgr_var; /* [sng] Variable for special regridding treatment */ rgr->xtn_var=xtn_var; /* [sng] Extensive variables */ rgr->xtn_nbr=xtn_nbr; /* [nbr] Number of extensive variables */ /* Did user explicitly request regridding? 
*/ if(rgr_arg_nbr > 0 || rgr_grd_src != NULL || rgr_grd_dst != NULL || rgr_map != NULL || rgr_vrt != NULL) rgr->flg_usr_rqs=True; /* Initialize arguments after copying */ if(!rgr->fl_out) rgr->fl_out=(char *)strdup("/data/zender/rgr/rgr_out.nc"); if(!rgr->fl_grd_dst) rgr->fl_grd_dst=(char *)strdup("/data/zender/scrip/grids/remap_grid_T42.nc"); // if(!rgr->var_nm) rgr->var_nm=(char *)strdup("ORO"); if(nco_dbg_lvl_get() >= nco_dbg_crr){ (void)fprintf(stderr,"%s: INFO %s reports ",nco_prg_nm_get(),fnc_nm); (void)fprintf(stderr,"flg_usr_rqs = %d, ",rgr->flg_usr_rqs); (void)fprintf(stderr,"rgr_nbr = %d, ",rgr->rgr_nbr); (void)fprintf(stderr,"fl_grd_src = %s, ",rgr->fl_grd_src ? rgr->fl_grd_src : "NULL"); (void)fprintf(stderr,"fl_grd_dst = %s, ",rgr->fl_grd_dst ? rgr->fl_grd_dst : "NULL"); (void)fprintf(stderr,"fl_hrz = %s, ",rgr->fl_hrz ? rgr->fl_hrz : "NULL"); (void)fprintf(stderr,"fl_in = %s, ",rgr->fl_in ? rgr->fl_in : "NULL"); (void)fprintf(stderr,"fl_out = %s, ",rgr->fl_out ? rgr->fl_out : "NULL"); (void)fprintf(stderr,"fl_out_tmp = %s, ",rgr->fl_out_tmp ? rgr->fl_out_tmp : "NULL"); (void)fprintf(stderr,"fl_map = %s, ",rgr->fl_map ? rgr->fl_map : "NULL"); (void)fprintf(stderr,"fl_vrt = %s, ",rgr->fl_vrt ? rgr->fl_vrt : "NULL"); (void)fprintf(stderr,"\n"); } /* endif dbg */ /* Flags */ if(wgt_vld_thr == NC_MIN_DOUBLE){ rgr->flg_rnr=False; }else if(wgt_vld_thr >= 0.0 && wgt_vld_thr <= 1.0){ /* NB: Weight thresholds of 0.0 or nearly zero can lead to underflow or divide-by-zero errors */ // const double wgt_vld_thr_min=1.0e-10; /* [frc] Minimum weight threshold for valid destination value */ rgr->flg_rnr=True; rgr->wgt_vld_thr=wgt_vld_thr; }else{ (void)fprintf(stderr,"%s: ERROR weight threshold must be in [0.0,1.0] and user supplied wgt_vld_thr = %g\n",nco_prg_nm_get(),wgt_vld_thr); nco_exit(EXIT_FAILURE); } /* endif */ /* Parse extended kvm options */ char *sng_fnl=NULL; int cnv_nbr; /* [nbr] Number of elements converted by sscanf() */ int rgr_var_idx; /* [idx] Index over rgr_lst (i.e., all names explicitly specified in all "--rgr var1[,var2]=val" options) */ int rgr_var_nbr=0; kvm_sct *rgr_lst=NULL; /* [sct] List of all regrid specifications */ if(rgr_arg_nbr > 0){ /* Join arguments together */ sng_fnl=nco_join_sng(rgr_arg,rgr_arg_nbr); rgr_lst=nco_arg_mlt_prs(sng_fnl); if(sng_fnl) sng_fnl=(char *)nco_free(sng_fnl); /* Count number of keys */ for(rgr_var_idx=0;(rgr_lst+rgr_var_idx)->key;rgr_var_idx++,rgr_var_nbr++);/* !rgr_var_idx */ } /* !rgr_arg_nbr */ /* NULL-initialize key-value properties required for string variables */ rgr->area_nm=NULL; /* [sng] Name of variable containing gridcell area */ rgr->bnd_nm=NULL; /* [sng] Name of dimension to employ for spatial bounds */ rgr->bnd_tm_nm=NULL; /* [sng] Name of dimension to employ for temporal bounds */ rgr->col_nm_in=NULL; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ rgr->col_nm_out=NULL; /* [sng] Name of horizontal spatial output dimension on unstructured grid */ rgr->frc_nm=NULL; /* [sng] Name of variable containing gridcell fraction */ rgr->ilev_nm_in=NULL; /* [sng] Name of input dimension to recognize as vertical dimension at layer interfaces */ rgr->ilev_nm_out=NULL; /* [sng] Name of output vertical dimension at layer interfaces */ rgr->lat_bnd_nm=NULL; /* [sng] Name of rectangular boundary variable for latitude */ rgr->lat_dmn_nm=NULL; /* [sng] Name of latitude dimension in inferred grid */ rgr->lat_nm_in=NULL; /* [sng] Name of input dimension to recognize as latitude */ rgr->lat_nm_out=NULL; 
/* [sng] Name of output dimension for latitude */ rgr->lat_vrt_nm=NULL; /* [sng] Name of non-rectangular boundary variable for latitude */ rgr->lat_wgt_nm=NULL; /* [sng] Name of variable containing latitude weights */ rgr->lev_nm_in=NULL; /* [sng] Name of input dimension to recognize as vertical dimension at layer midpoints */ rgr->lev_nm_out=NULL; /* [sng] Name of output vertical dimension at layer midpoints */ rgr->lon_bnd_nm=NULL; /* [sng] Name of rectangular boundary variable for longitude */ rgr->lon_dmn_nm=NULL; /* [sng] Name of longitude dimension in inferred grid */ rgr->lon_nm_in=NULL; /* [sng] Name of dimension to recognize as longitude */ rgr->lon_nm_out=NULL; /* [sng] Name of output dimension for longitude */ rgr->lon_vrt_nm=NULL; /* [sng] Name of non-rectangular boundary variable for longitude */ rgr->msk_nm=NULL; /* [sng] Name of variable containing destination mask */ rgr->plev_nm_in=NULL; /* [sng] Name of input variable recognize as pure-pressure coordinate */ rgr->sgs_frc_nm=NULL; /* [sng] Name of variable sub-gridscale fraction */ rgr->sgs_msk_nm=NULL; /* [sng] Name of variable sub-gridscale mask */ rgr->vrt_nm=NULL; /* [sng] Name of dimension to employ for vertices */ /* Initialize key-value properties used in grid and weight generation */ rgr->area_mth=1; /* [enm] Method to compute grid cell area */ rgr->edg_typ=nco_edg_nil; /* [enm] Edge/Arc-type for triangle edges */ rgr->fl_grd=NULL; /* [sng] Name of SCRIP grid file to create */ rgr->fl_hnt_dst=NULL; /* [sng] ERWG hint destination */ rgr->fl_hnt_src=NULL; /* [sng] ERWG hint source */ rgr->fl_msh=NULL; /* [sng] Name of SCRIP intersection mesh file to create */ rgr->fl_skl=NULL; /* [sng] Name of skeleton data file to create */ rgr->fl_ugrid=NULL; /* [sng] Name of UGRID grid file to create */ rgr->flg_add_fll=False; /* [flg] Add _FillValue to fields with empty destination cells */ rgr->flg_area_out=True; /* [flg] Add area to output */ rgr->flg_cf_units=False; /* [flg] Generate CF-compliant (breaks ERWG 7.1.0r-) units fields in SCRIP-format grid files */ rgr->flg_cll_msr=True; /* [flg] Add cell_measures attribute */ rgr->flg_crv=False; /* [flg] Use curvilinear coordinates */ rgr->flg_dgn_area=False; /* [flg] Diagnose rather than copy inferred area */ rgr->flg_dgn_bnd=False; /* [flg] Diagnose rather than copy inferred bounds */ rgr->flg_erwg_units=True; /* [flg] Generate ERWG 7.1.0r-compliant SCRIP-format grid files */ rgr->flg_grd=False; /* [flg] Create SCRIP-format grid file */ rgr->flg_msk_apl=False; /* [flg] Apply msk_out to variables after regridding */ rgr->flg_msk_out=False; /* [flg] Add mask to output */ rgr->flg_nfr=False; /* [flg] Infer SCRIP-format grid file */ rgr->flg_s1d=False; /* [flg] Unpack sparse-1D CLM/ELM variables */ rgr->flg_stg=True; /* [flg] Write staggered grid with FV output */ rgr->grd_ttl=strdup("None given (supply with --rgr grd_ttl=\"Grid Title\")"); /* [enm] Grid title */ rgr->grd_typ=nco_grd_2D_eqa; /* [enm] Grid type */ rgr->idx_dbg=0; /* [idx] Index of gridcell for debugging */ rgr->lat_drc=nco_grd_lat_drc_s2n; /* [enm] Latitude grid direction */ rgr->lat_typ=nco_grd_lat_eqa; /* [enm] Latitude grid type */ rgr->lon_typ=nco_grd_lon_Grn_ctr; /* [enm] Longitude grid type */ rgr->lat_nbr=180; /* [nbr] Number of latitudes in destination grid */ rgr->lon_nbr=360; /* [nbr] Number of longitudes in destination grid */ rgr->lat_crv=0.0; /* [dgr] Latitudinal curvilinearity */ rgr->lon_crv=0.0; /* [dgr] Longitudinal curvilinearity */ rgr->lat_sth=NC_MAX_DOUBLE; /* [dgr] Latitude of southern edge of 
grid */ rgr->lon_wst=NC_MAX_DOUBLE; /* [dgr] Longitude of western edge of grid */ rgr->lat_nrt=NC_MAX_DOUBLE; /* [dgr] Latitude of northern edge of grid */ rgr->lon_est=NC_MAX_DOUBLE; /* [dgr] Longitude of eastern edge of grid */ rgr->msk_var=NULL; /* [sng] Mask-template variable */ rgr->ply_tri_mth=nco_ply_tri_mth_csz; /* [enm] Polygon-to-triangle decomposition method */ rgr->sgs_nrm=1.0; /* [sng] Sub-gridscale normalization */ rgr->tst=0L; /* [enm] Generic key for testing (undocumented) */ rgr->ntp_mth=nco_ntp_log; /* [enm] Interpolation method */ rgr->xtr_mth=nco_xtr_fll_ngh; /* [enm] Extrapolation method */ rgr->xtr_nsp=8; /* [sng] Extrapolation number of source points */ rgr->xtr_xpn=2.0; /* [sng] Exponent of distance in extrapolation (absolute value) */ rgr->wgt_typ=nco_wgt_con; /* [enm] Weight generation method */ /* Parse key-value properties */ char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */ for(rgr_var_idx=0;rgr_var_idx<rgr_var_nbr;rgr_var_idx++){ if(!strcmp(rgr_lst[rgr_var_idx].key,"grid") || !strcasecmp(rgr_lst[rgr_var_idx].key,"scrip")){ rgr->fl_grd=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_grd=True; continue; } /* !grid */ if(!strcmp(rgr_lst[rgr_var_idx].key,"hnt_dst") || !strcmp(rgr_lst[rgr_var_idx].key,"fl_hnt_dst")){ rgr->fl_hnt_dst=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !hnt_dst */ if(!strcmp(rgr_lst[rgr_var_idx].key,"hnt_src") || !strcmp(rgr_lst[rgr_var_idx].key,"fl_hnt_src")){ rgr->fl_hnt_src=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !hnt_src */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_var") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_var") || !strcmp(rgr_lst[rgr_var_idx].key,"mask") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_variable")){ rgr->msk_var=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_msk_out=True; continue; } /* !msk_var */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msh") || !strcmp(rgr_lst[rgr_var_idx].key,"mesh")){ rgr->fl_msh=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !msh */ if(!strcmp(rgr_lst[rgr_var_idx].key,"skl")){ rgr->fl_skl=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_grd=True; continue; } /* !skl */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"ugrid")){ rgr->fl_ugrid=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_nfr=True; continue; } /* !ugrid */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"fl_hrz") || !strcasecmp(rgr_lst[rgr_var_idx].key,"hrz")){ rgr->fl_hrz=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !hrz */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"fl_vrt") || !strcasecmp(rgr_lst[rgr_var_idx].key,"vrt")){ rgr->fl_vrt=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !vrt */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_area") || !strcmp(rgr_lst[rgr_var_idx].key,"no_area_out")){ rgr->flg_area_out=False; continue; } /* !area */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_msk") || !strcmp(rgr_lst[rgr_var_idx].key,"no_msk_out") || !strcmp(rgr_lst[rgr_var_idx].key,"no_mask") || !strcmp(rgr_lst[rgr_var_idx].key,"no_mask_out")){ rgr->flg_msk_out=False; continue; } /* !msk */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_apl") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_apply")){ rgr->flg_msk_apl=True; /* Ensure masked fields regridded with TR maps have _FillValue to guarantee BFB arithmetic with masked fields regridded with other maps that adhere to SCRIP/ESMF mask rules */ rgr->flg_add_fll=True; continue; } /* !msk_apl */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_out") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_out")){ rgr->flg_msk_out=True; continue; } /* !mask */ 
if(!strcmp(rgr_lst[rgr_var_idx].key,"add_fll") || !strcmp(rgr_lst[rgr_var_idx].key,"add_fill_value") || !strcmp(rgr_lst[rgr_var_idx].key,"fll_mpt") || !strcmp(rgr_lst[rgr_var_idx].key,"fill_empty")){ rgr->flg_add_fll=True; continue; } /* !add_fll */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_measures") || !strcmp(rgr_lst[rgr_var_idx].key,"cll_msr")){ rgr->flg_cll_msr=True; continue; } /* !cell_measures */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_cell_measures") || !strcmp(rgr_lst[rgr_var_idx].key,"no_cll_msr")){ rgr->flg_cll_msr=False; continue; } /* !cell_measures */ if(!strcmp(rgr_lst[rgr_var_idx].key,"curvilinear") || !strcmp(rgr_lst[rgr_var_idx].key,"crv")){ rgr->flg_crv=True; continue; } /* !curvilinear */ if(!strcmp(rgr_lst[rgr_var_idx].key,"diagnose_area") || !strcmp(rgr_lst[rgr_var_idx].key,"dgn_area")){ rgr->flg_dgn_area=True; continue; } /* !diagnose_area */ if(!strcmp(rgr_lst[rgr_var_idx].key,"diagnose_bounds") || !strcmp(rgr_lst[rgr_var_idx].key,"dgn_bnd")){ rgr->flg_dgn_bnd=True; continue; } /* !diagnose_bounds */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cf_units") || !strcmp(rgr_lst[rgr_var_idx].key,"CF_units")){ rgr->flg_cf_units=True; rgr->flg_erwg_units=False; continue; } /* !erwg_units */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_area_quad")){ rgr->area_mth=2; continue; } /* !area_nco */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_area_nco")){ rgr->area_mth=1; continue; } /* !area_nco */ if(!strcmp(rgr_lst[rgr_var_idx].key,"edg_typ") || !strcmp(rgr_lst[rgr_var_idx].key,"tri_arc") || !strcmp(rgr_lst[rgr_var_idx].key,"vrt_cnc")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"grt_crc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"gtc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"great_circle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"geodesic") || !strcasecmp(rgr_lst[rgr_var_idx].val,"orthodrome")){ rgr->edg_typ=nco_edg_gtc; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"sml_crc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ltr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"small_circle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"latitude_triangle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"true")){ rgr->edg_typ=nco_edg_smc; (void)fprintf(stderr,"%s: WARNING Requested to run with small-circle edges. This option has not yet been tested and validated. 
Use only at your own risk.\n",nco_prg_nm_get()); }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"crt") || !strcasecmp(rgr_lst[rgr_var_idx].val,"cartesian") || !strcasecmp(rgr_lst[rgr_var_idx].val,"planar") || !strcasecmp(rgr_lst[rgr_var_idx].val,"flat")){ rgr->edg_typ=nco_edg_crt; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !edg_typ */ if(!strcmp(rgr_lst[rgr_var_idx].key,"erwg_units") || !strcmp(rgr_lst[rgr_var_idx].key,"esmf_units") || !strcmp(rgr_lst[rgr_var_idx].key,"degrees")){ rgr->flg_cf_units=False; rgr->flg_erwg_units=True; continue; } /* !erwg_units */ if(!strcmp(rgr_lst[rgr_var_idx].key,"infer") || !strcmp(rgr_lst[rgr_var_idx].key,"nfr")){ rgr->flg_nfr=True; continue; } /* !infer */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_stagger") || !strcmp(rgr_lst[rgr_var_idx].key,"no_stg")){ rgr->flg_stg=False; continue; } /* !stagger */ if(!strcmp(rgr_lst[rgr_var_idx].key,"grd_ttl") || !strcmp(rgr_lst[rgr_var_idx].key,"ttl")){ if(rgr->grd_ttl) rgr->grd_ttl=(char *)nco_free(rgr->grd_ttl); rgr->grd_ttl=(char *)strdup(rgr_lst[rgr_var_idx].val); /* 20180828 Replace unquoted tildes with spaces (like LaTeX, NCL) so ncremap users can put tildes in place of spaces in ttl 20180905 Reverted this since quoting command in ncremap is superior solution */ if(False){ size_t ttl_lng=strlen(rgr->grd_ttl); for(size_t ttl_idx=0L;ttl_idx<ttl_lng;ttl_idx++) if(rgr->grd_ttl[ttl_idx] == '~'){ if(ttl_idx == 0L) rgr->grd_ttl[ttl_idx]=' '; // Always convert tilde to space if first character else if(rgr->grd_ttl[ttl_idx-1L] != '\\') rgr->grd_ttl[ttl_idx]=' '; // Convert tilde in other locations unless backslash-quoted } /* !tilde */ } /* !0 */ continue; } /* !grd_ttl */ if(!strcmp(rgr_lst[rgr_var_idx].key,"idx_dbg")){ rgr->idx_dbg=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !idx_dbg */ if(!strcmp(rgr_lst[rgr_var_idx].key,"latlon")){ cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%ld,%ld",&rgr->lat_nbr,&rgr->lon_nbr); assert(cnv_nbr == 2); continue; } /* !latlon */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lonlat")){ cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%ld,%ld",&rgr->lon_nbr,&rgr->lat_nbr); assert(cnv_nbr == 2); continue; } /* !lonlat */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nbr")){ rgr->lat_nbr=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !lat_nbr */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nbr")){ rgr->lon_nbr=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !lon_nbr */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"snwe")){ cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%lf,%lf,%lf,%lf",&rgr->lat_sth,&rgr->lat_nrt,&rgr->lon_wst,&rgr->lon_est); if(cnv_nbr != 4) (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); assert(cnv_nbr == 4); if(cnv_nbr != 4) abort(); /* CEWI Use cnv_nbr at least once outside of assert() to avoid gcc 4.8.2 set-but-not-used warning */ continue; } /* !snwe */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"wesn")){ if(cnv_nbr != 4) 
cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%lf,%lf,%lf,%lf",&rgr->lon_wst,&rgr->lon_est,&rgr->lat_sth,&rgr->lat_nrt); assert(cnv_nbr == 4); continue; } /* !wesn */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_crv")){ rgr->lat_crv=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !lat_crv */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_crv")){ rgr->lon_crv=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !lon_crv */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_sth")){ rgr->lat_sth=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); // rgr->lat_typ=nco_grd_lat_bb; continue; } /* !lat_sth */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_wst")){ rgr->lon_wst=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); rgr->lon_typ=nco_grd_lon_bb; continue; } /* !lon_wst */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nrt")){ rgr->lat_nrt=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); //rgr->lat_typ=nco_grd_lat_bb; continue; } /* !lat_nrt */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_est")){ rgr->lon_est=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); rgr->lon_typ=nco_grd_lon_bb; continue; } /* !lon_est */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_drc")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"s2n") || !strcasecmp(rgr_lst[rgr_var_idx].val,"south2north") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ston") || !strcasecmp(rgr_lst[rgr_var_idx].val,"southnorth")){ rgr->lat_drc=nco_grd_lat_drc_s2n; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"n2s") || !strcasecmp(rgr_lst[rgr_var_idx].val,"north2south") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ntos") || !strcasecmp(rgr_lst[rgr_var_idx].val,"northsouth")){ rgr->lat_drc=nco_grd_lat_drc_n2s; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !lat_drc */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_typ")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"cap") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fv") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fix") || !strcasecmp(rgr_lst[rgr_var_idx].val,"yarmulke")){ rgr->lat_typ=nco_grd_lat_fv; rgr->grd_typ=nco_grd_2D_fv; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"eqa") || !strcasecmp(rgr_lst[rgr_var_idx].val,"rgl") || !strcasecmp(rgr_lst[rgr_var_idx].val,"unf") || !strcasecmp(rgr_lst[rgr_var_idx].val,"uni")){ rgr->lat_typ=nco_grd_lat_eqa; rgr->grd_typ=nco_grd_2D_eqa; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"gss")){ rgr->lat_typ=nco_grd_lat_gss; rgr->grd_typ=nco_grd_2D_gss; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !lat_typ */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_typ")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"180_wst") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wst_180")) rgr->lon_typ=nco_grd_lon_180_wst; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"180_ctr") || 
!strcasecmp(rgr_lst[rgr_var_idx].val,"ctr_180")) rgr->lon_typ=nco_grd_lon_180_ctr; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"Grn_wst") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wst_Grn")) rgr->lon_typ=nco_grd_lon_Grn_wst; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"Grn_ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ctr_Grn")) rgr->lon_typ=nco_grd_lon_Grn_ctr; else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !lon_typ */ if(!strcmp(rgr_lst[rgr_var_idx].key,"area_nm")){ rgr->area_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !area_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"bnd_nm")){ rgr->bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"bnd_tm_nm")){ rgr->bnd_tm_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !bnd_tm_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"col_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"col_nm")){ rgr->col_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !col_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"col_nm_out")){ rgr->col_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !col_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"frc_nm")){ rgr->frc_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !frc_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm")){ rgr->ilev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !ilev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm_out")){ rgr->ilev_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !ilev_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_bnd_nm")){ rgr->lat_bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_dmn_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"lat_dmn")){ rgr->lat_dmn_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_dmn_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lat_nm")){ rgr->lat_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nm_out")){ rgr->lat_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_vrt_nm")){ rgr->lat_vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_wgt_nm")){ rgr->lat_wgt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_wgt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lev_nm")){ rgr->lev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lev_nm_out")){ rgr->lev_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lev_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_bnd_nm")){ rgr->lon_bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_dmn_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"lon_dmn")){ rgr->lon_dmn_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_dmn_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lon_nm")){ rgr->lon_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_nm_in 
*/ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nm_out")){ rgr->lon_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_vrt_nm")){ rgr->lon_vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"plev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"plev_nm")){ rgr->plev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !plev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ply_tri")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"csz")){ rgr->ply_tri_mth=nco_ply_tri_mth_csz; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"centroid") || !strcasecmp(rgr_lst[rgr_var_idx].val,"snl") || !strcasecmp(rgr_lst[rgr_var_idx].val,"mat")){ rgr->ply_tri_mth=nco_ply_tri_mth_ctr; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !ply_tri */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_frc_nm")){ rgr->sgs_frc_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !sgs_frc */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_msk_nm")){ rgr->sgs_msk_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !sgs_msk */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_nrm")){ rgr->sgs_nrm=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !sgs_nrm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"tst")){ rgr->tst=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !tst */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_nm")){ rgr->msk_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_msk_out=True; continue; } /* !msk_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"vrt_nm")){ rgr->vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"vrt_ntp") || !strcmp(rgr_lst[rgr_var_idx].key,"ntp_mth")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"lin") || !strcasecmp(rgr_lst[rgr_var_idx].val,"linear") || !strcasecmp(rgr_lst[rgr_var_idx].val,"lnr")){ rgr->ntp_mth=nco_ntp_lnr; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"log") || !strcasecmp(rgr_lst[rgr_var_idx].val,"logarithmic") || !strcasecmp(rgr_lst[rgr_var_idx].val,"lgr")){ rgr->ntp_mth=nco_ntp_log; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !ntp_mth */ if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_mth")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"nrs_ngh") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ngh") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nearest_neighbor") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nn")){ rgr->xtr_mth=nco_xtr_fll_ngh; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"mss_val") || !strcasecmp(rgr_lst[rgr_var_idx].val,"msv") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fll_val") || !strcasecmp(rgr_lst[rgr_var_idx].val,"missing_value")){ rgr->xtr_mth=nco_xtr_fll_msv; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), 
aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !xtr_mth */ if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_nsp") || !strcmp(rgr_lst[rgr_var_idx].key,"xtr_nbr_src_pnt") || !strcmp(rgr_lst[rgr_var_idx].key,"number_source_points") || !strcmp(rgr_lst[rgr_var_idx].key,"extrapolation_number_source_points")){ rgr->xtr_nsp=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !xtr_nsp */ if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_xpn") || !strcmp(rgr_lst[rgr_var_idx].key,"extrapolation_exponent") || !strcmp(rgr_lst[rgr_var_idx].key,"exponent_of_distance_in_extrapolation")){ rgr->xtr_xpn=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !xtr_xpn */ if(!strcmp(rgr_lst[rgr_var_idx].key,"wgt_typ") || !strcmp(rgr_lst[rgr_var_idx].key,"weight_type")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"con") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_con") || !strcasecmp(rgr_lst[rgr_var_idx].val,"conservative") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_con")) rgr->wgt_typ=nco_wgt_con; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"idw") || !strcasecmp(rgr_lst[rgr_var_idx].val,"dwe") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_idw") || !strcasecmp(rgr_lst[rgr_var_idx].val,"distance_weighted") || !strcasecmp(rgr_lst[rgr_var_idx].val,"inverse_distance") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_idw")) rgr->wgt_typ=nco_wgt_idw; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"bln") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_bln") || !strcasecmp(rgr_lst[rgr_var_idx].val,"bilinear") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_bln")) rgr->wgt_typ=nco_wgt_bln; else { (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !wgt_typ */ (void)fprintf(stderr,"%s: ERROR %s reports unrecognized key-value option to --rgr switch: %s\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key); nco_exit(EXIT_FAILURE); } /* !rgr_var_idx */ /* Eliminate sticky wickets: Give nfr precedence over grd */ if(rgr->flg_nfr && rgr->flg_grd) rgr->flg_grd=False; /* Revert to defaults for any names not specified on command-line */ if(!rgr->area_nm) rgr->area_nm=(char *)strdup("area"); /* [sng] Name of variable containing gridcell area */ if(!rgr->bnd_nm) rgr->bnd_nm=(char *)strdup("nvertices"); /* [sng] Name of dimension to employ for spatial bounds */ /* NB: CESM uses nbnd and ilev for temporal and vertical bounds, respectively (CESM outputs no horizontal spatial bounds). NCO defaults to nbnd for all bounds with two endpoints. 
*/ if(!rgr->bnd_tm_nm) rgr->bnd_tm_nm=(char *)strdup("nbnd"); /* [sng] Name of dimension to employ for temporal bounds */ if(!rgr->col_nm_in) rgr->col_nm_in=(char *)strdup("ncol"); /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ if(!rgr->frc_nm) rgr->frc_nm=(char *)strdup("frac_b"); /* [sng] Name of variable containing gridcell fraction */ if(!rgr->ilev_nm_in) rgr->ilev_nm_in=(char *)strdup("ilev"); /* [sng] Name of input dimension to recognize as vertical dimension at layer interfaces */ if(!rgr->lat_bnd_nm) rgr->lat_bnd_nm=(char *)strdup("lat_bnds"); /* [sng] Name of rectangular boundary variable for latitude */ if(!rgr->lat_nm_in) rgr->lat_nm_in=(char *)strdup("lat"); /* [sng] Name of input dimension to recognize as latitude */ if(!rgr->lev_nm_in) rgr->lev_nm_in=(char *)strdup("lev"); /* [sng] Name of input dimension to recognize as vertical dimension at layer midpoints */ if(!rgr->lat_vrt_nm) rgr->lat_vrt_nm=(char *)strdup("lat_vertices"); /* [sng] Name of non-rectangular boundary variable for latitude */ if(!rgr->lat_wgt_nm) rgr->lat_wgt_nm=(char *)strdup("gw"); /* [sng] Name of variable containing latitude weights */ if(!rgr->lon_bnd_nm) rgr->lon_bnd_nm=(char *)strdup("lon_bnds"); /* [sng] Name of rectangular boundary variable for longitude */ if(!rgr->lon_nm_in) rgr->lon_nm_in=(char *)strdup("lon"); /* [sng] Name of dimension to recognize as longitude */ if(!rgr->lon_vrt_nm) rgr->lon_vrt_nm=(char *)strdup("lon_vertices"); /* [sng] Name of non-rectangular boundary variable for longitude */ if(!rgr->msk_nm) rgr->msk_nm=(char *)strdup("mask_b"); /* [sng] Name of variable containing destination mask */ if(!rgr->vrt_nm) rgr->vrt_nm=(char *)strdup("nv"); /* [sng] Name of dimension to employ for vertices */ if(!rgr->plev_nm_in) rgr->plev_nm_in=(char *)strdup("plev"); /* [sng] Name of variable to recognize as pure pressure coordinate */ /* Derived from defaults and command-line arguments */ // On second thought, do not strdup() these here. 
This way, NULL means user never specified lon/lat-out names // if(!rgr->col_nm_out) rgr->col_nm_out=(char *)strdup("ncol"); /* [sng] Name of dimension to output as horizontal spatial dimension on unstructured grid */ // if(!rgr->lat_nm_out) rgr->lat_nm_out=(char *)strdup("lat"); /* [sng] Name of dimension to output as latitude */ // if(!rgr->lon_nm_out) rgr->lon_nm_out=(char *)strdup("lon"); /* [sng] Name of dimension to output as longitude */ // if(!rgr->lat_nm_out) rgr->lat_nm_out=(char *)strdup(rgr_lat_nm_in); /* [sng] Name of output dimension for latitude */ // if(!rgr->lon_nm_out) rgr->lon_nm_out=(char *)strdup(rgr_lon_nm_in); /* [sng] Name of output dimension for longitude */ /* Free kvms */ if(rgr_lst) rgr_lst=nco_kvm_lst_free(rgr_lst,rgr_var_nbr); return rgr; } /* end nco_rgr_ini() */ int /* O [enm] Return code */ nco_ntp_vrt /* [fnc] Interpolate vertically */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Interpolate fields to new vertical grid specified in a vertical file */ const char fnc_nm[]="nco_ntp_vrt()"; /* [sng] Function name */ char *fl_tpl; /* [sng] Template file (vertical grid file) */ char *fl_pth_lcl=NULL; int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int in_id; /* I [id] Input netCDF file ID */ int tpl_id; /* [id] Input netCDF file ID (for vertical grid template) */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int dmn_idx; /* [idx] Dimension index */ int rec_idx; /* [idx] Record dimension index */ nco_bool FL_RTR_RMT_LCN; nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s obtaining vertical grid from %s\n",nco_prg_nm_get(),fnc_nm,rgr->fl_vrt); /* Duplicate (because nco_fl_mk_lcl() free()'s its fl_in) */ fl_tpl=(char *)strdup(rgr->fl_vrt); /* Make sure file is on local system and is readable or die trying */ fl_tpl=nco_fl_mk_lcl(fl_tpl,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_tpl,md_open,&bfr_sz_hnt,&tpl_id); /* Formula-terms for hybrid pressure vertical grid on unstructured CAM/EAM horizontal grid: prs_mdp[time,lev,col]=P0*hyam[lev] +PS[time,col]*hybm[lev] prs_ntf[time,lev,col]=P0*hyai[ilev]+PS[time,col]*hybi[ilev] */ /* Formula-terms for hybrid pressure vertical grid on ECMWF RLL horizontal grid: prs_mdp[time,lev,lat,lon]=hyam[lev] +exp(lnsp[time,lat,lon])*hybm[lev] prs_ntf[time,lev,lat,lon]=hyai[ilev]+exp(lnsp[time,lat,lon])*hybi[ilev] */ /* For simplicity and code re-use, all single-variable (not hybrid-variable) coordinate systems adopt "lev" semantics This includes pure pressure coordinates and eventually will include sigma, depth, and height coordinates Only hybrid coordinates will refer to the "ilev" levels and indices All single coordinate systems will refer to "lev" levels and indices */ int dpt_id; /* [id] Ocean depth ID */ 
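/* Illustrative sketch (hypothetical helper, not part of NCO) of the formula-terms documented above:
   evaluate CAM/EAM hybrid midpoint and interface pressures for one column and timestep.
   For ECMWF grids the same loops apply with hyai/hyam already in Pa (P0 absorbed) and ps=exp(lnsp):

   static void
   prs_hyb_clm // Fill prs_mdp[lev_nbr] and prs_ntf[lev_nbr+1] for one column
   (const double p0, // I [Pa] Reference pressure
    const double ps, // I [Pa] Surface pressure at this column
    const double *hyam,const double *hybm, // I [frc] Midpoint A,B coefficients [lev_nbr]
    const double *hyai,const double *hybi, // I [frc] Interface A,B coefficients [lev_nbr+1]
    const long lev_nbr, // I [nbr] Number of midpoint levels
    double *prs_mdp,double *prs_ntf) // O [Pa] Midpoint and interface pressures
   {
     for(long lev_idx=0;lev_idx<lev_nbr;lev_idx++) prs_mdp[lev_idx]=p0*hyam[lev_idx]+ps*hybm[lev_idx];
     for(long ilev_idx=0;ilev_idx<lev_nbr+1;ilev_idx++) prs_ntf[ilev_idx]=p0*hyai[ilev_idx]+ps*hybi[ilev_idx];
   } */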
int hyai_id=NC_MIN_INT; /* [id] Hybrid A coefficient at layer interfaces ID */ int hyam_id=NC_MIN_INT; /* [id] Hybrid A coefficient at layer midpoints ID */ int hybi_id=NC_MIN_INT; /* [id] Hybrid B coefficient at layer interfaces ID */ int hybm_id=NC_MIN_INT; /* [id] Hybrid B coefficient at layer midpoints ID */ int ilev_id=NC_MIN_INT; /* [id] Interface pressure ID */ int lev_id=NC_MIN_INT; /* [id] Midpoint pressure ID */ int p0_id=NC_MIN_INT; /* [id] Reference pressure ID */ int ps_id=NC_MIN_INT; /* [id] Surface pressure ID */ int plev_id; /* [id] Air pressure ID */ nco_bool flg_grd_hyb_cameam=False; /* [flg] Hybrid coordinate vertical grid uses CAM/EAM conventions */ nco_bool flg_grd_hyb_ecmwf=False; /* [flg] Hybrid coordinate vertical grid uses ECMWF conventions */ nco_bool flg_grd_in_dpt=False; /* [flg] Input depth coordinate vertical grid */ nco_bool flg_grd_in_hyb=False; /* [flg] Input hybrid coordinate vertical grid */ nco_bool flg_grd_in_prs=False; /* [flg] Input pressure coordinate vertical grid */ nco_bool flg_grd_out_dpt=False; /* [flg] Output depth coordinate vertical grid */ nco_bool flg_grd_out_hyb=False; /* [flg] Output hybrid coordinate vertical grid */ nco_bool flg_grd_out_prs=False; /* [flg] Output pressure coordinate vertical grid */ nco_bool flg_vrt_tm=False; /* [flg] Output depends on time-varying vertical grid */ nco_grd_vrt_typ_enm nco_vrt_grd_in=nco_vrt_grd_nil; /* [enm] Vertical grid type for input grid */ nco_grd_vrt_typ_enm nco_vrt_grd_out=nco_vrt_grd_nil; /* [enm] Vertical grid type for output grid */ nco_ntp_typ_enm ntp_mth=rgr->ntp_mth; /* [enm] Interpolation method */ nco_xtr_typ_enm xtr_mth=rgr->xtr_mth; /* [enm] Extrapolation method */ /* Determine output grid type */ if((rcd=nco_inq_varid_flg(tpl_id,"hyai",&hyai_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_hyb; /* EAM */ flg_grd_out_hyb=True; }else if((rcd=nco_inq_varid_flg(tpl_id,"plev",&plev_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_prs; /* NCEP */ flg_grd_out_prs=True; }else if((rcd=nco_inq_varid_flg(tpl_id,"depth",&dpt_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_dpt; /* MPAS */ flg_grd_out_dpt=True; }else{ /* !hyai */ (void)fprintf(stdout,"%s: ERROR %s Unable to locate hybrid-sigma/pressure or pure-pressure vertical grid coordinate information in vertical grid file\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT ensure vertical grid coordinate file contains a valid vertical grid coordinate\n",nco_prg_nm_get()); return NCO_ERR; } /* !hyai */ if(flg_grd_out_hyb){ rcd=nco_inq_varid(tpl_id,"hyai",&hyai_id); rcd=nco_inq_varid(tpl_id,"hyam",&hyam_id); rcd=nco_inq_varid(tpl_id,"hybi",&hybi_id); rcd=nco_inq_varid(tpl_id,"hybm",&hybm_id); rcd=nco_inq_varid(tpl_id,"P0",&p0_id); rcd=nco_inq_varid_flg(tpl_id,"ilev",&ilev_id); rcd=nco_inq_varid_flg(tpl_id,"lev",&lev_id); rcd=nco_inq_varid_flg(tpl_id,"PS",&ps_id); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd=nco_inq_varid(tpl_id,"plev",&lev_id); } /* !flg_grd_out_prs */ if(flg_grd_out_dpt){ rcd=nco_inq_varid(tpl_id,"depth",&lev_id); } /* !flg_grd_out_dpt */ const int hyai_id_tpl=hyai_id; /* [id] Hybrid A coefficient at layer interfaces ID */ const int hyam_id_tpl=hyam_id; /* [id] Hybrid A coefficient at layer midpoints ID */ const int hybi_id_tpl=hybi_id; /* [id] Hybrid B coefficient at layer interfaces ID */ const int hybm_id_tpl=hybm_id; /* [id] Hybrid B coefficient at layer midpoints ID */ const int p0_id_tpl=p0_id; /* [id] Reference pressure ID */ const int ilev_id_tpl=ilev_id; /* [id] Interface pressure ID */ const int 
lev_id_tpl=lev_id; /* [id] Midpoint pressure ID */ const int ps_id_tpl=ps_id; /* [id] Surface pressure ID */ char *ilev_nm_in=NULL; /* [sng] Interface level name */ char *lev_nm_in; char *ilev_nm_out; char *lev_nm_out; char *plev_nm_in; /* [sng] Pure-pressure coordinate name */ char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */ int *dmn_ids_in=NULL; /* [nbr] Input file dimension IDs */ int *dmn_ids_out=NULL; /* [nbr] Output file dimension IDs */ int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */ int dmn_nbr_ps; /* [nbr] Number of dimensions in PS variable */ int dmn_nbr_in; /* [nbr] Number of dimensions in input file */ int dmn_nbr_out; /* [nbr] Number of dimensions in output file */ int dmn_id_ilev_out=NC_MIN_INT; /* [id] Dimension ID for interface level in output file */ int dmn_id_lev_out=NC_MIN_INT; /* [id] Dimension ID for midpoint level in output file */ int dmn_id_ilev_in=NC_MIN_INT; /* [id] Dimension ID for interface level in file to be interpolated */ int dmn_id_lev_in=NC_MIN_INT; /* [id] Dimension ID for midpoint level in file to be interpolated */ int dmn_id_tm_in=NC_MIN_INT; /* [id] Dimension ID for time in file to be interpolated */ int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */ int dmn_idx_tm_in=NC_MIN_INT; /* [idx] Index of record coordinate in input hybrid coordinate PS field */ long *dmn_cnt_in=NULL; long *dmn_cnt_out=NULL; long *dmn_srt=NULL; long ilev_nbr_in; long lev_nbr_in; long ilev_nbr_out; long lev_nbr_out; long tm_idx=0L; /* [idx] Current timestep */ long tm_nbr=1L; /* [idx] Number of timesteps in vertical grid */ long tm_nbr_in=1L; /* [nbr] Number of timesteps in input vertical grid definition */ long tm_nbr_out=1L; /* [nbr] Number of timesteps in output vertical grid definition */ size_t grd_idx; /* [idx] Gridcell index */ size_t grd_sz_in=1L; /* [nbr] Number of elements in single layer of input grid */ size_t grd_sz_out=1L; /* [nbr] Number of elements in single layer of output grid */ size_t idx_fst; /* [idx] Index-offset to current surface pressure timeslice */ if(flg_grd_out_hyb){ /* Interrogate hyai/hyam to obtain ilev/lev dimensions */ rcd=nco_inq_vardimid(tpl_id,hyai_id,&dmn_id_ilev_out); rcd=nco_inq_vardimid(tpl_id,hyam_id,&dmn_id_lev_out); rcd=nco_inq_dimlen(tpl_id,dmn_id_ilev_out,&ilev_nbr_out); rcd=nco_inq_dimlen(tpl_id,dmn_id_lev_out,&lev_nbr_out); rcd=nco_inq_dimname(tpl_id,dmn_id_ilev_out,dmn_nm); ilev_nm_out=strdup(dmn_nm); rcd=nco_inq_dimname(tpl_id,dmn_id_lev_out,dmn_nm); lev_nm_out=strdup(dmn_nm); /* Interrogate PS, if any, for horizontal dimensions */ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_inq_varndims(tpl_id,ps_id,&dmn_nbr_ps); dmn_nbr_out=dmn_nbr_ps; dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_cnt_out=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long)); dmn_srt=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long)); rcd=nco_inq_vardimid(tpl_id,ps_id,dmn_ids_out); rcd=nco_inq_unlimdims(tpl_id,&dmn_nbr_rec,(int *)NULL); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd=nco_inq_unlimdims(tpl_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ rcd=nco_inq_dimlen(tpl_id,dmn_ids_out[dmn_idx],dmn_cnt_out+dmn_idx); /* 20190330: Allow possibility that PS has time dimension > 1 We want horizontal not temporal dimensions to contribute to grd_sz Temporal dimension is usually unlimited Only multiply grd_sz by fixed (non-unlimited) dimension sizes Corner-case exception when PS spatial dimension on unstructured grid is unlimited */ 
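/* Minimal sketch (hypothetical, mirrors the dimension loop below) of the rule just documented:
   accumulate the horizontal grid size of PS from its fixed (non-record) dimensions only, so an
   unlimited time dimension never inflates grd_sz. Array names dmn_ids[], dmn_cnt[] and counts
   dmn_nbr, dmn_nbr_rec, dmn_ids_rec[] are assumed analogues of the variables used here:

   size_t grd_sz=1UL; // [nbr] Elements in single horizontal layer
   long tm_nbr=1L; // [nbr] Timesteps implied by record dimension
   for(int dmn_idx=0;dmn_idx<dmn_nbr;dmn_idx++){
     int rec_idx;
     for(rec_idx=0;rec_idx<dmn_nbr_rec;rec_idx++)
       if(dmn_ids[dmn_idx] == dmn_ids_rec[rec_idx]) break; // Is this a record (unlimited) dimension?
     if(rec_idx == dmn_nbr_rec) grd_sz*=dmn_cnt[dmn_idx]; // Fixed dimensions contribute to grid size
     else tm_nbr=dmn_cnt[dmn_idx]; // Record dimension supplies the timestep count instead
   } */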
for(rec_idx=0;rec_idx<dmn_nbr_rec;rec_idx++) if(dmn_ids_out[dmn_idx] == dmn_ids_rec[rec_idx]) break; if(rec_idx == dmn_nbr_rec || dmn_nbr_out == 1) grd_sz_out*=dmn_cnt_out[dmn_idx]; if(rec_idx != dmn_nbr_rec && dmn_nbr_out > 1 && dmn_cnt_out[dmn_idx] > 1L){ tm_nbr_out=dmn_cnt_out[dmn_idx]; if(tm_nbr_out > 1L) flg_vrt_tm=True; } /* tm_nbr_out > 1 */ dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); } /* !ps_id_tpl */ } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ /* Interrogate plev to obtain plev dimensions */ rcd=nco_inq_vardimid(tpl_id,lev_id,&dmn_id_lev_out); rcd=nco_inq_dimlen(tpl_id,dmn_id_lev_out,&lev_nbr_out); rcd=nco_inq_dimname(tpl_id,dmn_id_lev_out,dmn_nm); ilev_nbr_out=lev_nbr_out; } /* !flg_grd_out_prs */ double *hyai_out=NULL; /* [frc] Hybrid A coefficient at layer interfaces on output grid */ double *hyam_out=NULL; /* [frc] Hybrid A coefficient at layer midpoints on output grid */ double *hybi_out=NULL; /* [frc] Hybrid B coefficient at layer interfaces on output grid */ double *hybm_out=NULL; /* [frc] Hybrid B coefficient at layer midpoints on output grid */ double *ilev_out=NULL; /* [hPa] Interface pressure on output grid */ double *lev_out=NULL; /* [hPa] Midpoint pressure on output grid */ double *ps_out=NULL; /* [Pa] Surface pressure on output grid */ double *prs_mdp_out=NULL; /* [Pa] Midpoint pressure on output grid */ double *prs_ntf_out=NULL; /* [Pa] Interface pressure on output grid */ double p0_out; /* [Pa] Reference pressure on output grid */ long ilev_idx; /* [idx] Interface level index */ long lev_idx; /* [idx] Level index */ const nc_type crd_typ_out=NC_DOUBLE; nc_type var_typ_rgr; /* [enm] Variable type used during regridding */ var_typ_rgr=NC_DOUBLE; /* NB: Perform interpolation in double precision */ if(flg_grd_out_hyb){ hyai_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr)); hyam_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); hybi_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr)); hybm_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); ilev_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr)); lev_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(tpl_id,hyai_id,hyai_out,crd_typ_out); rcd=nco_get_var(tpl_id,hyam_id,hyam_out,crd_typ_out); rcd=nco_get_var(tpl_id,hybi_id,hybi_out,crd_typ_out); rcd=nco_get_var(tpl_id,hybm_id,hybm_out,crd_typ_out); rcd=nco_get_var(tpl_id,p0_id,&p0_out,crd_typ_out); if(ilev_id_tpl != NC_MIN_INT){ rcd=nco_get_var(tpl_id,ilev_id,ilev_out,crd_typ_out); }else{ /* p0 is in Pa but ilev traditionally given in hPa */ for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) ilev_out[ilev_idx]=p0_out*(hyai_out[ilev_idx]+hybi_out[ilev_idx])/100.0; } /* !ilev_id_tpl */ if(lev_id_tpl != NC_MIN_INT){ rcd=nco_get_var(tpl_id,lev_id,lev_out,crd_typ_out); }else{ /* p0 is in Pa but lev traditionally given in hPa */ for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) lev_out[lev_idx]=p0_out*(hyam_out[lev_idx]+hybm_out[lev_idx])/100.0; } /* !lev_id_tpl */ } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ lev_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(tpl_id,lev_id,lev_out,crd_typ_out); } /* !flg_grd_out_prs */ /* For vertical interpolation (unlike horizontal regridding), the destination grid is known a priori A straightforward copy of all variables and attributes that define the grid from fl_tpl to output would work in theory, but would not allow dynamic identification and relabeling of names */ /* 
if(flg_grd_out_hyb){ const int vrt_grd_lst_nbr=8; const char *vrt_grd_lst[]={"/hyai","/hyam","/hybi","/hybm","/ilev","/lev","/P0","/PS"}; } if(flg_grd_out_prs){ const int vrt_grd_lst_nbr=1; const char *vrt_grd_lst[]={"/plev"}; } */ /* Above this line, fl_tpl and tpl_id refer to vertical coordinate file (i.e., template file) Below this line, fl_in and in_id refer to input file to be vertically regridded Do not close template file until all grid variables have been copied For maximum efficiency, do this after defining all interpolated variables in output That way no file needs to exit define mode or enter data mode more than once However this requires keeping template file, input data file, and output file simultaneously open */ in_id=rgr->in_id; out_id=rgr->out_id; /* Determine input grid type */ if(rgr->plev_nm_in) plev_nm_in=rgr->plev_nm_in; if((rcd=nco_inq_varid_flg(in_id,"hyai",&hyai_id)) == NC_NOERR){ nco_vrt_grd_in=nco_vrt_grd_hyb; /* EAM */ flg_grd_in_hyb=True; }else if((rcd=nco_inq_varid_flg(in_id,plev_nm_in,&plev_id)) == NC_NOERR){ nco_vrt_grd_in=nco_vrt_grd_prs; /* NCEP */ flg_grd_in_prs=True; }else if((rcd=nco_inq_varid_flg(in_id,"depth",&dpt_id)) == NC_NOERR){ nco_vrt_grd_in=nco_vrt_grd_dpt; /* MPAS */ flg_grd_in_dpt=True; }else{ /* !hyai */ (void)fprintf(stdout,"%s: ERROR %s Unable to locate hybrid-sigma/pressure or pure-pressure vertical grid coordinate information in input file\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT only invoke vertical interpolation on files that contain variables with vertical dimensions, and with known vertical coordinate variable names. These default to \"hyai\" for hybrid, \"plev\" for pressure, \"depth\" for depth. See http://nco.sf.net/nco.html#lev_nm for options to change these names at run-time, e.g., \"--rgr plev_nm=vrt_nm\"\n",nco_prg_nm_get()); return NCO_ERR; } /* !hyai */ /* Sanity checks: One type of input and one type of output grid detected */ assert(!(flg_grd_in_hyb && flg_grd_in_prs)); assert(!(flg_grd_in_hyb && flg_grd_in_dpt)); assert(!(flg_grd_in_prs && flg_grd_in_dpt)); assert(flg_grd_in_hyb || flg_grd_in_prs || flg_grd_in_dpt); assert(!(flg_grd_out_hyb && flg_grd_out_prs)); assert(!(flg_grd_out_hyb && flg_grd_out_dpt)); assert(!(flg_grd_out_prs && flg_grd_out_dpt)); assert(flg_grd_out_hyb || flg_grd_out_prs || flg_grd_out_dpt); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG Input grid flags : flg_grd_in_hyb = %d, flg_grd_in_prs = %d, flg_grd_in_dpt = %d\n",nco_prg_nm_get(),flg_grd_in_hyb,flg_grd_in_prs,flg_grd_in_dpt); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG Output grid flags: flg_grd_out_hyb = %d, flg_grd_out_prs = %d, flg_grd_out_dpt = %d\n",nco_prg_nm_get(),flg_grd_out_hyb,flg_grd_out_prs,flg_grd_out_dpt); /* 20191219: This block is not used, deprecate it? Or use once new coordinates like altitude, depth supported? 
*/ nco_vrt_ntp_typ_enm nco_vrt_ntp_typ=nco_ntp_nil; /* Vertical interpolation type */ if(nco_vrt_grd_in == nco_vrt_grd_hyb && nco_vrt_grd_out == nco_vrt_grd_hyb) nco_vrt_ntp_typ=nco_ntp_hyb_to_hyb; if(nco_vrt_grd_in == nco_vrt_grd_hyb && nco_vrt_grd_out == nco_vrt_grd_prs) nco_vrt_ntp_typ=nco_ntp_hyb_to_prs; if(nco_vrt_grd_in == nco_vrt_grd_prs && nco_vrt_grd_out == nco_vrt_grd_hyb) nco_vrt_ntp_typ=nco_ntp_prs_to_hyb; if(nco_vrt_grd_in == nco_vrt_grd_prs && nco_vrt_grd_out == nco_vrt_grd_prs) nco_vrt_ntp_typ=nco_ntp_prs_to_prs; assert(nco_vrt_ntp_typ != nco_ntp_nil); /* Variables on input grid, i.e., on grid in data file to be interpolated */ if(flg_grd_in_hyb){ rcd=nco_inq_varid(in_id,"hyai",&hyai_id); rcd=nco_inq_varid(in_id,"hyam",&hyam_id); rcd=nco_inq_varid(in_id,"hybi",&hybi_id); rcd=nco_inq_varid(in_id,"hybm",&hybm_id); /* 20190602: ECMWF hybrid vertical grid parameters and dimensions differ from CAM/EAM: ECMWF defines vertical dimensions "nhym" and "nhyi" specifically for hy[ab][im] and uses "lev" and "lev_2" for all other variables, whereas CAM/EAM uses same dimensions "lev" and "ilev" for all vertical variables including hybrid coefficients ECMWF provides "hya?" as a constant in Pa and "hyb?" as a dimensionless coefficient of PS, whereas CAM/EAM provides "hya?" and "hyb?" both as dimensionless coefficients of P0 and PS ECMWF provides "lev" and "lev_2" with midpoint and surface pressure indices (not values), respectively, whereas CAM/EAM provides "lev" and "ilev" coordinate values in hPa ECMWF provides dimensionless "lnsp" for log(surface pressure) whereas CAM/EAM provides "PS" for surface pressure in Pa ECMWF "lnsp" has degenerate level dimension "lev_2" whereas CAM/EAM "PS" has no "ilev" dimension ECMWF uses hya? instead of reference pressure whereas CAM/EAM provides "P0" in hPa */ if((rcd=nco_inq_varid_flg(in_id,"lnsp",&ps_id)) == NC_NOERR) flg_grd_hyb_ecmwf=True; else if((rcd=nco_inq_varid_flg(in_id,"PS",&ps_id)) == NC_NOERR) flg_grd_hyb_cameam=True; else{ (void)fprintf(stderr,"%s: ERROR %s Unable to find surface pressure variable required for hybrid grid in input file\n",nco_prg_nm_get(),fnc_nm); abort(); } /* !rcd */ if(flg_grd_hyb_cameam){ rcd=nco_inq_varid(in_id,"P0",&p0_id); ilev_id=NC_MIN_INT; lev_id=NC_MIN_INT; if(ilev_id_tpl == NC_MIN_INT) rcd=nco_inq_varid_flg(in_id,"ilev",&ilev_id); if(lev_id_tpl == NC_MIN_INT) rcd=nco_inq_varid_flg(in_id,"lev",&lev_id); } /* !flg_grd_hyb_cameam */ /* 20190603: We require ECMWF IFS input to have a "lev" coordinate so we can use "lev" dimension not "nhyb" */ if(flg_grd_hyb_ecmwf) rcd=nco_inq_varid(in_id,"lev",&lev_id); } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ rcd=nco_inq_varid(in_id,plev_nm_in,&lev_id); if((rcd=nco_inq_varid_flg(in_id,"PS",&ps_id)) == NC_NOERR){ /* Output file creation procedure discriminates between input surface pressure dimensioned as CAM/EAM vs. ECMWF */ flg_grd_hyb_cameam=True; if(flg_grd_out_hyb && (ps_id_tpl == NC_MIN_INT)) (void)fprintf(stderr,"%s: INFO %s detects variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in pure-pressure input data file. PS will be copied directly from pure-pressure grid input dataset to, and used to construct the pressures of, the output hybrid-coordinate data file.\n",nco_prg_nm_get(),fnc_nm); if(flg_grd_out_hyb && (ps_id_tpl != NC_MIN_INT)) (void)fprintf(stderr,"%s: INFO %s detects variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in both vertical-grid file and pure-pressure input data file. 
The vertical grid-file takes precedence. PS will be copied directly from vertical-grid file to, and used to construct the pressures of, the output hybrid-coordinate data file. PS in input pure-pressure file will be ignored.\n",nco_prg_nm_get(),fnc_nm); }else{ if(flg_grd_out_hyb && (ps_id_tpl == NC_MIN_INT)){ (void)fprintf(stderr,"%s: ERROR %s does not find variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in pure-pressure input data file or in vertical grid-file for hybrid-pressure output. PS must be present in at least one of these files in order to construct the output hybrid-coordinate pressures.\nHINT: Append a valid PS to the input data file or vertical grid-file.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !ps_id_tpl */ } /* !ps_id */ } /* !flg_grd_in_prs */ if(flg_grd_in_dpt){ rcd=nco_inq_varid(in_id,"depth",&lev_id); } /* !flg_grd_in_dpt */ const int ilev_id_in=ilev_id; /* [id] Interface pressure ID */ const int lev_id_in=lev_id; /* [id] Midpoint pressure ID */ const int ps_id_in=ps_id; /* [id] Surface pressure ID */ /* Identify all record-dimensions in input file */ rcd=nco_inq_unlimdims(in_id,&dmn_nbr_rec,(int *)NULL); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ if(flg_grd_in_hyb){ /* Get hybrid vertical information first */ rcd=nco_inq_varndims(in_id,ps_id,&dmn_nbr_in); rcd=nco_inq_vardimid(in_id,hyai_id,&dmn_id_ilev_in); if(flg_grd_hyb_cameam) rcd=nco_inq_vardimid(in_id,hyam_id,&dmn_id_lev_in); if(flg_grd_hyb_ecmwf) rcd=nco_inq_vardimid(in_id,lev_id,&dmn_id_lev_in); rcd=nco_inq_dimlen(in_id,dmn_id_ilev_in,&ilev_nbr_in); rcd=nco_inq_dimlen(in_id,dmn_id_lev_in,&lev_nbr_in); rcd=nco_inq_dimname(in_id,dmn_id_ilev_in,dmn_nm); ilev_nm_in=strdup(dmn_nm); rcd=nco_inq_dimname(in_id,dmn_id_lev_in,dmn_nm); lev_nm_in=strdup(dmn_nm); } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ /* Interrogate plev to obtain plev dimensions */ rcd=nco_inq_vardimid(in_id,lev_id,&dmn_id_lev_in); rcd=nco_inq_dimlen(in_id,dmn_id_lev_in,&lev_nbr_in); rcd=nco_inq_dimname(in_id,dmn_id_lev_in,dmn_nm); lev_nm_in=strdup(dmn_nm); /* Define horizontal grid if no PS is provided (i.e., pure-pressure to pure-pressure interpolation) */ if(!flg_grd_out_hyb){ /* Problem: What is horizontal grid size of pressure grid file? Algorithm: Examine first multi-dimensional variable that includes plev dimension Assume horizontal dimensions vary more rapidly than (i.e., follow) plev Compute horizontal grid size accordingly Set output horizontal size to input horizontal size */ int var_nbr; /* [nbr] Number of variables in file */ int var_idx; /* [idx] Index over variables in file */ rcd=nco_inq(in_id,&dmn_nbr_in,&var_nbr,(int *)NULL,(int *)NULL); dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_cnt_in=(long *)nco_malloc(dmn_nbr_in*sizeof(long)); for(var_idx=0;var_idx<var_nbr;var_idx++){ rcd=nco_inq_varndims(in_id,var_idx,&dmn_nbr_in); rcd=nco_inq_vardimid(in_id,var_idx,dmn_ids_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++) if(dmn_ids_in[dmn_idx] == dmn_id_lev_in) break; /* Does current variable have lev dimension? */ if(dmn_idx < dmn_nbr_in){ /* Yes. Do any dimensions vary more rapidly than lev? */ if(dmn_idx < dmn_nbr_in-1){ /* Yes. 
Assume remaining dimensions are horizontal spatial dimensions */ char var_nm[NC_MAX_NAME+1L]; (void)nc_inq_varname(in_id,var_idx,var_nm); for(int dmn_idx_hrz=dmn_idx+1;dmn_idx_hrz<dmn_nbr_in;dmn_idx_hrz++){ rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx_hrz],dmn_cnt_in+dmn_idx_hrz); grd_sz_in*=dmn_cnt_in[dmn_idx_hrz]; } /* !dmn_idx_hrz */ break; } /* !dmn_idx */ } /* !dmn_idx */ } /* !var_idx */ assert(var_idx != var_nbr); grd_sz_out=grd_sz_in; } /* !flg_grd_out_hyb */ } /* !flg_grd_in_prs */ double *hyai_in=NULL; /* [frc] Hybrid A coefficient at layer interfaces on input grid */ double *hyam_in=NULL; /* [frc] Hybrid A coefficient at layer midpoints on input grid */ double *hybi_in=NULL; /* [frc] Hybrid B coefficient at layer interfaces on input grid */ double *hybm_in=NULL; /* [frc] Hybrid B coefficient at layer midpoints on input grid */ double *lev_in=NULL; /* [Pa] Air pressure on input grid */ double *prs_mdp_in=NULL; /* [Pa] Midpoint pressure on input grid */ double *prs_ntf_in=NULL; /* [Pa] Interface pressure on input grid */ double *ps_in=NULL; /* [Pa] Surface pressure on input grid */ double p0_in; /* [Pa] Reference pressure on input grid */ if(flg_grd_in_hyb){ hyai_in=(double *)nco_malloc(ilev_nbr_in*nco_typ_lng(var_typ_rgr)); hyam_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); hybi_in=(double *)nco_malloc(ilev_nbr_in*nco_typ_lng(var_typ_rgr)); hybm_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(in_id,hyai_id,hyai_in,crd_typ_out); rcd=nco_get_var(in_id,hyam_id,hyam_in,crd_typ_out); rcd=nco_get_var(in_id,hybi_id,hybi_in,crd_typ_out); rcd=nco_get_var(in_id,hybm_id,hybm_in,crd_typ_out); if(flg_grd_hyb_cameam) rcd=nco_get_var(in_id,p0_id,&p0_in,crd_typ_out); /* ECMWF distributes IFS forecasts with lnsp = log(surface pressure) */ if(flg_grd_hyb_ecmwf){ /* Decompose ECMWF hya? convention into CAM/EAM-like product of P0 and hya? 
*/ p0_in=100000.0; for(size_t idx=0;idx<ilev_nbr_in;idx++) hyai_in[idx]/=p0_in; /* NB: hyai has ilev_nbr_in = lev_nbr_in+1 elements, so scale interfaces and midpoints in separate loops */ for(size_t idx=0;idx<lev_nbr_in;idx++) hyam_in[idx]/=p0_in; /* !idx */ } /* flg_grd_hyb_ecmwf */ } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ lev_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(in_id,lev_id,lev_in,crd_typ_out); } /* !flg_grd_in_prs */ /* Always obtain surface pressure if input or output grid is hybrid */ if(flg_grd_in_hyb || flg_grd_out_hyb){ /* Copy horizontal grid information from input file LHS variables were set above if PS is in template file */ if(ps_id_tpl == NC_MIN_INT){ /* NB: dmn_nbr_in/out in this block refer only to horizontal dimensions necessary to define PS */ rcd=nco_inq_varndims(in_id,ps_id,&dmn_nbr_in); /* This is harmlessly repeated for hybrid input files */ dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_cnt_in=(long *)nco_malloc((dmn_nbr_in+1)*sizeof(long)); if(!dmn_srt) dmn_srt=(long *)nco_malloc((dmn_nbr_in+1)*sizeof(long)); /* NB: Only allocate dmn_srt once */ rcd=nco_inq_vardimid(in_id,ps_id,dmn_ids_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_in+dmn_idx); /* 20190330: Allow possibility that PS has time dimension > 1 We want horizontal not temporal dimensions to contribute to grd_sz Temporal dimension is usually unlimited Only multiply grd_sz by fixed (non-unlimited) dimension sizes Corner-case exception when PS spatial dimension on unstructured grid is unlimited */ for(rec_idx=0;rec_idx<dmn_nbr_rec;rec_idx++) if(dmn_ids_in[dmn_idx] == dmn_ids_rec[rec_idx]) break; if(rec_idx == dmn_nbr_rec || dmn_nbr_in == 1) grd_sz_in*=dmn_cnt_in[dmn_idx]; if(rec_idx != dmn_nbr_rec && dmn_nbr_in > 1 && dmn_cnt_in[dmn_idx] > 1L){ dmn_id_tm_in=dmn_ids_in[dmn_idx]; dmn_idx_tm_in=dmn_idx; tm_nbr_in=dmn_cnt_in[dmn_idx_tm_in]; if(tm_nbr_in > 1L) flg_vrt_tm=True; } /* tm_nbr_in > 1 */ dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ /* Given all input PS information, define output PS information */ dmn_nbr_ps=dmn_nbr_out=dmn_nbr_in; dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_cnt_out=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long)); /* fxm: next line works for hyb_in and is buggy for prs_in */ memcpy(dmn_ids_out,dmn_ids_in,dmn_nbr_in*sizeof(int)); memcpy(dmn_cnt_out,dmn_cnt_in,dmn_nbr_in*sizeof(long)); grd_sz_out=grd_sz_in; tm_nbr_out=tm_nbr_in; }else{ /* !ps_id_tpl */ /* 20200825: We have already defined grd_sz_out if PS is in template file We have already defined grd_sz_in and grd_sz_out := grd_sz_in when PS not in template file We have already defined grd_sz_in if input file is pure-pressure However, we have not yet defined grd_sz_in if input file is hybrid Expectation is that grd_sz_in (from input file) = grd_sz_out (from template file) An independent check on this would examine dimension sizes in input file Such a check would immediately flag horizontal mismatches between vertical file and input file The check could not rely on PS being present in input file The check could/should examine the first horizontal variable in input file This would require a lot of code, so we just assume it is true */ grd_sz_in=grd_sz_out; } /* !ps_id_tpl */ /* Timestep sequencing NB: tm_nbr_??? 
variables count timesteps in vertical grid definitions These are not necessarily the same as the number of timesteps in either file Time-invariant hybrid or pure-pressure coordinates are valid vertical grids for timeseries Usually hybrid grids have as many timesteps in the grids as in the timeseries Usually pressure grids are time-invariant (as of 20190511 time-varying pure pressure grids are still not supported) This implementation interpolates timeseries to/from time-invariant vertical grids in one OpenMP call! */ if(tm_nbr_in > 1L || tm_nbr_out > 1L){ if(tm_nbr_in > tm_nbr_out) assert((float)tm_nbr_in/(float)tm_nbr_out == tm_nbr_in/tm_nbr_out); else assert((float)tm_nbr_out/(float)tm_nbr_in == tm_nbr_out/tm_nbr_in); } /* !tm_nbr_in */ tm_nbr=tm_nbr_in > tm_nbr_out ? tm_nbr_in : tm_nbr_out; /* Sanity checks */ if(grd_sz_in != grd_sz_out || tm_nbr_in != tm_nbr_out) (void)fprintf(stdout,"%s: ERROR %s reports that temporal or horizontal spatial dimensions differ: grd_sz_in = %ld != %ld = grd_sz_out, and/or tm_nbr_in = %ld != %ld = tm_nbr_out\n",nco_prg_nm_get(),fnc_nm,grd_sz_in,grd_sz_out,tm_nbr_in,tm_nbr_out); assert(grd_sz_in == grd_sz_out); assert(tm_nbr_in == tm_nbr_out); ps_in=(double *)nco_malloc_dbg(tm_nbr_in*grd_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() ps_in value buffer"); /* Surface pressure comes from either hybrid vertical grid-files, hybrid data files, or pressure data files that provide surface pressure */ if(flg_grd_in_hyb || (flg_grd_in_prs && ps_id_tpl == NC_MIN_INT)) rcd=nco_get_var(in_id,ps_id,ps_in,crd_typ_out); /* ECMWF distributes IFS forecasts with lnsp = log(surface pressure) */ if(flg_grd_hyb_ecmwf){ /* Convert ECMWF-provided log(surface_pressure) to surface_pressure */ const size_t ps_sz_in=tm_nbr_in*grd_sz_in; /* [nbr] Number of elements in ps_in */ for(size_t idx=0;idx<ps_sz_in;idx++) ps_in[idx]=exp(ps_in[idx]); } /* flg_grd_hyb_ecmwf */ /* Finally have enough information to allocate output pressure grid */ ps_out=(double *)nco_malloc_dbg(tm_nbr_out*grd_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() ps_out value buffer"); /* Get PS from output horizontal grid, if available, otherwise copy from input horizontal grid */ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_get_var(tpl_id,ps_id_tpl,ps_out,crd_typ_out); /* NB: Here we read from tpl_id one last time */ }else{ memcpy(ps_out,ps_in,tm_nbr_in*grd_sz_in*nco_typ_lng(var_typ_rgr)); } /* !ps_id_tpl */ } /* ! 
*/ /* Compare input and output surface pressure fields to determine whether subterranean extrapolation required */ nco_bool flg_add_msv_att; /* [flg] Extrapolation requires _FillValue */ flg_add_msv_att=False; /* Extrapolation type xtr_fll_msv may cause need to create _FillValue attributes */ if(xtr_mth == nco_xtr_fll_msv){ const size_t ps_sz=tm_nbr*grd_sz_in; // [nbr] Size of surface-pressure field double *prs_max_in=NULL; /* [Pa] Maximum midpoint pressure on input grid */ double *prs_max_out=NULL; /* [Pa] Maximum midpoint pressure on output grid */ double *prs_min_in=NULL; /* [Pa] Minimum midpoint pressure on input grid */ double *prs_min_out=NULL; /* [Pa] Minimum midpoint pressure on output grid */ long idx_lev_max; // [idx] Index of midpoint level with greatest pressure long idx_lev_min; // [idx] Index of midpoint level with lowest pressure size_t idx; // [idx] Counting index prs_max_in=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_max_in value buffer"); prs_max_out=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_max_out value buffer"); prs_min_in=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_min_in value buffer"); prs_min_out=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_min_out value buffer"); if(flg_grd_in_hyb){ // fxm: assumes hybrid grid has least/greatest pressure at top/bottom level idx_lev_max=lev_nbr_in-1; idx_lev_min=0L; for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ idx_fst=tm_idx*grd_sz_in; for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++){ prs_max_in[grd_idx+idx_fst]=p0_in*hyam_in[idx_lev_max]+ps_in[idx_fst+grd_idx]*hybm_in[idx_lev_max]; prs_min_in[grd_idx+idx_fst]=p0_in*hyam_in[idx_lev_min]+ps_in[idx_fst+grd_idx]*hybm_in[idx_lev_min]; } /* !grd_idx */ } /* !tm_idx */ } /* !flg_grd_in_hyb */ if(flg_grd_out_hyb){ // fxm: assumes hybrid grid has least/greatest pressure at top/bottom level idx_lev_max=lev_nbr_out-1; idx_lev_min=0L; for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ idx_fst=tm_idx*grd_sz_out; for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++){ prs_max_out[grd_idx+idx_fst]=p0_out*hyam_out[idx_lev_max]+ps_out[idx_fst+grd_idx]*hybm_out[idx_lev_max]; prs_min_out[grd_idx+idx_fst]=p0_out*hyam_out[idx_lev_min]+ps_out[idx_fst+grd_idx]*hybm_out[idx_lev_min]; } /* !grd_idx */ } /* !tm_idx */ } /* !flg_grd_out_hyb */ if(flg_grd_in_prs){ double lev_in_max; double lev_in_min; if(lev_in[0] < lev_in[1]) lev_in_max=lev_in[lev_nbr_in-1]; else lev_in_max=lev_in[0]; if(lev_in[0] < lev_in[1]) lev_in_min=lev_in[0]; else lev_in_min=lev_in[lev_nbr_in-1]; for(size_t idx_in=0;idx_in<ps_sz;idx_in++) prs_max_in[idx_in]=lev_in_max; for(size_t idx_in=0;idx_in<ps_sz;idx_in++) prs_min_in[idx_in]=lev_in_min; } /* !flg_grd_in_prs */ if(flg_grd_out_prs){ double lev_out_max; double lev_out_min; if(lev_out[0] < lev_out[1]) lev_out_max=lev_out[lev_nbr_out-1]; else lev_out_max=lev_out[0]; if(lev_out[0] < lev_out[1]) lev_out_min=lev_out[0]; else lev_out_min=lev_out[lev_nbr_out-1]; for(size_t idx_out=0;idx_out<ps_sz;idx_out++) prs_max_out[idx_out]=lev_out_max; for(size_t idx_out=0;idx_out<ps_sz;idx_out++) prs_min_out[idx_out]=lev_out_min; } /* !flg_grd_out_prs */ for(idx=0;idx<ps_sz;idx++) if(prs_max_out[idx] > prs_max_in[idx]) break; if(idx < ps_sz) flg_add_msv_att=True; for(idx=0;idx<ps_sz;idx++) if(prs_min_out[idx] < prs_min_in[idx]) break; if(idx < ps_sz) flg_add_msv_att=True; if(flg_add_msv_att && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s 
reports at least one point in at least one output level requires extrapolation (not interpolation). Will ensure that all interpolated fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm); if(prs_max_in) prs_max_in=(double *)nco_free(prs_max_in); if(prs_max_out) prs_max_out=(double *)nco_free(prs_max_out); if(prs_min_in) prs_min_in=(double *)nco_free(prs_min_in); if(prs_min_out) prs_min_out=(double *)nco_free(prs_min_out); } /* !xtr_mth */ /* Lay-out regridded file */ //(void)fprintf(stdout,"%s: DEBUG quark1 dmn_nbr_out = %d, dmn_nbr_ps = %d\n",nco_prg_nm_get(),dmn_nbr_out,dmn_nbr_ps); /* Use explicitly specified output names, if any, otherwise use template names (either explicitly specified or discovered by fuzzing) */ if(rgr->lev_nm_out) lev_nm_out=rgr->lev_nm_out; if(rgr->ilev_nm_out){ if(flg_grd_out_hyb) ilev_nm_out=rgr->ilev_nm_out; if(flg_grd_out_prs) lev_nm_out=rgr->ilev_nm_out; } /* !ilev_nm_out */ if(flg_grd_out_prs){ /* Unless user explicitly specifies output name, use same name as input */ if(!rgr->lev_nm_out) lev_nm_out=(char *)strdup(plev_nm_in); /* Hybrid-sigma/pressure interface variables, if any, must also be output to pure-pressure files on lev grid */ ilev_nm_out=(char *)strdup(lev_nm_out); } /* !flg_grd_out_prs */ /* Define new vertical dimensions before all else */ if(flg_grd_out_hyb){ rcd=nco_def_dim(out_id,ilev_nm_out,ilev_nbr_out,&dmn_id_ilev_out); rcd=nco_def_dim(out_id,lev_nm_out,lev_nbr_out,&dmn_id_lev_out); /* Horizontal dimensions necessary to define PS variable */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_inq_dimname(tpl_id,dmn_ids_out[dmn_idx],dmn_nm); }else{ rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm); rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_out+dmn_idx); } /* !ps_id_tpl */ if(flg_grd_hyb_cameam) rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx); /* 20190602: ECMWF IFS PS variable has degenerate vertical dimension (lev_2). 
Avoid re-definition */ if(flg_grd_hyb_ecmwf) if(strcmp(dmn_nm,ilev_nm_out)) if(strcmp(dmn_nm,lev_nm_out)) rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx); } /* !dmn_idx */ } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd=nco_def_dim(out_id,lev_nm_out,lev_nbr_out,&dmn_id_lev_out); } /* !flg_grd_out_prs */ /* Do not extract grid variables (that are also extensive variables) like ilev, lev, hyai, hyam, hybi, hybm */ /* Exception list source: CAM: hyai, hyam, hybi, hybm, ilev, lev, P0, PS EAM: hyai, hyam, hybi, hybm, ilev, lev, P0, PS ECMWF: hyai, hyam, hybi, hybm, lev, lnsp NCEP: plev */ const int var_xcl_lst_nbr=10; /* [nbr] Number of objects on exclusion list */ const char *var_xcl_lst[]={"/hyai","/hyam","/hybi","/hybm","/ilev","/lev","/P0","/plev","/PS","/lnsp"}; int var_cpy_nbr=0; /* [nbr] Number of copied variables */ int var_rgr_nbr=0; /* [nbr] Number of regridded variables */ int var_xcl_nbr=0; /* [nbr] Number of deleted variables */ int var_crt_nbr=0; /* [nbr] Number of created variables */ long idx; /* [idx] Generic index */ unsigned int idx_tbl; /* [idx] Counter for traversal table */ const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */ for(idx=0;idx<var_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,var_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* !idx_tbl */ } /* !idx */ /* 20191001: Do not automatically define plev_nm_in in pressure-grid output files The variable named lev_nm_out in the input data file is always defined in the output file So if plev_nm_in == lev_nm_out it will be defined anyway */ if(flg_grd_in_prs && flg_grd_out_prs && strcmp(plev_nm_in,lev_nm_out)){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm,plev_nm_in)) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* !idx_tbl */ } /* !idx */ char *var_nm; /* [sng] Variable name */ int *dmn_id_in=NULL; /* [id] Dimension IDs */ int *dmn_id_out=NULL; /* [id] Dimension IDs */ int var_id_in; /* [id] Variable ID */ int var_id_out; /* [id] Variable ID */ nc_type var_typ_out; /* [enm] Variable type to write to disk */ nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */ int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; dfl_lvl=rgr->dfl_lvl; fl_out_fmt=rgr->fl_out_fmt; /* Define new coordinates and grid variables in regridded file */ const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables (scalars) */ const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ //const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ //const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ //const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ if(flg_grd_out_hyb){ 
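/* The block below repeats one idiom per grid variable (hyai, hyam, hybi, hybm, ilev, lev, P0, PS):
   define the variable in the output file, request per-variable deflation only when a deflate level
   was specified, then tally it in var_crt_nbr. A minimal sketch of that idiom, assuming a generic
   hypothetical 1-D variable named "var" on the midpoint dimension:

   int var_id; // [id] Variable ID
   rcd+=nco_def_var(out_id,"var",crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&var_id); // Define variable
   if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,var_id,shuffle,deflate,dfl_lvl); // Optional compression
   var_crt_nbr++; // Count created variables
*/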
rcd+=nco_def_var(out_id,"hyai",crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&hyai_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hyai_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hyam",crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&hyam_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hyam_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hybi",crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&hybi_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hybi_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hybm",crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&hybm_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hybm_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,ilev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&ilev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ilev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&lev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"P0",crd_typ_out,dmn_nbr_0D,(int *)NULL,&p0_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,p0_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; // for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ // rcd=nco_inq_dimname(out_id,dmn_ids_out[dmn_idx],dmn_nm); // (void)fprintf(stdout,"%s: DEBUG quark5 dmn_nbr_out = %d, dmn_nbr_ps = %d, dmn_idx = %d, dmn_ids_out[%d] = %d, dmn_nm = %s\n",nco_prg_nm_get(),dmn_nbr_out,dmn_nbr_ps,dmn_idx,dmn_idx,dmn_ids_out[dmn_idx],dmn_nm); // } /* !dmn_idx */ if(flg_grd_hyb_cameam) rcd+=nco_def_var(out_id,"PS",crd_typ_out,dmn_nbr_ps,dmn_ids_out,&ps_id); if(flg_grd_hyb_ecmwf){ /* Remove degenerate ECMWF vertical dimension so that output PS has dmn_nbr_ps-1 not dmn_nbr_ps dimensions */ int dmn_nbr_out_ecmwf=0; for(dmn_idx=0;dmn_idx<dmn_nbr_ps;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm); if(strcmp(dmn_nm,ilev_nm_out) && strcmp(dmn_nm,lev_nm_out) && strcmp(dmn_nm,"lev_2")) rcd=nco_inq_dimid(out_id,dmn_nm,dmn_ids_out+dmn_nbr_out_ecmwf++); } /* !dmn_idx */ rcd+=nco_def_var(out_id,"PS",crd_typ_out,dmn_nbr_out_ecmwf,dmn_ids_out,&ps_id); } /* !flg_grd_hyb_ecmwf */ if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ps_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; (void)nco_att_cpy(tpl_id,out_id,hyai_id_tpl,hyai_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hyam_id_tpl,hyam_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hybi_id_tpl,hybi_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hybm_id_tpl,hybm_id,PCK_ATT_CPY); if(p0_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,p0_id_tpl,p0_id,PCK_ATT_CPY); /* p0 not expected to be in ECMWF grids */ if(ilev_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,ilev_id_tpl,ilev_id,PCK_ATT_CPY); else if(ilev_id_in != NC_MIN_INT) (void)nco_att_cpy(in_id,out_id,ilev_id_in,ilev_id,PCK_ATT_CPY); if(lev_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,lev_id_tpl,lev_id,PCK_ATT_CPY); else if(lev_id_in != NC_MIN_INT) (void)nco_att_cpy(in_id,out_id,lev_id_in,lev_id,PCK_ATT_CPY); if(ps_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,ps_id_tpl,ps_id,PCK_ATT_CPY); else (void)nco_att_cpy(in_id,out_id,ps_id_in,ps_id,PCK_ATT_CPY); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd+=nco_def_var(out_id,lev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&lev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; (void)nco_att_cpy(tpl_id,out_id,lev_id_tpl,lev_id,PCK_ATT_CPY); 
dmn_id_ilev_out=dmn_id_lev_out; } /* !flg_grd_out_prs */ /* No further access to template file, close it */ nco_close(tpl_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_tpl); char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */ nco_bool has_ilev; /* [flg] Contains interface level dimension */ nco_bool has_lev; /* [flg] Contains midpoint level dimension */ nco_bool has_tm; /* [flg] Contains time dimension */ nco_bool need_prs_ntf=False; /* [flg] At least one variable to regrid is on interface levels */ nco_bool need_prs_mdp=False; /* [flg] At least one variable to regrid is on midpoint levels */ trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */ /* Define regridding flag for each variable */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn; has_ilev=False; has_lev=False; for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ /* Pre-determine flags necessary during next loop */ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* fxm: Generalize to include any variable containing coordinates with "standard_name" = "atmosphere_hybrid_sigma_pressure_coordinate" */ if(!has_ilev && ilev_nm_in) has_ilev=!strcmp(dmn_nm_cp,ilev_nm_in); if(!has_lev) has_lev=!strcmp(dmn_nm_cp,lev_nm_in); } /* end loop over dimensions */ /* Regrid variables that contain either vertical dimension */ if(has_ilev || has_lev){ trv_tbl->lst[idx_tbl].flg_rgr=True; var_rgr_nbr++; if(has_ilev) need_prs_ntf=True; if(has_lev) need_prs_mdp=True; } /* endif */ assert(!(has_ilev && has_lev)); /* Copy all variables that are not regridded or omitted */ if(!trv_tbl->lst[idx_tbl].flg_rgr) var_cpy_nbr++; } /* end nco_obj_typ_var */ } /* end idx_tbl */ if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit interpolation criteria. The vertical interpolator expects something to interpolate, and variables not interpolated are copied straight to output. HINT: If the name(s) of the input vertical grid dimensions (e.g., ilev and lev) do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"ilev\", \"lev\", and/or \"plev\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#regrid. For hybrid-pressure coordinate grids, ensure that the \"ilev\" and \"lev\" variable names are known with, e.g., \"ncks --rgr ilev_nm=interface_level --rgr lev_nm=midpoint_level\" or \"ncremap -R '--rgr ilev=interface_level --rgr lev=midpoint_level'\". For pure pressure grids, ensure the \"plev\" coordinate name is defined with, e.g., \"ncks --rgr plev_nm=pressure_level\" or \"ncremap -R '--rgr plev=pressure_level'\".\n",nco_prg_nm_get(),fnc_nm); if(nco_dbg_lvl_get() >= nco_dbg_fl){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Interpolate %s? %s\n",trv.nm,trv.flg_rgr ? 
"Yes" : "No"); } /* end idx_tbl */ } /* end dbg */ /* Pre-allocate dimension ID and cnt/srt space */ int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */ rcd+=nco_inq_ndims(in_id,&dmn_nbr_max); dmn_id_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); aed_sct aed_mtd_fll_val; char *att_nm_fll_val=strdup("_FillValue"); int flg_pck; /* [flg] Variable is packed on disk */ nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */ double mss_val_dbl; double mss_val_cmp_dbl; /* Missing value for comparison to double precision values */ float mss_val_flt; if(flg_add_msv_att){ aed_mtd_fll_val.att_nm=att_nm_fll_val; aed_mtd_fll_val.mode=aed_create; aed_mtd_fll_val.sz=1L; mss_val_dbl=NC_FILL_DOUBLE; mss_val_flt=NC_FILL_FLOAT; } /* !flg_add_msv_att */ /* Define interpolated and copied variables in output file */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ var_nm=trv.nm; /* Preserve input type in output type */ var_typ_out=trv.var_typ; dmn_nbr_in=trv.nbr_dmn; dmn_nbr_out=trv.nbr_dmn; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out); /* If variable has not been defined, define it */ if(rcd != NC_NOERR){ if(trv.flg_rgr){ /* Interpolate */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports variable \"%s\" is packed so results unpredictable. 
HINT: If regridded values seem weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); if(ilev_nm_in && !strcmp(dmn_nm,ilev_nm_in)){ /* Change ilev dimension */ dmn_id_out[dmn_idx]=dmn_id_ilev_out; dmn_cnt_out[dmn_idx]=ilev_nbr_out; }else if(!strcmp(dmn_nm,lev_nm_in)){ /* Change lev dimension */ dmn_id_out[dmn_idx]=dmn_id_lev_out; dmn_cnt_out[dmn_idx]=lev_nbr_out; }else{ /* Dimensions ilev/lev_nm_in have already been defined as ilev/lev_nm_out, replicate all other dimensions */ rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); } /* !ilev */ if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_out+dmn_idx); /* Check for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ }else{ /* !flg_rgr */ /* Replicate non-interpolated variables */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_out+dmn_idx); /* Check for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ } /* !flg_rgr */ rcd=nco_def_var(out_id,var_nm,var_typ_out,dmn_nbr_out,dmn_id_out,&var_id_out); /* Duplicate netCDF4 settings when possible */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){ /* Deflation */ if(dmn_nbr_out > 0){ int dfl_lvl_in; /* [enm] Deflate level [0..9] */ rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in); /* Copy original deflation settings */ if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in); /* Overwrite HDF Lempel-Ziv compression level, if requested */ if(dfl_lvl == 0) deflate=(int)False; else deflate=(int)True; /* Turn-off shuffle when uncompressing otherwise chunking requests may fail */ if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE; /* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */ if(dfl_lvl >= 0) shuffle=NC_SHUFFLE; if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl); } /* !dmn_nbr_out */ } /* !NC_FORMAT_NETCDF4 */ (void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY); /* Variables with subterranean levels and missing-value extrapolation must have _FillValue attribute */ if(flg_add_msv_att && trv.flg_rgr){ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); if(!has_mss_val){ nco_bool flg_att_chg; /* [flg] _FillValue attribute was written */ aed_mtd_fll_val.var_nm=var_nm; aed_mtd_fll_val.id=var_id_out; aed_mtd_fll_val.type=var_typ_out; if(var_typ_out == NC_FLOAT) aed_mtd_fll_val.val.fp=&mss_val_flt; else if(var_typ_out == NC_DOUBLE) aed_mtd_fll_val.val.dp=&mss_val_dbl; flg_att_chg=nco_aed_prc(out_id,var_id_out,aed_mtd_fll_val); if(!flg_att_chg && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: WARNING 
%s reports unsuccessful attempt to create _FillValue attribute for variable %s\n",nco_prg_nm_get(),fnc_nm,var_nm); } /* !has_mss_val */ } /* !flg_add_msv_att */ } /* !rcd */ } /* !var */ } /* !idx_tbl */ /* Free pre-allocated array space */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Begin data mode */ (void)nco_enddef(out_id); /* Copy all grid variables */ if(flg_grd_out_hyb){ (void)nco_put_var(out_id,hyai_id,hyai_out,crd_typ_out); (void)nco_put_var(out_id,hyam_id,hyam_out,crd_typ_out); (void)nco_put_var(out_id,hybi_id,hybi_out,crd_typ_out); (void)nco_put_var(out_id,hybm_id,hybm_out,crd_typ_out); (void)nco_put_var(out_id,ilev_id,ilev_out,crd_typ_out); (void)nco_put_var(out_id,lev_id,lev_out,crd_typ_out); (void)nco_put_var(out_id,p0_id,&p0_out,crd_typ_out); (void)nco_put_var(out_id,ps_id,ps_out,crd_typ_out); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ (void)nco_put_var(out_id,lev_id,lev_out,crd_typ_out); } /* !flg_grd_out_prs */ nco_bool flg_ntp_log=True; /* [flg] Interpolate in log(vertical_coordinate) */ if(ntp_mth == nco_ntp_lnr) flg_ntp_log=False; size_t idx_in; /* [idx] Index into 3D input variables */ size_t idx_out; /* [idx] Index into 3D output variables */ size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */ /* Interpolate or copy variable values */ double *var_val_dbl_in=NULL; double *var_val_dbl_out=NULL; double *prs_ntp_in; /* [Pa] Interpolated pressure array on input grid */ double *prs_ntp_out; /* [Pa] Interpolated pressure array on output grid */ int lvl_idx_in; /* [idx] Level index on input grid */ int lvl_idx_out; /* [idx] Level index on output grid */ int lvl_nbr_in; /* [nbr] Number of levels for current interpolated variable on input grid */ int lvl_nbr_out; /* [nbr] Number of levels for current interpolated variable on output grid */ int thr_idx; /* [idx] Thread index */ size_t grd_nbr=grd_sz_in; /* [nbr] Horizontal grid size */ size_t idx_dbg=rgr->idx_dbg; /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped as shared in parallel clause */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ /* Repeating above documentation for the forgetful: NB: tm_nbr is max(timesteps) in vertical grid definitions, not number of records in either file This implementation interpolates timeseries to/from time-invariant vertical grids in one OpenMP call! 
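Editorial sketch (added commentary, hypothetical numbers, not NCO code): the per-timestep hybrid-coordinate reconstruction used immediately below is prs_mdp(i,k) = p0*hyam(k) + ps(t,i)*hybm(k). For example, with p0 = 100000 Pa, hyam(k) = 0.1, hybm(k) = 0.8, and ps = 98000 Pa, the midpoint pressure is 0.1*100000 + 0.8*98000 = 88400 Pa. Pure-pressure grids bypass ps entirely and simply broadcast lev(k) to every column.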
*/ for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ /* Index-offset to current surface pressure timeslice */ idx_fst=tm_idx*grd_sz_in; if(need_prs_mdp){ /* Allocate and define midpoint pressures */ if(tm_idx == 0) prs_mdp_in=(double *)nco_malloc_dbg(grd_sz_in*lev_nbr_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_mdp_in value buffer"); if(tm_idx == 0) prs_mdp_out=(double *)nco_malloc_dbg(grd_sz_out*lev_nbr_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_mdp_out value buffer"); if(flg_grd_in_hyb) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_in;lev_idx++) prs_mdp_in[grd_idx+lev_idx*grd_sz_in]=p0_in*hyam_in[lev_idx]+ps_in[idx_fst+grd_idx]*hybm_in[lev_idx]; if(flg_grd_out_hyb) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) prs_mdp_out[grd_idx+lev_idx*grd_sz_out]=p0_out*hyam_out[lev_idx]+ps_out[idx_fst+grd_idx]*hybm_out[lev_idx]; if(flg_grd_in_prs) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_in;lev_idx++) prs_mdp_in[grd_idx+lev_idx*grd_sz_in]=lev_in[lev_idx]; if(flg_grd_out_prs) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) prs_mdp_out[grd_idx+lev_idx*grd_sz_out]=lev_out[lev_idx]; if(flg_ntp_log){ var_sz_in=grd_sz_in*lev_nbr_in; for(idx_in=0;idx_in<var_sz_in;idx_in++) prs_mdp_in[idx_in]=log(prs_mdp_in[idx_in]); var_sz_out=grd_sz_out*lev_nbr_out; for(idx_out=0;idx_out<var_sz_out;idx_out++) prs_mdp_out[idx_out]=log(prs_mdp_out[idx_out]); } /* !flg_ntp_log */ } /* !need_prs_mdp */ if(need_prs_ntf){ /* Allocate and define interface pressures */ if(tm_idx == 0) prs_ntf_in=(double *)nco_malloc_dbg(grd_sz_in*ilev_nbr_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_ntf_in value buffer"); if(tm_idx == 0) prs_ntf_out=(double *)nco_malloc_dbg(grd_sz_out*ilev_nbr_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_ntf_out value buffer"); if(flg_grd_in_hyb) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_in;ilev_idx++) prs_ntf_in[grd_idx+ilev_idx*grd_sz_in]=p0_in*hyai_in[ilev_idx]+ps_in[idx_fst+grd_idx]*hybi_in[ilev_idx]; if(flg_grd_out_hyb) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) prs_ntf_out[grd_idx+ilev_idx*grd_sz_out]=p0_out*hyai_out[ilev_idx]+ps_out[idx_fst+grd_idx]*hybi_out[ilev_idx]; if(flg_grd_in_prs) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_in;ilev_idx++) prs_ntf_in[grd_idx+ilev_idx*grd_sz_in]=lev_in[ilev_idx]; if(flg_grd_out_prs) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) prs_ntf_out[grd_idx+ilev_idx*grd_sz_out]=lev_out[ilev_idx]; if(flg_ntp_log){ var_sz_in=grd_sz_in*ilev_nbr_in; for(idx_in=0;idx_in<var_sz_in;idx_in++) prs_ntf_in[idx_in]=log(prs_ntf_in[idx_in]); var_sz_out=grd_sz_out*ilev_nbr_out; for(idx_out=0;idx_out<var_sz_out;idx_out++) prs_ntf_out[idx_out]=log(prs_ntf_out[idx_out]); } /* !flg_ntp_log */ } /* !need_prs_ntf */ /* Set firstprivate variables to initial values */ has_ilev=False; has_lev=False; has_tm=False; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"Interpolation progress: # means interpolated, ~ means copied\n"); #ifdef __GNUG__ # define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ ) # if GCC_LIB_VERSION < 490 # define GXX_OLD_OPENMP_SHARED_TREATMENT 1 # endif /* 490 */ # if GCC_LIB_VERSION >= 900 # define GXX_WITH_OPENMP5_GPU_SUPPORT 1 # endif /* 900 */ #endif /* !__GNUG__ */ #if defined( 
__INTEL_COMPILER) # pragma omp parallel for default(none) firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_cmp_dbl,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,fnc_nm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth) #else /* !__INTEL_COMPILER */ # ifdef GXX_OLD_OPENMP_SHARED_TREATMENT # pragma omp parallel for default(none) firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_cmp_dbl,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,fnc_nm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth) # else /* !old g++ */ # if defined(GXX_WITH_OPENMP5_GPU_SUPPORT) && 0 # pragma omp target teams distribute parallel for # else # pragma omp parallel for firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_cmp_dbl,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth) # endif /* !GCC > 9.0 */ # endif /* !GCC < 4.9 */ #endif /* !__INTEL_COMPILER */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; thr_idx=omp_get_thread_num(); in_id=trv_tbl->in_id_arr[thr_idx]; #ifdef _OPENMP if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? "s" : ""); if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm); #endif /* !_OPENMP */ if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm); if(trv.flg_rgr){ /* Interpolate variable */ var_nm=trv.nm; if(!strcmp(var_nm,"US") || !strcmp(var_nm,"VS")) (void)fprintf(fp_stdout,"%s: WARNING %s reports attempt to vertically interpolate a variable named \"%s\". If this variable is from a CESM CAM or E3SM EAM output or initial condition file on a rectangular grid (e.g., FV 0.9x1.25), then expect this program to fail and dump core when interpolating US and to produce slightly incorrect answers for VS. 
The vertical interpolation routine requires that interpolated variables be on the same horizontal grid as the supplied pressure field. However, the CAM/EAM US and VS variables from rectangular grid simulations are often on a horizontal grid, called the staggered grid, that is offset from the rest of the variables including the surface pressure. US usually sits on a grid that is staggered in latitude from, and is a slightly different size than, the surface pressure grid. This leads to a core dump. VS sits on a grid staggered in longitude from, though the same size as, the surface pressure field. The resulting interpolation will be based on surface pressure half a gridcell to the east rather than centered with VS. The correct procedure to vertically interpolate US and VS is to 1) horizontally regrid the supplied surface pressure (often \"PS\") to the staggered grid, then 2) vertically interpolate US and VS to the desired vertical grid based on the surface pressure on the staggered grid, then 3) re-combine the interpolated US and VS with the interpolated versions of the rest of the variables. The best solution to this dilemma is to script this workflow. Contact Charlie if you need help with this.\n",nco_prg_nm_get(),fnc_nm,var_nm); var_typ_rgr=NC_DOUBLE; /* NB: Perform regridding in double precision */ var_typ_out=trv.var_typ; /* NB: Output type in file is same as input type */ var_sz_in=1L; var_sz_out=1L; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid(out_id,var_nm,&var_id_out); rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in); rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_vardimid(out_id,var_id_out,dmn_id_out); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); if(dmn_id_in[dmn_idx] == dmn_id_ilev_in) has_ilev=True; if(dmn_id_in[dmn_idx] == dmn_id_lev_in) has_lev=True; if(dmn_id_in[dmn_idx] == dmn_id_tm_in) has_tm=True; if(flg_vrt_tm && has_tm && dmn_id_in[dmn_idx] == dmn_id_tm_in){ dmn_cnt_in[dmn_idx]=1L; dmn_srt[dmn_idx]=tm_idx; }else{ dmn_srt[dmn_idx]=0L; } /* !flg_vrt_tm */ var_sz_in*=dmn_cnt_in[dmn_idx]; } /* !dmn_idx */ var_val_dbl_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() input value buffer"); rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_dbl_in,var_typ_rgr); for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ /* Dimension count vector is same as input except for lvl dimension */ dmn_cnt_out[dmn_idx]=dmn_cnt_in[dmn_idx]; if(has_ilev && dmn_id_out[dmn_idx] == dmn_id_ilev_out) dmn_cnt_out[dmn_idx]=ilev_nbr_out; if(has_lev && dmn_id_out[dmn_idx] == dmn_id_lev_out) dmn_cnt_out[dmn_idx]=lev_nbr_out; var_sz_out*=dmn_cnt_out[dmn_idx]; } /* !dmn_idx */ var_val_dbl_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output value buffer"); /* Missing value setup */ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); if(has_mss_val) mss_val_cmp_dbl=mss_val_dbl; else mss_val_cmp_dbl=NC_FILL_DOUBLE; if(has_ilev){ /* Interpolate current variable from input interface 
pressure grid to output interface pressure grid */ lvl_nbr_in=ilev_nbr_in; lvl_nbr_out=ilev_nbr_out; prs_ntp_in=prs_ntf_in; prs_ntp_out=prs_ntf_out; }else{ /* Interpolate current variable from input midpoint pressure grid to output midpoint pressure grid */ lvl_nbr_in=lev_nbr_in; lvl_nbr_out=lev_nbr_out; prs_ntp_in=prs_mdp_in; prs_ntp_out=prs_mdp_out; } /* !ilev */ /* Procedure: Extract input/output coordinate/data arrays into 1D column order This enables actual interpolation code to be written for, or take advantage of, 1D interpolation routines After interpolating into 1D sequential memory, copy back to ND output and repeat */ double *crd_in=NULL; /* Input vertical coordinate (must be monotonic) */ double *crd_out=NULL; /* Output vertical coordinate (must be monotonic) */ double *dat_in=NULL; /* Input data (to be interpolated) on input vertical coordinate grid */ double *dat_out=NULL; /* Output data (interpolated) on output vertical coordinate grid (i.e., the answer) */ double *crd_in_mnt; /* Input vertical coordinate reversed if necessary to be monotonically increasing */ double *crd_out_mnt; /* Output vertical coordinate reversed if necessary to be monotonically increasing */ double *dat_in_mnt; /* Input data (to be interpolated) reversed if necessary along with input grid */ double *dat_out_mnt; /* Output data (interpolated) reversed if necessary along with output grid */ nco_xtr_sct xtr_LHS; nco_xtr_sct xtr_RHS; size_t brk_lft_idx; size_t brk_rgt_idx; size_t in_idx; size_t in_nbr; size_t out_nbr; size_t out_idx; /* Default extrapolation uses nearest valid neighbor */ xtr_LHS.xtr_fll=True; xtr_LHS.xtr_vrb=False; xtr_LHS.typ_fll=xtr_mth; xtr_RHS.xtr_fll=True; xtr_RHS.xtr_vrb=False; xtr_RHS.typ_fll=xtr_mth; /* Special-case extrapolation methods allowed for all except missing-value extrapolation types */ if(xtr_mth != nco_xtr_fll_msv){ if(!strcmp(var_nm,"T") || !strcmp(var_nm,"ta")) xtr_RHS.typ_fll=nco_xtr_fll_tpt; else if(!strcmp(var_nm,"Z3") || !strcmp(var_nm,"zg")) xtr_LHS.typ_fll=xtr_RHS.typ_fll=nco_xtr_fll_gph; } /* !xtr_mth */ crd_in=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); crd_out=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); dat_in=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); dat_out=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); in_nbr=lvl_nbr_in; out_nbr=lvl_nbr_out; nco_bool in_ncr; /* [flg] Input coordinate monotonically increases */ nco_bool out_ncr; /* [flg] Output coordinate monotonically increases */ /* Determine monotonicity direction only once, based on first vertical column */ if(prs_ntp_in[grd_nbr]-prs_ntp_in[0] > 0.0) in_ncr=True; else in_ncr=False; out_ncr=True; if(out_nbr > 1) if(prs_ntp_out[grd_nbr]-prs_ntp_out[0] < 0.0) out_ncr=False; /* If necessary, allocate (once, and re-use it) additional memory to hold reversed arrays */ if(!in_ncr){ crd_in_mnt=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); dat_in_mnt=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); } /* !in_ncr */ if(!out_ncr){ crd_out_mnt=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); dat_out_mnt=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); } /* !out_ncr */ /* Constants and parameters for extrapolation */ const double gamma_moist=6.5/10000.0; /* [K/Pa] Temperature extrapolation assumes constant moist adiabatic lower atmosphere lapse rate dT/dp=constant=(6.5 K)/(100 mb) = (6.5 K)/(10000 Pa) */ const double Rd_rcp_g0=287.0/9.81; /* [K/Pa] Geopotential height extrapolation uses hypsometric equation Z2-Z1=(Rd*Tv_avg/g0)*ln(p1/p2)=(Rd*Tv_avg/g0)*(ln(p1)-ln(p2)) */ const double tpt_vrt_avg=288.0; /* [K] Mean virtual temperature assumed for geopotential height extrapolation */ 
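/* Editorial worked example (added commentary, hypothetical numbers, not NCO code): with gamma_moist = 6.5e-4 K/Pa, extrapolating temperature 5000 Pa (50 mb) below the deepest input level adds 6.5e-4*5000 = 3.25 K. With Rd_rcp_g0 = 287.0/9.81, roughly 29.3, and tpt_vrt_avg = 288 K, the hypsometric thickness of the 1000-850 hPa layer is 29.3*288*ln(1000/850), roughly 1370 m, which is the increment the geopotential-height extrapolation below would apply */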
nco_bool FIRST_WARNING_LHS; /* [flg] First warning for LHS extrapolation */ nco_bool FIRST_WARNING_RHS; /* [flg] First warning for RHS extrapolation */ if(tm_idx == 0){ /* Only print extrapolation warnings for first timestep to prevent noisy output NB: Algorithm prevents any warnings for extrapolations that appear after first timestep */ FIRST_WARNING_LHS=True; FIRST_WARNING_RHS=True; } /* !tm_idx */ /* Outer loop over columns */ for(grd_idx=0;grd_idx<grd_nbr;grd_idx++){ /* Initialize pseudo-1D variables with consecutive memory addresses to avoid indirection */ for(lvl_idx_in=0;lvl_idx_in<lvl_nbr_in;lvl_idx_in++){ idx_in=grd_idx+lvl_idx_in*grd_nbr; crd_in[lvl_idx_in]=prs_ntp_in[idx_in]; dat_in[lvl_idx_in]=var_val_dbl_in[idx_in]; } /* !lvl_idx_in */ for(lvl_idx_out=0;lvl_idx_out<lvl_nbr_out;lvl_idx_out++){ idx_out=grd_idx+lvl_idx_out*grd_nbr; crd_out[lvl_idx_out]=prs_ntp_out[idx_out]; } /* !lvl_idx_out */ /* Interpolation code easier to write/debug if crd_in and crd_out both monotonically increase However, monotonically decreasing coordinates useful in many cases, such as depth coordinate, and pressure levels arranged largest to smallest (favored by CMIP) Next code block reverses array(s) if necessary so coordinates monotonically increase Code uses crd_in_mnt, dat_in_mnt, crd_out_mnt where "_mnt" reminds of "monotonically increasing" assumption Following code lifted from CSZ's libcsz.a library source code ~/sw/c++/vec.hh */ if(in_ncr){ crd_in_mnt=crd_in; dat_in_mnt=dat_in; }else{ for(in_idx=0;in_idx<in_nbr;in_idx++){ crd_in_mnt[in_idx]=crd_in[in_nbr-in_idx-1]; dat_in_mnt[in_idx]=dat_in[in_nbr-in_idx-1]; } /* !in_idx */ } /* !in_ncr */ if(out_ncr){ crd_out_mnt=crd_out; dat_out_mnt=dat_out; }else{ for(out_idx=0;out_idx<out_nbr;out_idx++) crd_out_mnt[out_idx]=crd_out[out_nbr-out_idx-1]; } /* !out_ncr */ // Initialize bracketing index brk_lft_idx=0; // Loop over desired output coordinates for(out_idx=0;out_idx<out_nbr;out_idx++){ // Order of conditions is important since second condition is illegal if brk_lft_idx >= in_nbr while((brk_lft_idx < in_nbr) && (crd_in_mnt[brk_lft_idx] < crd_out_mnt[out_idx])){ brk_lft_idx++; } // !while brk_lft_idx--; // Handle identity interpolation separately to preserve symmetry in extrapolation code if(brk_lft_idx != in_nbr-1){ if(crd_in_mnt[brk_lft_idx+1] == crd_out_mnt[out_idx]){ dat_out_mnt[out_idx]=dat_in_mnt[brk_lft_idx+1]; if(brk_lft_idx == -1) brk_lft_idx=0; // Reset brk_lft_idx to 0 so next while loop works continue; // Jump to next iteration } // !crd_in_mnt } // !brk_lft_idx if(brk_lft_idx == -1){ // LHS Extrapolation required // Degenerate case: crd_out_mnt[out_idx] < crd_in_mnt[0] brk_lft_idx=0; // Reset brk_lft_idx to 0 so next while loop works if(xtr_LHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: WARNING %s reports variable %s column %lu output value dat_out_mnt[%lu] at coordinate crd_out_mnt[%lu] = %g requires LHS extrapolation beyond leftmost valid coordinate at crd_in_mnt[%lu] = %g. Nearest valid datum is dat_in_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,var_nm,grd_idx,out_idx,out_idx,crd_out_mnt[out_idx],brk_lft_idx,crd_in_mnt[brk_lft_idx],brk_lft_idx,dat_in_mnt[brk_lft_idx]); 
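/* Editorial note (added commentary): LHS extrapolation occurs when an output level lies beyond the smallest monotonic input coordinate, e.g., a hypothetical request for a 10 hPa output level from an input grid whose topmost midpoint is 20 hPa; log-pressure interpolation behaves identically since log() preserves ordering */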
// Extrapolation options are presented in decreasing order of preference if(!xtr_LHS.xtr_fll){ (void)fprintf(fp_stdout,"%s: ERROR %s Full LHS extrapolation required but not permitted\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } /* !xtr_LHS.xtr_fll */ switch(xtr_LHS.typ_fll){ case nco_xtr_fll_nil: dat_out_mnt[out_idx]=0.0; break; case nco_xtr_fll_msv: dat_out_mnt[out_idx]=mss_val_cmp_dbl; break; case nco_xtr_fll_ngh: dat_out_mnt[out_idx]=dat_in_mnt[0]; break; case nco_xtr_fll_lnr: dat_out_mnt[out_idx]=dat_in_mnt[0]- (crd_in_mnt[0]-crd_out_mnt[out_idx])* (dat_in_mnt[1]-dat_in_mnt[0])/(crd_in_mnt[1]-crd_in_mnt[0]); break; case nco_xtr_fll_gph: if(flg_ntp_log) /* Coordinates are already logarithmic in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[0]+ Rd_rcp_g0*tpt_vrt_avg*(crd_in_mnt[0]-crd_out_mnt[out_idx]); else /* Interpolate with logarithm of pressure coordinates */ dat_out_mnt[out_idx]=dat_in_mnt[0]+ Rd_rcp_g0*tpt_vrt_avg*log(crd_in_mnt[0]/crd_out_mnt[out_idx]); if(FIRST_WARNING_LHS) (void)fprintf(fp_stdout,"%s: INFO %s geopotential height extrapolated upward towards space using hypsometric equation with constant global mean virtual temperature = %g for variable %s\n",nco_prg_nm_get(),fnc_nm,tpt_vrt_avg,var_nm); FIRST_WARNING_LHS=False; break; default: (void)fprintf(fp_stdout,"%s: ERROR %s Unknown xtr_LHS.typ_fll\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; break; } // !xtr_LHS.typ_fll if(xtr_LHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: INFO %s LHS extrapolation yields dat_out_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,out_idx,dat_out_mnt[out_idx]); }else if(brk_lft_idx < in_nbr-1){ // Normal case: crd_out_mnt is interpolable brk_rgt_idx=brk_lft_idx+1; // NB: brk_rgt_idx is ALWAYS greater than brk_lft_idx // This simultaneously meets two criteria: // 1. Divide-by-zero errors are impossible in the next step // 2. The identity interpolation is satisfied since crd_dlt == 0.0: // i.e., If crd_out_mnt[idx] == crd_in_mnt[brk_lft_idx] then dat_out_mnt[out_idx] := dat_in_mnt[brk_lft_idx] // Linearly interpolate dat_out_mnt[out_idx]= dat_in_mnt[brk_lft_idx]+ (crd_out_mnt[out_idx]-crd_in_mnt[brk_lft_idx])* (dat_in_mnt[brk_rgt_idx]-dat_in_mnt[brk_lft_idx])/ (crd_in_mnt[brk_rgt_idx]-crd_in_mnt[brk_lft_idx]); }else if(brk_lft_idx == in_nbr-1){ // RHS Extrapolation required // Degenerate case: brk_lft_idx is last element of crd_in_mnt brk_rgt_idx=brk_lft_idx; if(xtr_RHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: WARNING %s reports variable %s column %lu output value dat_out_mnt[%lu] at coordinate crd_out_mnt[%lu] = %g requires RHS extrapolation beyond rightmost valid coordinate at crd_in_mnt[%lu] = %g. 
Nearest valid datum is dat_in_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,var_nm,grd_idx,out_idx,out_idx,crd_out_mnt[out_idx],brk_rgt_idx,crd_in_mnt[brk_rgt_idx],brk_rgt_idx,dat_in_mnt[brk_rgt_idx]); // Extrapolation options are presented in decreasing order of preference if(!xtr_RHS.xtr_fll){ (void)fprintf(fp_stdout,"%s: ERROR %s Full RHS extrapolation required but not permitted\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } /* !xtr_RHS.xtr_fll */ switch(xtr_RHS.typ_fll){ case nco_xtr_fll_nil: dat_out_mnt[out_idx]=0.0; break; case nco_xtr_fll_msv: dat_out_mnt[out_idx]=mss_val_cmp_dbl; break; case nco_xtr_fll_ngh: dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]; break; case nco_xtr_fll_lnr: dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1])* (dat_in_mnt[in_nbr-1]-dat_in_mnt[in_nbr-2])/ (crd_in_mnt[in_nbr-1]-crd_in_mnt[in_nbr-2]); break; case nco_xtr_fll_tpt: if(flg_ntp_log) /* Exponentiate so coordinates are linear in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (exp(crd_out_mnt[out_idx])-exp(crd_in_mnt[in_nbr-1]))*gamma_moist; else /* Coordinates are already linear in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1])*gamma_moist; if(FIRST_WARNING_RHS) (void)fprintf(fp_stdout,"%s: INFO %s temperature extrapolated toward/into surface assuming constant moist adiabatic lapse rate = %g K/(100 mb) for variable %s\n",nco_prg_nm_get(),fnc_nm,gamma_moist*10000.0,var_nm); FIRST_WARNING_RHS=False; break; case nco_xtr_fll_gph: if(flg_ntp_log) /* Coordinates are already logarithmic in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]- Rd_rcp_g0*tpt_vrt_avg*(crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1]); else /* Interpolate with logarithm of pressure coordinates */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]- Rd_rcp_g0*tpt_vrt_avg*log(crd_out_mnt[out_idx]/crd_in_mnt[in_nbr-1]); if(FIRST_WARNING_RHS) (void)fprintf(fp_stdout,"%s: INFO %s geopotential height extrapolated toward/into surface using hypsometric equation with constant global mean virtual temperature = %g for variable %s\n",nco_prg_nm_get(),fnc_nm,tpt_vrt_avg,var_nm); FIRST_WARNING_RHS=False; break; default: (void)fprintf(fp_stdout,"%s: ERROR %s Unknown xtr_RHS\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; break; } // !xtr_RHS.typ_fll if(xtr_RHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: INFO %s RHS extrapolation yields dat_out_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,out_idx,dat_out_mnt[out_idx]); }else{ (void)fprintf(fp_stdout,"%s: ERROR %s Unforeseen value of brk_lft_idx\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } // !RHS } // !out_idx /* Un-reverse output data to be on original grid */ if(!out_ncr) for(out_idx=0;out_idx<out_nbr;out_idx++) dat_out[out_idx]=dat_out_mnt[out_nbr-out_idx-1]; // End of vec.hh code /* Copy answers into output array */ for(lvl_idx_out=0;lvl_idx_out<lvl_nbr_out;lvl_idx_out++){ idx_out=grd_idx+lvl_idx_out*grd_nbr; var_val_dbl_out[idx_out]=dat_out[lvl_idx_out]; } /* !lvl_idx_out */ if(nco_dbg_lvl_get() >= nco_dbg_io && grd_idx == idx_dbg){ (void)fprintf(fp_stdout,"%s: DEBUG %s variable %s at idx_dbg = %lu\n",nco_prg_nm_get(),fnc_nm,var_nm,idx_dbg); for(out_idx=0;out_idx<out_nbr;out_idx++){ (void)fprintf(fp_stdout,"out_idx = %lu dat_out = %g\n",out_idx,dat_out[out_idx]); } /* !out_idx */ } /* !dbg */ } /* !grd_idx */ if(crd_in) crd_in=(double *)nco_free(crd_in); if(crd_out) crd_out=(double *)nco_free(crd_out); if(dat_in) dat_in=(double *)nco_free(dat_in); if(dat_out) dat_out=(double *)nco_free(dat_out); if(!in_ncr){ 
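/* Editorial comment (added): the _mnt buffers freed below are the reversed-array workspace allocated once before the column loop and shared by all columns, hence freed here rather than per-column */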
if(crd_in_mnt) crd_in_mnt=(double *)nco_free(crd_in_mnt); if(dat_in_mnt) dat_in_mnt=(double *)nco_free(dat_in_mnt); } /* !in_ncr */ if(!out_ncr){ if(crd_out_mnt) crd_out_mnt=(double *)nco_free(crd_out_mnt); if(dat_out_mnt) dat_out_mnt=(double *)nco_free(dat_out_mnt); } /* !out_ncr */ if(nco_typ_ntg(var_typ_out)){ /* 20210407: Round, with rint(), integer fields before sending to netCDF for output Otherwise implicit type conversion will truncate (rather than round) output values This is critical for masks where rounding errors produce near integer values (e.g., 0.999...) that could then be truncated to zero by implicit conversion instead of rounded up to 1. */ for(idx_out=0;idx_out<var_sz_out;idx_out++) if(var_val_dbl_out[idx_out] != mss_val_cmp_dbl) var_val_dbl_out[idx_out]=rint(var_val_dbl_out[idx_out]); } /* !nco_typ_ntg() */ #pragma omp critical { /* begin OpenMP critical */ rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_dbl_out,var_typ_rgr); } /* end OpenMP critical */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(var_val_dbl_in) var_val_dbl_in=(double *)nco_free(var_val_dbl_in); if(var_val_dbl_out) var_val_dbl_out=(double *)nco_free(var_val_dbl_out); }else{ /* !trv.flg_rgr */ /* Use standard NCO copy routine for variables that are not regridded 20190511: Copy them only once */ if(tm_idx == 0){ #pragma omp critical { /* begin OpenMP critical */ (void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl); } /* end OpenMP critical */ } /* !tm_idx */ } /* !flg_rgr */ } /* !xtr */ } /* end (OpenMP parallel for) loop over idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n"); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables interpolated = %d, copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr); } /* !tm_idx */ if(att_nm_fll_val) att_nm_fll_val=(char *)nco_free(att_nm_fll_val); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in); if(dmn_ids_out) dmn_ids_out=(int *)nco_free(dmn_ids_out); if(ilev_nm_in) ilev_nm_in=(char *)nco_free(ilev_nm_in); if(lev_nm_in) lev_nm_in=(char *)nco_free(lev_nm_in); if(hyai_in) hyai_in=(double *)nco_free(hyai_in); if(hyam_in) hyam_in=(double *)nco_free(hyam_in); if(hybi_in) hybi_in=(double *)nco_free(hybi_in); if(hybm_in) hybm_in=(double *)nco_free(hybm_in); if(ps_in) ps_in=(double *)nco_free(ps_in); if(prs_mdp_in) prs_mdp_in=(double *)nco_free(prs_mdp_in); if(prs_ntf_in) prs_ntf_in=(double *)nco_free(prs_ntf_in); if(hyai_out) hyai_out=(double *)nco_free(hyai_out); if(hyam_out) hyam_out=(double *)nco_free(hyam_out); if(hybi_out) hybi_out=(double *)nco_free(hybi_out); if(hybm_out) hybm_out=(double *)nco_free(hybm_out); if(ilev_out) ilev_out=(double *)nco_free(ilev_out); if(lev_in) lev_in=(double *)nco_free(lev_in); if(lev_out) lev_out=(double *)nco_free(lev_out); if(ps_out) ps_out=(double *)nco_free(ps_out); if(prs_mdp_out) prs_mdp_out=(double *)nco_free(prs_mdp_out); if(prs_ntf_out) prs_ntf_out=(double *)nco_free(prs_ntf_out); return rcd; } /* !nco_ntp_vrt() */ int /* O [enm] Return code */ nco_rgr_wgt /* [fnc] Regrid with external weights */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ 
trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Regrid fields using external weights contained in a mapfile Examine ESMF, SCRIP, Tempest map-files: ncks --cdl -M -m ${DATA}/scrip/rmp_T42_to_POP43_conserv.nc | m ncks --cdl -M -m ${DATA}/maps/map_t42_to_fv129x256_aave.20150621.nc | m ncks --cdl -M -m ${DATA}/maps/map_ne30np4_to_ne120np4_tps.20150618.nc | m Test ESMF, SCRIP, Tempest map-files: ncks -D 5 -O --map=${DATA}/scrip/rmp_T42_to_POP43_conserv.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc ncks -D 5 -O --map=${DATA}/maps/map_t42_to_fv129x256_aave.20150621.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc ncks -D 5 -O --map=${DATA}/maps/map_ne30np4_to_ne120np4_tps.20150618.nc ${DATA}/ne30/rgr/ne30_1D.nc ~/foo.nc Mapfile formats ESMF, GRIDSPEC, SCRIP, and UGRID described here: http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_6_3_0rp1/ESMF_refdoc/node3.html#sec:fileformat:scrip Conventions: grid_size: Number of gridcells (product of lat*lon) address: Source and destination index for each link pair num_links: Number of unique address pairs in remapping, i.e., size of sparse matrix num_wgts: Number of weights per vertex for given remapping (we only handle num_wgts == 1 below) = 1 Bilinear Destination grid value determined by weights times known source grid values at vertices of source quadrilateral that bounds destination point P One weight per vertex guarantees fxm but is not conservative Bilinear requires logically rectangular grid = 1 Distance-based: Distance-weighted uses values at num_neighbors points The weight is inversely proportional to the angular distance from the destination point to each neighbor on the source grid = 3 Second-order conservative: Described in Jones, P. W. (1999), Monthly Weather Review, 127, 2204-2210 First-order conservative schemes assume fluxes are constant within gridcell Destination fluxes are simple summations of source fluxes weighted by overlap areas Old clm and bds remappers use a first-order algorithm Second-order improves this by using a first-order Taylor expansion of flux Source flux is centroid value plus directional offset determined by dot product of directional gradient and vector pointing from vertex to centroid. Three weights per vertex are centroid weight, weight times local theta-gradient from centroid to vertex, and weight times local phi-gradient from centroid to vertex. = 4 Bicubic: The four weights are gradients in each direction plus a cross-gradient term Same principle as bilinear, but more weights per vertex Bicubic requires logically rectangular grid wgt: Maximum number of source cells contributing to destination cell is not a dimension in SCRIP remapping files because SCRIP stores everything in 1-D sparse matrix arrays Definition of sparse matrix formulations and normalization terminology, SCRIP manual p. 8, 13, 16: 
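Editorial example (added commentary, hypothetical numbers): a map-file with num_links = 2, src_address = [17,18], dst_address = [3,3], and remap_matrix[:,0] = [0.4,0.6] says destination cell 3 receives 0.4*src(17) + 0.6*src(18); addresses follow the 1-based Fortran convention, so C code subtracts one before indexing. The three normalization conventions in the loop below differ only in the divisor applied as each link accumulates: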
for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ // Remap source function f = 1 in all unmasked source gridcells, zero elsewhere, to function F on destination grid // Normalization: fractional area (fracarea) (F = 1 where destination overlaps unmasked source grid) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]; // Normalization: destination area (destarea) (weights in each destination cell sum to its area fraction) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]/dst_area[ddr_dst[lnk_idx]]; // Normalization: none (F = angular area that participates in remapping) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]/(dst_area[ddr_dst[lnk_idx]]*dst_frc[ddr_dst[lnk_idx]]); } // end loop over lnk Documentation: NCL special cases described in popRemap.ncl, e.g., at https://github.com/yyr/ncl/blob/master/ni/src/examples/gsun/popRemap.ncl ESMF Regridding Status: https://www.earthsystemcog.org/projects/esmf Sample regrid T42->POP43, SCRIP: ncks -O --map=${DATA}/scrip/rmp_T42_to_POP43_conserv.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc */ const char fnc_nm[]="nco_rgr_wgt()"; /* [sng] Function name */ char *fl_in; char *fl_pth_lcl=NULL; const double rdn2dgr=180.0/M_PI; const double dgr2rdn=M_PI/180.0; const double eps_rlt=1.0e-14; /* [frc] Round-off error tolerance */ double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */ double area_out_ttl=0.0; /* [frc] Exact sum of area */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int in_id; /* I [id] Input netCDF file ID */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int dmn_idx; /* [idx] Dimension index */ int dst_grid_corners_id; /* [id] Destination grid corners dimension ID */ int dst_grid_rank_id; /* [id] Destination grid rank dimension ID */ int dst_grid_size_id; /* [id] Destination grid size dimension ID */ int num_links_id; /* [id] Number of links dimension ID */ int num_wgts_id=NC_MIN_INT; /* [id] Number of weights dimension ID */ int src_grid_corners_id; /* [id] Source grid corners dimension ID */ int src_grid_rank_id; /* [id] Source grid rank dimension ID */ int src_grid_size_id; /* [id] Source grid size dimension ID */ long int lat_idx; long int lon_idx; short int bnd_idx; nco_bool FL_RTR_RMT_LCN; nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool flg_dgn_area_out=False; /* [flg] Diagnose area_out from grid boundaries */ nco_bool flg_bnd_1D_usable=False; /* [flg] Usable 1D cell vertices exist */ nco_bool flg_stg=rgr->flg_stg; /* [flg] Write staggered grid with FV output */ nco_grd_2D_typ_enm nco_grd_2D_typ=nco_grd_2D_nil; /* [enm] Two-dimensional grid-type enum */ nco_grd_lat_typ_enm nco_grd_lat_typ=nco_grd_lat_nil; /* [enm] Latitude grid-type enum */ nco_grd_lon_typ_enm nco_grd_lon_typ=nco_grd_lon_nil; /* [enm] Longitude grid-type enum */ nco_mpf_sct mpf; size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s obtaining mapping weights from %s\n",nco_prg_nm_get(),fnc_nm,rgr->fl_map); /* Duplicate (because nco_fl_mk_lcl() free()'s fl_in) */ fl_in=(char 
*)strdup(rgr->fl_map); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); /* Identify mapping file type using string generated by weight-generator: ESMF: title = "ESMF Offline Regridding Weight Generator" ESMF_weight_only: title = "ESMF Regrid Weight Generator" NCO: Title = "netCDF Operators (NCO) Offline Regridding Weight Generator" MBTR: Title = "MOAB-TempestRemap Online Regridding Weight Generator" SCRIP: conventions = "SCRIP" Tempest: Title = "TempestRemap Offline Regridding Weight Generator" */ char *att_val; char *att_cnv_val=NULL; char *att_gnr_val=NULL; char *att_ttl_val=NULL; char *cnv_sng=NULL; /* netCDF standard is uppercase Conventions, though some models use lowercase */ char att_sng_Cnv[]="Conventions"; /* [sng] Unidata standard string (uppercase) */ char att_sng_cnv[]="conventions"; /* [sng] Unidata non-standard string (lowercase) */ char att_sng_gnr[]="weight_generator"; /* [sng] CMIP6 standard string */ char att_sng_Ttl[]="Title"; /* [sng] MBTR, NCO, and Tempest use "Title" attribute. MBTR and Tempest do not use "Conventions" */ char att_sng_ttl[]="title"; /* [sng] ERWG 7.1 weight_only uses "title" not "Conventions" attribute */ char name0_sng[]="name0"; /* [sng] Attribute where Tempest stores least-rapidly-varying dimension name */ nco_rgr_mpf_typ_enm nco_rgr_mpf_typ=nco_rgr_mpf_nil; /* [enm] Type of remapping file */ nco_rgr_typ_enm nco_rgr_typ=nco_rgr_grd_nil; /* [enm] Type of grid conversion */ /* Look for map-type signature in [cC]onventions or [tT]itle attribute */ att_cnv_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_cnv); if(!att_cnv_val) att_cnv_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_Cnv); att_gnr_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_gnr); att_ttl_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_ttl); if(!att_ttl_val) att_ttl_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_Ttl); /* Either "[cC]onventions" or "[tT]itle" attribute determines map-file type... */ if(att_cnv_val && strstr(att_cnv_val,"SCRIP")) nco_rgr_mpf_typ=nco_rgr_mpf_SCRIP; if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_ttl_val){ if(strstr(att_ttl_val,"ESMF Offline Regridding Weight Generator")) nco_rgr_mpf_typ=nco_rgr_mpf_ESMF; else if(strstr(att_ttl_val,"netCDF Operators")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO; else if(strstr(att_ttl_val,"MOAB-TempestRemap")) nco_rgr_mpf_typ=nco_rgr_mpf_MBTR; else if(strstr(att_ttl_val,"Tempest")) nco_rgr_mpf_typ=nco_rgr_mpf_Tempest; else if(strstr(att_ttl_val,"ESMF Regrid Weight Generator")) nco_rgr_mpf_typ=nco_rgr_mpf_ESMF_weight_only; } /* !att_ttl_val */ if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_cnv_val){ if(strstr(att_cnv_val,"NCO")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO; } /* !att_cnv_val */ if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_gnr_val){ if(strstr(att_gnr_val,"NCO")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO; } /* !att_gnr_val */ if(nco_rgr_mpf_typ == nco_rgr_mpf_nil){ (void)fprintf(stderr,"%s: WARNING %s unable to discern map-file type from global attributes \"[cC]onventions\" = \"%s\" and/or \"[tT]itle\" = \"%s\" and/or \"weight_generator\" = \"%s\"\n",nco_prg_nm_get(),fnc_nm,att_cnv_val ? att_cnv_val : "",att_ttl_val ? att_ttl_val : "",att_gnr_val ? 
att_gnr_val : ""); nco_rgr_mpf_typ=nco_rgr_mpf_unknown; } /* !nco_rgr_mpf_typ */ if(att_cnv_val) att_cnv_val=(char *)nco_free(att_cnv_val); if(att_gnr_val) att_gnr_val=(char *)nco_free(att_gnr_val); if(att_ttl_val) att_ttl_val=(char *)nco_free(att_ttl_val); switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_SCRIP: rcd+=nco_inq_dimid(in_id,"src_grid_size",&src_grid_size_id); rcd+=nco_inq_dimid(in_id,"dst_grid_size",&dst_grid_size_id); rcd+=nco_inq_dimid(in_id,"src_grid_corners",&src_grid_corners_id); rcd+=nco_inq_dimid(in_id,"dst_grid_corners",&dst_grid_corners_id); rcd+=nco_inq_dimid(in_id,"src_grid_rank",&src_grid_rank_id); rcd+=nco_inq_dimid(in_id,"dst_grid_rank",&dst_grid_rank_id); rcd+=nco_inq_dimid(in_id,"num_links",&num_links_id); rcd+=nco_inq_dimid(in_id,"num_wgts",&num_wgts_id); break; case nco_rgr_mpf_ESMF_weight_only: rcd+=nco_inq_dimid(in_id,"n_s",&num_links_id); break; case nco_rgr_mpf_ESMF: case nco_rgr_mpf_MBTR: case nco_rgr_mpf_NCO: case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: rcd+=nco_inq_dimid(in_id,"n_a",&src_grid_size_id); rcd+=nco_inq_dimid(in_id,"n_b",&dst_grid_size_id); rcd+=nco_inq_dimid(in_id,"nv_a",&src_grid_corners_id); rcd+=nco_inq_dimid(in_id,"nv_b",&dst_grid_corners_id); rcd+=nco_inq_dimid(in_id,"src_grid_rank",&src_grid_rank_id); rcd+=nco_inq_dimid(in_id,"dst_grid_rank",&dst_grid_rank_id); if(nco_rgr_mpf_typ != nco_rgr_mpf_Tempest){ rcd+=nco_inq_dimid_flg(in_id,"num_wgts",&num_wgts_id); if(rcd != NC_NOERR){ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s reports map-file does not contain \"num_wgts\" dimension. ERWG always produces this as an orphan dimension, so post-processing could have removed it without harming other map-file fields. No harm, no foul.\n",nco_prg_nm_get(),fnc_nm); rcd=NC_NOERR; } /* !rcd */ } /* !nco_rgr_mpf_Tempest */ rcd+=nco_inq_dimid(in_id,"n_s",&num_links_id); break; default: (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map-file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); /* NB: This return never executes because nco_dfl_case_generic_err() calls exit() Return placed here to suppress clang -Wsometimes-uninitialized warnings This is done many other times throughout the code, though explained only once, here */ return NCO_ERR; break; } /* end switch */ /* Use dimension IDs to get dimension sizes */ rcd+=nco_inq_dimlen(in_id,num_links_id,&mpf.num_links); if(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only){ rcd+=nco_inq_dimlen(in_id,src_grid_size_id,&mpf.src_grid_size); rcd+=nco_inq_dimlen(in_id,dst_grid_size_id,&mpf.dst_grid_size); rcd+=nco_inq_dimlen(in_id,src_grid_corners_id,&mpf.src_grid_corners); rcd+=nco_inq_dimlen(in_id,dst_grid_corners_id,&mpf.dst_grid_corners); rcd+=nco_inq_dimlen(in_id,src_grid_rank_id,&mpf.src_grid_rank); rcd+=nco_inq_dimlen(in_id,dst_grid_rank_id,&mpf.dst_grid_rank); /* TempestRemap does not generate num_wgts */ if(nco_rgr_mpf_typ == nco_rgr_mpf_MBTR || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || num_wgts_id == NC_MIN_INT){ mpf.num_wgts=int_CEWI; }else{ rcd+=nco_inq_dimlen(in_id,num_wgts_id,&mpf.num_wgts); } /* !num_wgts_id */ assert(mpf.src_grid_size < INT_MAX && mpf.dst_grid_size < INT_MAX); }else{ mpf.src_grid_size=long_CEWI; mpf.dst_grid_size=long_CEWI; mpf.src_grid_corners=long_CEWI; mpf.dst_grid_corners=long_CEWI; mpf.src_grid_rank=long_CEWI; mpf.dst_grid_rank=long_CEWI; mpf.num_wgts=int_CEWI; } /* !ESMF_weight_only */ cnv_sng=strdup("normalization"); nco_rgr_nrm_typ_enm nco_rgr_nrm_typ=nco_rgr_nrm_nil; 
att_val=nco_char_att_get(in_id,NC_GLOBAL,cnv_sng); if(att_val){ if(strstr(att_val,"fracarea")) nco_rgr_nrm_typ=nco_rgr_nrm_fracarea; /* 20190912: map_gx1v6T_to_1x1_bilin.nc and map_0.1T_tripole_to_0.1x0.1_bilin.nc store "fracarea" in normalization attribute. I think NCAR created both maps for POP, probably by running ERWG with option --norm_type=fracarea. Hence "fracarea" seems to be the NCAR-way of guaranteeing that ESMF re-normalization is not performed by default. */ if(strstr(att_val,"destarea")) nco_rgr_nrm_typ=nco_rgr_nrm_destarea; /* ESMF conserve "aave" and bilinear "bilin" generate "destarea" by default */ if(strstr(att_val,"none")) nco_rgr_nrm_typ=nco_rgr_nrm_none; if(att_val) att_val=(char *)nco_free(att_val); }else{ /* 20150712: Tempest does not store a normalization attribute 20170620: ESMF weight_only does not store a normalization attribute 20190312: NCO does not yet store a normalization attribute */ if(nco_rgr_mpf_typ == nco_rgr_mpf_MBTR || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_NCO || nco_rgr_mpf_typ == nco_rgr_mpf_unknown || nco_rgr_mpf_typ == nco_rgr_mpf_ESMF_weight_only) nco_rgr_nrm_typ=nco_rgr_nrm_unknown; } /* endif normalization */ assert(nco_rgr_nrm_typ != nco_rgr_nrm_nil); if(cnv_sng) cnv_sng=(char *)nco_free(cnv_sng); cnv_sng=strdup("map_method"); nco_rgr_mth_typ_enm nco_rgr_mth_typ=nco_rgr_mth_nil; att_val=nco_char_att_get(in_id,NC_GLOBAL,cnv_sng); if(att_val){ if(strcasestr(att_val,"Conservative")) nco_rgr_mth_typ=nco_rgr_mth_conservative; if(strcasestr(att_val,"Bilinear")) nco_rgr_mth_typ=nco_rgr_mth_bilinear; if(strcasestr(att_val,"none")) nco_rgr_mth_typ=nco_rgr_mth_none; }else{ /* Tempest does not store a map_method attribute */ if(nco_rgr_mpf_typ == nco_rgr_mpf_MBTR || nco_rgr_mpf_typ == nco_rgr_mpf_NCO || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_unknown) nco_rgr_mth_typ=nco_rgr_mth_unknown; } /* endif */ if(nco_rgr_mth_typ == nco_rgr_mth_nil) (void)fprintf(stdout,"%s: WARNING %s reports map global attribute %s = %s does not match SCRIP/ESMF conventions that support only values of \"Conservative\" and \"Bilinear\" for this attribute. Proceeding anyway...\n",nco_prg_nm_get(),fnc_nm,cnv_sng,att_val ? att_val : "(nil)"); /* Free att_val only after above diagnostic, which may print it */ if(att_val) att_val=(char *)nco_free(att_val); if(cnv_sng) cnv_sng=(char *)nco_free(cnv_sng); if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stderr,"%s: INFO %s regridding input metadata and grid sizes: ",nco_prg_nm_get(),fnc_nm); (void)fprintf(stderr,"mapfile_generator = %s, map_method = %s, normalization = %s, src_grid_size = n_a = %li, dst_grid_size = n_b = %li, src_grid_corners = nv_a = %li, dst_grid_corners = nv_b = %li, src_grid_rank = %li, dst_grid_rank = %li, num_links = n_s = %li, num_wgts = %li\n",nco_rgr_mpf_sng(nco_rgr_mpf_typ),nco_rgr_mth_sng(nco_rgr_mth_typ),nco_rgr_nrm_sng(nco_rgr_nrm_typ),mpf.src_grid_size,mpf.dst_grid_size,mpf.src_grid_corners,mpf.dst_grid_corners,mpf.src_grid_rank,mpf.dst_grid_rank,mpf.num_links,mpf.num_wgts); } /* endif dbg */ /* 20190726: Allow normalization type to be "none" for bilinear regridding which UKMO SCRIP files set to "none" */ if(nco_rgr_mth_typ == nco_rgr_mth_conservative && nco_rgr_nrm_typ == nco_rgr_nrm_none){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports requested normalization type = %s is not yet supported. Specifically, masks specified by a mask variable (dst_grid_imask,mask_b) are ignored. 
More specifically, any destination mask information is assumed to be built into the weight array so that no source points will contribute to masked locations. Talk to Charlie if you want this changed.\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ)); nco_exit(EXIT_FAILURE); } /* !msk */ /* Got to here in bullet-proofing code for weight-only map-files */ if(nco_rgr_mpf_typ == nco_rgr_mpf_ESMF_weight_only) (void)fprintf(stderr,"%s: WARNING %s reached end of ESMF_weight_only section\n",nco_prg_nm_get(),fnc_nm); assert(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only); /* Set type of grid conversion */ if(mpf.src_grid_rank == 1 && mpf.dst_grid_rank == 1) nco_rgr_typ=nco_rgr_grd_1D_to_1D; if(mpf.src_grid_rank == 1 && mpf.dst_grid_rank == 2) nco_rgr_typ=nco_rgr_grd_1D_to_2D; if(mpf.src_grid_rank == 2 && mpf.dst_grid_rank == 1) nco_rgr_typ=nco_rgr_grd_2D_to_1D; if(mpf.src_grid_rank == 2 && mpf.dst_grid_rank == 2) nco_rgr_typ=nco_rgr_grd_2D_to_2D; assert(nco_rgr_typ != nco_rgr_grd_nil); /* Save typing later */ nco_bool flg_grd_in_1D_dat_in_2D=False; nco_bool flg_grd_in_1D=False; nco_bool flg_grd_in_2D=False; nco_bool flg_grd_out_1D=False; nco_bool flg_grd_out_2D=False; if(nco_rgr_typ == nco_rgr_grd_1D_to_1D || nco_rgr_typ == nco_rgr_grd_1D_to_2D) flg_grd_in_1D=True; if(nco_rgr_typ == nco_rgr_grd_2D_to_1D || nco_rgr_typ == nco_rgr_grd_2D_to_2D) flg_grd_in_2D=True; if(nco_rgr_typ == nco_rgr_grd_1D_to_1D || nco_rgr_typ == nco_rgr_grd_2D_to_1D) flg_grd_out_1D=True; if(nco_rgr_typ == nco_rgr_grd_1D_to_2D || nco_rgr_typ == nco_rgr_grd_2D_to_2D) flg_grd_out_2D=True; int dmn_nbr_hrz_crd; /* [nbr] Number of horizontal dimensions in output grid */ if(flg_grd_out_2D) dmn_nbr_hrz_crd=2; else dmn_nbr_hrz_crd=1; /* Obtain grid values necessary to compute output latitude and longitude coordinates */ int area_dst_id; /* [id] Area variable ID */ int col_src_adr_id; /* [id] Source address (col) variable ID */ int dmn_sz_in_int_id; /* [id] Source grid dimension sizes ID */ int dmn_sz_out_int_id; /* [id] Destination grid dimension sizes ID */ int dst_grd_crn_lat_id; /* [id] Destination grid corner latitudes variable ID */ int dst_grd_crn_lon_id; /* [id] Destination grid corner longitudes variable ID */ int dst_grd_ctr_lat_id; /* [id] Destination grid center latitudes variable ID */ int dst_grd_ctr_lon_id; /* [id] Destination grid center longitudes variable ID */ int frc_dst_id; /* [id] Fraction variable ID */ int msk_dst_id=NC_MIN_INT; /* [id] Mask variable ID */ int row_dst_adr_id; /* [id] Destination address (row) variable ID */ int wgt_raw_id; /* [id] Remap matrix variable ID */ switch(nco_rgr_mpf_typ){ /* Obtain fields whose name depends on mapfile type */ case nco_rgr_mpf_SCRIP: rcd+=nco_inq_varid(in_id,"dst_grid_area",&area_dst_id); /* ESMF: area_b */ rcd+=nco_inq_varid(in_id,"dst_grid_center_lon",&dst_grd_ctr_lon_id); /* ESMF: xc_b */ rcd+=nco_inq_varid(in_id,"dst_grid_center_lat",&dst_grd_ctr_lat_id); /* ESMF: yc_b */ rcd+=nco_inq_varid(in_id,"dst_grid_corner_lon",&dst_grd_crn_lon_id); /* ESMF: xv_b */ rcd+=nco_inq_varid(in_id,"dst_grid_corner_lat",&dst_grd_crn_lat_id); /* ESMF: yv_b */ rcd+=nco_inq_varid(in_id,"dst_grid_frac",&frc_dst_id); /* ESMF: frac_b */ rcd+=nco_inq_varid(in_id,"dst_address",&row_dst_adr_id); /* ESMF: row */ rcd+=nco_inq_varid(in_id,"src_address",&col_src_adr_id); /* ESMF: col */ rcd+=nco_inq_varid(in_id,"remap_matrix",&wgt_raw_id); /* NB: remap_matrix[num_links,num_wgts] != S[n_s] */ break; case nco_rgr_mpf_ESMF: case nco_rgr_mpf_ESMF_weight_only: case nco_rgr_mpf_MBTR: case 
nco_rgr_mpf_NCO: case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: if(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only){ rcd+=nco_inq_varid(in_id,"area_b",&area_dst_id); /* SCRIP: dst_grid_area */ rcd+=nco_inq_varid(in_id,"xc_b",&dst_grd_ctr_lon_id); /* SCRIP: dst_grid_center_lon */ rcd+=nco_inq_varid(in_id,"yc_b",&dst_grd_ctr_lat_id); /* SCRIP: dst_grid_center_lat */ rcd+=nco_inq_varid(in_id,"xv_b",&dst_grd_crn_lon_id); /* SCRIP: dst_grid_corner_lon */ rcd+=nco_inq_varid(in_id,"yv_b",&dst_grd_crn_lat_id); /* SCRIP: dst_grid_corner_lat */ rcd+=nco_inq_varid(in_id,"frac_b",&frc_dst_id); /* SCRIP: dst_grid_frac */ } /* !nco_rgr_mpf_ESMF_weight_only */ rcd+=nco_inq_varid(in_id,"row",&row_dst_adr_id); /* SCRIP: dst_address */ rcd+=nco_inq_varid(in_id,"col",&col_src_adr_id); /* SCRIP: src_address */ rcd+=nco_inq_varid(in_id,"S",&wgt_raw_id); /* NB: remap_matrix[num_links,num_wgts] != S[n_s] */ break; default: (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); /* NB: This return never executes because nco_dfl_case_generic_err() calls exit() Return placed here to suppress clang -Wsometimes-uninitialized warnings This is done many other times throughout the code, though explained only once, here */ return NCO_ERR; break; } /* end switch */ /* Obtain fields whose presence depends on mapfile type */ nco_bool flg_msk_out=rgr->flg_msk_out; /* [flg] Add mask to output */ nco_bool flg_msk_apl=rgr->flg_msk_apl; /* [flg] Apply msk_out to variables after regridding */ msk_dst_id=NC_MIN_INT; if(flg_msk_out || flg_msk_apl){ switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_SCRIP: rcd=nco_inq_varid_flg(in_id,"dst_grid_imask",&msk_dst_id); /* ESMF: mask_b */ break; case nco_rgr_mpf_ESMF: case nco_rgr_mpf_MBTR: case nco_rgr_mpf_NCO: case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: /* 20190315: TempestRemap did not propagate mask_a/b until ~201902 20210519: MBTR did not propagate mask_a/b as of ~202105 */ rcd=nco_inq_varid_flg(in_id,"mask_b",&msk_dst_id); /* SCRIP: dst_grid_imask */ break; default: (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map-file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); } /* !nco_rgr_mpf_typ */ if(rcd == NC_ENOTVAR){ if(flg_msk_apl){ (void)fprintf(stderr,"%s: ERROR %s reports that user requested (with --mask_apply) the regridder to apply the destination mask field to variables after regridding. Unfortunately, the map-file lacks a destination mask of the expected name (usually \"mask_b\").\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* flg_msk_apl */ (void)fprintf(stderr,"%s: INFO %s reports map-file lacks mask_b. %sContinuing anyway without masks...\n",nco_prg_nm_get(),fnc_nm,(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_MBTR) ? 
"Probably this is either a TempestRemap map-file created before ~201902 when TR began to propagate mask_a/b variables, or it is a MOAB-TempestRemap file which has never (as of 202105) propagated mask_a/b variables" : ""); rcd=NC_NOERR; } /* !rcd */ if(msk_dst_id == NC_MIN_INT) flg_msk_out=False; } /* !flg_msk_out */ /* Obtain fields whose names are independent of mapfile type */ rcd+=nco_inq_varid(in_id,"src_grid_dims",&dmn_sz_in_int_id); rcd+=nco_inq_varid(in_id,"dst_grid_dims",&dmn_sz_out_int_id); int lon_psn_src; /* [idx] Ordinal position of longitude in rectangular source grid dimension-size array */ int lat_psn_src; /* [idx] Ordinal position of latitude in rectangular source grid dimension-size array */ int lon_psn_dst=int_CEWI; /* [idx] Ordinal position of longitude in rectangular destination grid dimension-size array */ int lat_psn_dst=int_CEWI; /* [idx] Ordinal position of latitude in rectangular destination grid dimension-size array */ if(flg_grd_in_2D){ lon_psn_src=0; /* SCRIP introduced [lon,lat] convention because more natural for Fortran */ lat_psn_src=1; if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest){ /* Until 20150814, Tempest stored [src/dst]_grid_dims as [lat,lon] unlike SCRIP's [lon,lat] order Newer behavior follows SCRIP [lon,lat] order Challenge: Support both older and newer Tempest mapfiles Tempest (unlike SCRIP and ESMF) annotates mapfile [src/dst]_grid_dims with attributes that identify axis to which each element of [src/dst]_grid_dims refers Solution: Use Tempest mapfile [src/dst]_grid_dims attributes "name0" and/or "name1" to determine if axes' positions follow old order */ att_val=nco_char_att_get(in_id,dmn_sz_in_int_id,name0_sng); if(att_val){ if(strstr(att_val,"lat")){ lon_psn_src=1; lat_psn_src=0; } /* !lat */ if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ } /* !Tempest */ } /* !flg_grd_in_2D */ if(flg_grd_out_2D){ lon_psn_dst=0; lat_psn_dst=1; if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest){ att_val=nco_char_att_get(in_id,dmn_sz_in_int_id,name0_sng); if(att_val){ if(strstr(att_val,"lat")){ lon_psn_dst=1; lat_psn_dst=0; } /* !lat */ if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ } /* !Tempest */ } /* !flg_grd_out_2D */ const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ double *area_out; /* [sr] Area of destination grid */ double *frc_out=NULL; /* [frc] Fraction of destination grid */ double *lat_bnd_out=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular destination grid */ double *lat_crn_out=NULL; /* [dgr] Latitude corners of rectangular destination grid */ double *lat_ctr_out=NULL_CEWI; /* [dgr] Latitude centers of rectangular destination grid */ double *lat_ntf_out=NULL; /* [dgr] Latitude interfaces of rectangular destination grid */ double *lat_wgt_out=NULL; /* [dgr] Latitude weights of rectangular destination grid */ double *lon_bnd_out=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular destination grid */ double *lon_crn_out=NULL; /* [dgr] Longitude corners of rectangular destination grid */ double *lon_ctr_out=NULL_CEWI; /* [dgr] Longitude centers of rectangular destination grid */ double *lon_ntf_out=NULL; /* [dgr] Longitude interfaces of rectangular destination grid */ double *slat_ctr_out=NULL_CEWI; /* [dgr] Latitude centers of staggered FV destination grid */ double 
*slat_wgt_out=NULL_CEWI; /* [frc] Latitude weights of staggered FV destination grid */ double *slon_ctr_out=NULL_CEWI; /* [dgr] Longitude centers of staggered FV destination grid */ double *wgt_raw; /* [frc] Remapping weights */ int *col_src_adr; /* [idx] Source address (col) */ int *row_dst_adr; /* [idx] Destination address (row) */ int *msk_out=NULL; /* [flg] Mask on destination grid */ int *dmn_sz_in_int; /* [nbr] Array of dimension sizes of source grid */ int *dmn_sz_out_int; /* [nbr] Array of dimension sizes of destination grid */ long *dmn_cnt_in=NULL; long *dmn_cnt_out=NULL; long *dmn_cnt=NULL; long *dmn_srt=NULL; long *dmn_srd=NULL; long idx; /* [idx] Counting index for unrolled grids */ /* Allocate space to hold dimension metadata for destination grid */ dmn_srt=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_cnt=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_srd=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_srt[0]=0L; dmn_cnt[0]=mpf.src_grid_rank; dmn_sz_in_int=(int *)nco_malloc(mpf.src_grid_rank*nco_typ_lng((nc_type)NC_INT)); rcd=nco_get_vara(in_id,dmn_sz_in_int_id,dmn_srt,dmn_cnt,dmn_sz_in_int,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=mpf.dst_grid_rank; dmn_sz_out_int=(int *)nco_malloc(mpf.dst_grid_rank*nco_typ_lng((nc_type)NC_INT)); rcd=nco_get_vara(in_id,dmn_sz_out_int_id,dmn_srt,dmn_cnt,dmn_sz_out_int,(nc_type)NC_INT); /* Check-for and workaround faulty Tempest and MPAS-O/I grid sizes */ if(flg_grd_in_1D && (mpf.src_grid_size != dmn_sz_in_int[0])){ (void)fprintf(stdout,"%s: INFO %s reports input grid dimension sizes disagree: mpf.src_grid_size = %ld != %d = dmn_sz_in[0]. Problem may be caused by incorrect src_grid_dims variable. This is a known issue with some TempestRemap mapfiles generated prior to ~20150901, and in some ESMF mapfiles for MPAS-O/I. This problem can be safely ignored if workaround succeeds. Attempting workaround ...\n",nco_prg_nm_get(),fnc_nm,mpf.src_grid_size,dmn_sz_in_int[0]); dmn_sz_in_int[0]=mpf.src_grid_size; } /* !bug */ if(flg_grd_out_1D && (mpf.dst_grid_size != dmn_sz_out_int[0])){ (void)fprintf(stdout,"%s: INFO %s reports output grid dimension sizes disagree: mpf.dst_grid_size = %ld != %d = dmn_sz_out[0]. Problem may be caused by incorrect dst_grid_dims variable. This is a known issue with some TempestRemap mapfiles generated prior to ~20150901, and in some ESMF mapfiles for MPAS-O/I. This problem can be safely ignored if workaround succeeds. 
Attempting workaround ...\n",nco_prg_nm_get(),fnc_nm,mpf.dst_grid_size,dmn_sz_out_int[0]); dmn_sz_out_int[0]=mpf.dst_grid_size; } /* !bug */ long col_nbr_in; /* [idx] Number of columns in source grid */ long lon_nbr_in; /* [idx] Number of longitudes in rectangular source grid */ long lat_nbr_in; /* [idx] Number of latitudes in rectangular source grid */ const size_t grd_sz_in=mpf.src_grid_size; /* [nbr] Number of elements in single layer of input grid */ const size_t grd_sz_out=mpf.dst_grid_size; /* [nbr] Number of elements in single layer of output grid */ if(flg_grd_in_1D){ col_nbr_in=dmn_sz_in_int[0]; lon_nbr_in=dmn_sz_in_int[0]; lat_nbr_in=dmn_sz_in_int[0]; }else if(flg_grd_in_2D){ col_nbr_in=0; lon_nbr_in=dmn_sz_in_int[lon_psn_src]; lat_nbr_in=dmn_sz_in_int[lat_psn_src]; /* Sanity-check */ assert(lat_nbr_in*lon_nbr_in == (long)grd_sz_in); } /* !src_grid_rank */ const int bnd_tm_nbr_out=2; /* [nbr] Number of boundaries for output time */ int bnd_nbr_out=int_CEWI; /* [nbr] Number of boundaries for output time and rectangular grid coordinates, and number of vertices for output non-rectangular grid coordinates */ long col_nbr_out=long_CEWI; /* [nbr] Number of columns in destination grid */ long lon_nbr_out=long_CEWI; /* [nbr] Number of longitudes in rectangular destination grid */ long lat_nbr_out=long_CEWI; /* [nbr] Number of latitudes in rectangular destination grid */ long slat_nbr_out=long_CEWI; /* [nbr] Number of latitudes in staggered FV grid destination grid */ long slon_nbr_out=long_CEWI; /* [nbr] Number of longitudes in staggered FV grid destination grid */ if(flg_grd_out_1D){ bnd_nbr_out=mpf.dst_grid_corners; col_nbr_out=dmn_sz_out_int[0]; lat_nbr_out=dmn_sz_out_int[0]; lon_nbr_out=dmn_sz_out_int[0]; /* Sanity-check */ assert(col_nbr_out == (long)grd_sz_out); }else if(flg_grd_out_2D){ lat_nbr_out=dmn_sz_out_int[lat_psn_dst]; lon_nbr_out=dmn_sz_out_int[lon_psn_dst]; col_nbr_out=lat_nbr_out*lon_nbr_out; slat_nbr_out=lat_nbr_out-1L; slon_nbr_out=lon_nbr_out; /* Sanity-check */ assert(lat_nbr_out*lon_nbr_out == (long)grd_sz_out); } /* !dst_grid_rank */ /* Ensure coordinates are in degrees not radians for simplicity and CF-compliance NB: ${DATA}/scrip/rmp_T42_to_POP43_conserv.nc has [xy]?_a in degrees and [xy]?_b in radians!
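When radians are detected below (via a units attribute matching "radian"), the remedy is one uniform scale factor applied alike to centers, corners, and bounds, e.g., lat_ctr_out[idx]*=rdn2dgr with rdn2dgr = 180/pi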
*/ nco_bool flg_crd_rdn=False; /* [flg] Destination coordinates are in radians not degrees */ char unt_sng[]="units"; /* [sng] netCDF-standard units attribute name */ att_val=nco_char_att_get(in_id,dst_grd_ctr_lat_id,unt_sng); if(att_val){ /* Match "radian" and "radians" */ if(strstr(att_val,"radian")) flg_crd_rdn=True; if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ nco_bool flg_grd_out_crv=False; /* [flg] Curvilinear coordinates */ nco_bool flg_grd_out_rct=False; /* [flg] Rectangular coordinates */ const nc_type crd_typ_out=NC_DOUBLE; if(flg_grd_out_2D){ lon_ctr_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); lon_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*grd_sz_out*nco_typ_lng(crd_typ_out)); lat_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); rcd=nco_get_vara(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,lat_ctr_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_out; dmn_cnt[1]=mpf.dst_grid_corners; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_crn_out,crd_typ_out); rcd=nco_get_vara(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,lat_crn_out,crd_typ_out); /* User may specify curvilinear grid (with --rgr crv). Otherwise, manually test for curvilinear destination grid. */ flg_grd_out_crv=rgr->flg_crv; /* [flg] Curvilinear coordinates */ if(flg_grd_out_crv){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Output grid specified to be %s\n",nco_prg_nm_get(),flg_grd_out_crv ? "Curvilinear" : "Rectangular"); }else{ long idx_tst=long_CEWI; /* [idx] Index of first latitude or longitude */ for(idx=0;idx<(long)grd_sz_out;idx++){ if(idx%lon_nbr_out == 0) idx_tst=idx; if(lat_ctr_out[idx] != lat_ctr_out[idx_tst]) break; // (void)fprintf(stdout,"%s: DEBUG lat_ctr_out[%li] = %g, lat_ctr_out[%li] = %g\n",nco_prg_nm_get(),idx,lat_ctr_out[idx],idx_tst,lat_ctr_out[idx_tst]); /* fxm: also test lon */ } /* !rectangular */ if(idx != (long)grd_sz_out) flg_grd_out_crv=True; else flg_grd_out_rct=True; if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO Output grid detected to be %s\n",nco_prg_nm_get(),flg_grd_out_crv ?
"Curvilinear" : "Rectangular"); } /* !flg_grd_out_crv */ if(flg_grd_out_crv) bnd_nbr_out=mpf.dst_grid_corners; if(flg_grd_out_rct) bnd_nbr_out=2; /* NB: Assumes rectangular latitude and longitude and is invalid for other quadrilaterals */ } /* !flg_grd_out_2D */ if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stderr,"%s: INFO %s grid conversion type = %s with expected input and prescribed output grid sizes: ",nco_prg_nm_get(),fnc_nm,nco_rgr_grd_sng(nco_rgr_typ)); (void)fprintf(stderr,"lat_in = %li, lon_in = %li, col_in = %li, lat_out = %li, lon_out = %li, col_out = %li\n",lat_nbr_in,lon_nbr_in,col_nbr_in,lat_nbr_out,lon_nbr_out,col_nbr_out); } /* endif dbg */ /* Allocate space for and obtain coordinates */ if(flg_grd_out_1D){ lon_ctr_out=(double *)nco_malloc(col_nbr_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(col_nbr_out*nco_typ_lng(crd_typ_out)); lon_bnd_out=(double *)nco_malloc(col_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); lat_bnd_out=(double *)nco_malloc(col_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); } /* !flg_grd_out_1D */ if(flg_grd_out_rct){ if(lat_ctr_out) lat_ctr_out=(double *)nco_free(lat_ctr_out); if(lon_ctr_out) lon_ctr_out=(double *)nco_free(lon_ctr_out); if(lat_crn_out) lat_crn_out=(double *)nco_free(lat_crn_out); if(lon_crn_out) lon_crn_out=(double *)nco_free(lon_crn_out); lon_ctr_out=(double *)nco_malloc(lon_nbr_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(lat_nbr_out*nco_typ_lng(crd_typ_out)); lon_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*lon_nbr_out*nco_typ_lng(crd_typ_out)); lat_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*lat_nbr_out*nco_typ_lng(crd_typ_out)); lat_wgt_out=(double *)nco_malloc(lat_nbr_out*nco_typ_lng(crd_typ_out)); lon_ntf_out=(double *)nco_malloc((lon_nbr_out+1L)*nco_typ_lng(crd_typ_out)); lat_ntf_out=(double *)nco_malloc((lat_nbr_out+1L)*nco_typ_lng(crd_typ_out)); lon_bnd_out=(double *)nco_malloc(lon_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); lat_bnd_out=(double *)nco_malloc(lat_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); } /* !flg_grd_out_rct */ /* Arrays unroll into all longitudes for first latitude, then second latitude, ... Obtain longitudes by reading first block contiguously (unstrided) Obtain latitudes by reading unrolled data with stride of lon_nbr */ if(flg_grd_out_1D){ dmn_srt[0]=0L; dmn_cnt[0]=col_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); dmn_srt[0]=0L; dmn_cnt[0]=col_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,lat_ctr_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr_out; dmn_cnt[1]=bnd_nbr_out; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_bnd_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr_out; dmn_cnt[1]=bnd_nbr_out; rcd=nco_get_vara(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,lat_bnd_out,crd_typ_out); if(flg_crd_rdn){ for(idx=0;idx<col_nbr_out;idx++){ lon_ctr_out[idx]*=rdn2dgr; lat_ctr_out[idx]*=rdn2dgr; } /* !idx */ for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++){ lon_bnd_out[idx]*=rdn2dgr; lat_bnd_out[idx]*=rdn2dgr; } /* !idx */ } /* !rdn */ /* Is 1D interface information usable? 
Yes, unless all interfaces are zero NB: fxm Better algorithm for "usable" is that not all interfaces in any cell are equal */ flg_bnd_1D_usable=True; for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++) if(lon_bnd_out[idx] != 0.0) break; if(idx == col_nbr_out*bnd_nbr_out){ flg_bnd_1D_usable=False; }else{ for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++) if(lat_bnd_out[idx] != 0.0) break; if(idx == col_nbr_out*bnd_nbr_out) flg_bnd_1D_usable=False; } /* !usable */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0;idx<lat_nbr_out;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr_out[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr_out;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd_out[bnd_nbr_out*idx+bnd_idx],bnd_idx == bnd_nbr_out-1 ? "]\n" : ", "); } /* end loop over lat */ for(idx=0;idx<lon_nbr_out;idx++){ (void)fprintf(stdout,"lon[%li] = %g, vertices = ",idx,lon_ctr_out[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr_out;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lon_bnd_out[bnd_nbr_out*idx+bnd_idx],bnd_idx == bnd_nbr_out-1 ? "]\n" : ", "); } /* end loop over lon */ } /* endif dbg */ } /* !flg_grd_out_1D */ if(flg_grd_out_rct){ /* fxm: sub-sample these from the already-read ctr/crn arrays */ dmn_srt[0L]=0L; dmn_cnt[0L]=lon_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr_out; dmn_srd[0L]=lon_nbr_out; rcd=nco_get_vars(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,dmn_srd,lat_ctr_out,crd_typ_out); dmn_srt[0L]=dmn_srt[1]=0L; dmn_cnt[0L]=lon_nbr_out; dmn_cnt[1]=mpf.dst_grid_corners; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_crn_out,crd_typ_out); dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr_out; dmn_srd[0L]=lon_nbr_out; dmn_srt[1]=0L; dmn_cnt[1]=mpf.dst_grid_corners; dmn_srd[1]=1L; rcd=nco_get_vars(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,dmn_srd,lat_crn_out,crd_typ_out); if(flg_crd_rdn){ for(idx=0L;idx<lon_nbr_out;idx++) lon_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<lat_nbr_out;idx++) lat_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<lon_nbr_out*mpf.dst_grid_corners;idx++) lon_crn_out[idx]*=rdn2dgr; for(idx=0L;idx<lat_nbr_out*mpf.dst_grid_corners;idx++) lat_crn_out[idx]*=rdn2dgr; } /* !rdn */ } /* !flg_grd_out_rct */ if(flg_grd_out_crv){ if(flg_crd_rdn){ for(idx=0L;idx<(long)grd_sz_out;idx++) lon_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out;idx++) lat_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out*mpf.dst_grid_corners;idx++) lon_crn_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out*mpf.dst_grid_corners;idx++) lat_crn_out[idx]*=rdn2dgr; } /* !rdn */ } /* !flg_grd_out_crv */ /* Allocate space for and obtain area, fraction, and mask, which are needed for both 1D and 2D grids */ area_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,area_dst_id,dmn_srt,dmn_cnt,area_out,crd_typ_out); frc_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,frc_dst_id,dmn_srt,dmn_cnt,frc_out,crd_typ_out); if(msk_dst_id != NC_MIN_INT){ msk_out=(int *)nco_malloc(grd_sz_out*nco_typ_lng(NC_INT)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,msk_dst_id,dmn_srt,dmn_cnt,msk_out,(nc_type)NC_INT); } /* !msk */ /* Derive 2D interface boundaries from lat and lon grid-center values NB: Procedures to derive interfaces from midpoints on rectangular grids are theoretically possible However, ESMF often outputs interface values (e.g., yv_b)
for midpoint coordinates (e.g., yc_b) For example, ACME standard map from ne120np4 to 181x360 has yc_b[0] = yv_b[0] = -90.0 Latitude = -90 is, by definition, not a midpoint coordinate This appears to be an artifact of the non-physical representation of the FV grid, i.e., a grid center located at the pole where longitudes collapse in the model, but cannot be represented as collapsed on a rectangular 2D grid with non-zero areas. Unfortunately, ESMF supports this nonsense by labeling the grid center as at the pole so that applications can easily diagnose an FV grid when they read-in datasets. A superior application could diagnose FV just fine from actual non-polar gridcell centers Maybe ESMF could introduce a flag or something to indicate/avoid this special case? Safer to read boundary interfaces directly from grid corner/vertice arrays in map file Derivation of boundaries xv_b, yv_b from _correct_ xc_b, yc_b is as follows Do not implement this procedure until resolving midpoint/center issue described above: lon_ntf_out[0L]=0.5*(lon_ctr_out[0L]+lon_ctr_out[lon_nbr_out-1L])-180.0; // Extrapolation lat_ntf_out[0L]=lat_ctr_out[0L]-0.5*(lat_ctr_out[1L]-lat_ctr_out[0L]); // Extrapolation for(idx=1L;idx<lon_nbr_out;idx++) lon_ntf_out[idx]=0.5*(lon_ctr_out[idx-1L]+lon_ctr_out[idx]); for(idx=1L;idx<lat_nbr_out;idx++) lat_ntf_out[idx]=0.5*(lat_ctr_out[idx-1L]+lat_ctr_out[idx]); lon_ntf_out[lon_nbr_out]=lon_ntf_out[0L]+360.0; lat_ntf_out[lat_nbr_out]=lat_ctr_out[lat_nbr_out-1L]+0.5*(lat_ctr_out[lat_nbr_out-1L]-lat_ctr_out[lat_nbr_out-2L]); */ if(flg_grd_out_rct){ double lon_spn; /* [dgr] Longitude span */ double lat_spn; /* [dgr] Latitude span */ nco_bool flg_s2n=True; /* I [enm] Latitude grid-direction is South-to-North */ if(lat_ctr_out[1L] < lat_ctr_out[0L]) flg_s2n=False; /* Obtain 1-D rectangular interfaces from unrolled 1-D vertice arrays */ for(idx=0L;idx<lon_nbr_out;idx++) lon_ntf_out[idx]=lon_crn_out[mpf.dst_grid_corners*idx]; /* 20201009 The four possible CCW RLL orderings start with the ul, ll, lr, or ur vertice NCO grid generators store vertices in order (0,1,2,3)=(ul,ll,lr,ur) NCO final latitude is in upper vertices (0,3) for S2N grids, lower vertices (1,2) for N2S grids NCO final longitude is in RHS vertices (2,3) for S2N and N2S grids Need generic algorithm to pick easternmost longitude for any of the four CCW orderings What is ESMF vertice ordering? or does ESMF always copy from input grid? Most grid generators probably start with ul or ll so vertice 2 is good choice for easternmost */ // lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-(mpf.dst_grid_corners-1L)]; // ESMF?
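/* One generic possibility (a sketch only, not active code): rather than probing fixed vertice slots as done below, scan every corner of the final gridcell and keep its easternmost longitude, which would handle all four CCW orderings under the assumption that cell corners do not straddle the 0/360 branch cut:
   double lon_est; // [dgr] Easternmost corner longitude of final gridcell (hypothetical helper)
   long crn_idx; // [idx] Counting index for corners
   lon_est=lon_crn_out[mpf.dst_grid_corners*(lon_nbr_out-1L)];
   for(crn_idx=1L;crn_idx<(long)mpf.dst_grid_corners;crn_idx++)
     if(lon_crn_out[mpf.dst_grid_corners*(lon_nbr_out-1L)+crn_idx] > lon_est) lon_est=lon_crn_out[mpf.dst_grid_corners*(lon_nbr_out-1L)+crn_idx];
   The slot-probing below is retained as the proven method */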
lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-2L]; // NCO lr if(lon_ntf_out[lon_nbr_out-1] == lon_ntf_out[lon_nbr_out]) lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-1L]; // NCO ur if(lon_ntf_out[lon_nbr_out-1] == lon_ntf_out[lon_nbr_out]) lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-3L]; // NCO ll assert(lon_ntf_out[lon_nbr_out-1] != lon_ntf_out[lon_nbr_out]); lon_spn=lon_ntf_out[lon_nbr_out]-lon_ntf_out[0L]; for(idx=0L;idx<lat_nbr_out;idx++) lat_ntf_out[idx]=lat_crn_out[mpf.dst_grid_corners*idx]; if(flg_s2n) lat_ntf_out[lat_nbr_out]=max_dbl(lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-1L],lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-2L]); else lat_ntf_out[lat_nbr_out]=min_dbl(lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-1L],lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-2L]); assert(lat_ntf_out[lat_nbr_out] != lat_ntf_out[lat_nbr_out-1]); lat_spn=fabs(lat_ntf_out[lat_nbr_out]-lat_ntf_out[0L]); /* Place 1-D rectangular interfaces into 2-D coordinate boundaries */ for(idx=0L;idx<lon_nbr_out;idx++){ lon_bnd_out[2L*idx]=lon_ntf_out[idx]; lon_bnd_out[2L*idx+1L]=lon_ntf_out[idx+1L]; } /* !lon_nbr_out */ for(idx=0L;idx<lat_nbr_out;idx++){ lat_bnd_out[2L*idx]=lat_ntf_out[idx]; lat_bnd_out[2L*idx+1L]=lat_ntf_out[idx+1L]; } /* !lat_nbr_out */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0L;idx<lon_nbr_out;idx++) (void)fprintf(stdout,"lon[%li] = [%g, %g, %g]\n",idx,lon_bnd_out[2L*idx],lon_ctr_out[idx],lon_bnd_out[2L*idx+1L]); for(idx=0L;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li] = [%g, %g, %g]\n",idx,lat_bnd_out[2L*idx],lat_ctr_out[idx],lat_bnd_out[2L*idx+1L]); } /* endif dbg */ /* Global or regional grid? */ nco_grd_xtn_enm nco_grd_xtn; /* [enm] Extent of grid */ if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; /* Diagnose type of latitude output grid by testing second latitude center against formulae */ double lat_ctr_tst_eqa; double lat_ctr_tst_fv; if(flg_s2n) lat_ctr_tst_eqa=lat_ntf_out[0L]+lat_spn*1.5/lat_nbr_out; else lat_ctr_tst_eqa=lat_ntf_out[0L]-lat_spn*1.5/lat_nbr_out; if(flg_s2n) lat_ctr_tst_fv=lat_ntf_out[0L]+lat_spn/(lat_nbr_out-1L); else lat_ctr_tst_fv=lat_ntf_out[0L]-lat_spn/(lat_nbr_out-1L); double lat_ctr_tst_gss; /* In diagnosing grids, agreement to slightly worse than single-precision is "good enough for government work" Hence some comparisons cast from double to float before comparison 20150526: T42 grid from SCRIP and related maps, and NCL-generated Gaussian grids for CESM, are accurate to at most ~eight digits 20150611: map_ne120np4_to_fv801x1600_bilin.150418.nc has yc_b[1600]=-89.775000006 not expected exact value lat_ctr[1]=-89.775000000000006 20170521: T62 grid from NCEP-NCAR Reanalysis 1 is worse than single precision, has yc_[192]=-86.6531 not expected exact value lat_ctr[1]=-86.6532 */ if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_eqa) nco_grd_lat_typ=nco_grd_lat_eqa; if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_fv) nco_grd_lat_typ=nco_grd_lat_fv; double *wgt_Gss_out=NULL; // [frc] Gaussian weights double precision if(nco_grd_lat_typ == nco_grd_lat_nil){ /* Check for Gaussian grid */ double *lat_sin_out; // [frc] Sine of Gaussian latitudes double precision lat_sin_out=(double *)nco_malloc(lat_nbr_out*sizeof(double)); wgt_Gss_out=(double *)nco_malloc(lat_nbr_out*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr_out,flg_s2n,lat_sin_out,wgt_Gss_out); lat_ctr_tst_gss=rdn2dgr*asin(lat_sin_out[1L]); /* Gaussian weights on 
output grid will be double-precision accurate Grid itself is kept as user-specified so area diagnosed by ESMF_RegridWeightGen may be slightly inconsistent with weights */ if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stderr,"%s: INFO %s reports lat_ctr_out[1] = %g, lat_ctr_tst_gss = %g\n",nco_prg_nm_get(),fnc_nm,lat_ctr_out[1L],lat_ctr_tst_gss); if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_gss) nco_grd_lat_typ=nco_grd_lat_gss; if(lat_sin_out) lat_sin_out=(double *)nco_free(lat_sin_out); } /* !Gaussian */ if(nco_grd_lat_typ == nco_grd_lat_nil){ /* If still of unknown type, this 2D grid may be weird This occurs, e.g., with POP3 destination grid Change gridtype from nil (which means not-yet-set) to unknown (which means none of the others matched) */ nco_grd_lat_typ=nco_grd_lat_unk; } /* !nil */ /* Currently grd_lat_typ and grd_2D_typ are equivalent, though that may be relaxed in future */ if(nco_grd_lat_typ == nco_grd_lat_unk) nco_grd_2D_typ=nco_grd_2D_unk; else if(nco_grd_lat_typ == nco_grd_lat_gss) nco_grd_2D_typ=nco_grd_2D_gss; else if(nco_grd_lat_typ == nco_grd_lat_fv) nco_grd_2D_typ=nco_grd_2D_fv; else if(nco_grd_lat_typ == nco_grd_lat_eqa) nco_grd_2D_typ=nco_grd_2D_eqa; else assert(False); if(nco_grd_lon_typ == nco_grd_lon_nil){ /* NB: Longitude grid diagnosis is susceptible to mistakes when input mapfile embeds common faulty grids, e.g., ACME *150418* FV maps map_ne30np4_to_fv129x256_aave.150418.nc is diagnosed as regional grid of unknown type because of input grid flaws map_ne30np4_to_fv129x256_aave.20150901.nc is (correctly) diagnosed as global grid with lon_Grn_ctr */ if( (float)lon_ctr_out[0L] == 0.0f && (float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_Grn_ctr; else if((float)lon_ctr_out[0L] == -180.0f && (float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_180_ctr; else if((float)lon_ntf_out[0L] == 0.0f && (float)lon_ntf_out[1L] == (float)(lon_ntf_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_Grn_wst; else if((float)lon_ntf_out[0L] == -180.0f && (float)lon_ntf_out[1L] == (float)(lon_ntf_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_180_wst; else if((float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_bb; else nco_grd_lon_typ=nco_grd_lon_unk; } /* !nco_grd_lon_typ */ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output latitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lat_sng(nco_grd_lat_typ)); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output longitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lon_sng(nco_grd_lon_typ)); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output grid-extent: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_xtn_sng(nco_grd_xtn)); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ slat_ctr_out=(double *)nco_malloc(slat_nbr_out*nco_typ_lng(crd_typ_out)); slat_wgt_out=(double *)nco_malloc(slat_nbr_out*nco_typ_lng(crd_typ_out)); slon_ctr_out=(double *)nco_malloc(slon_nbr_out*nco_typ_lng(crd_typ_out)); for(idx=0L;idx<slat_nbr_out;idx++){ slat_ctr_out[idx]=lat_ntf_out[idx+1L]; slat_wgt_out[idx]=fabs(sin(dgr2rdn*lat_ctr_out[idx+1L])-sin(dgr2rdn*lat_ctr_out[idx])); /* fabs() ensures positive area in n2s grids */ } /* !slat_nbr_out */ for(idx=0L;idx<slon_nbr_out;idx++){ slon_ctr_out[idx]=lon_ntf_out[idx]; } /* !slon_nbr_out */ } /* !nco_grd_lat_fv */ switch(nco_grd_lat_typ){ case
nco_grd_lat_eqa: case nco_grd_lat_fv: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=fabs(sin(dgr2rdn*lat_bnd_out[2*idx+1L])-sin(dgr2rdn*lat_bnd_out[2*idx])); /* fabs() ensures positive area in n2s grids */ break; case nco_grd_lat_gss: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=wgt_Gss_out[idx]; if(wgt_Gss_out) wgt_Gss_out=(double *)nco_free(wgt_Gss_out); break; case nco_grd_lat_unk: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=0.0; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING %s reports unknown output latitude grid-type. Unable to guess what latitude weights should be.\n",nco_prg_nm_get(),fnc_nm); break; default: nco_dfl_case_generic_err(); break; } /* end nco_grd_lat_typ switch */ /* Fuzzy test of latitude weight normalization */ lat_wgt_ttl=0.0; for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_ttl+=lat_wgt_out[idx]; if(nco_grd_lat_typ == nco_grd_lat_eqa || nco_grd_lat_typ == nco_grd_lat_fv){ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd_out[2L*(lat_nbr_out-1L)+1L])-sin(dgr2rdn*lat_bnd_out[0L])); /* fabs() ensures positive area in n2s grids */ assert(fabs(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc) < eps_rlt); if(lat_wgt_ttl_xpc < 0.0) abort(); /* CEWI Use lat_wgt_ttl_xpc at least once outside of assert() to avoid gcc 4.8.2 set-but-not-used warning */ } /* !nco_grd_lat_eqa, !nco_grd_lat_fv */ } /* !flg_grd_out_rct */ /* When possible, ensure area_out is non-zero 20150722: ESMF documentation says "The grid area array is only output when the conservative remapping option is used" Actually, ESMF does (always?) output area, but area == 0.0 unless conservative remapping is used 20150721: ESMF bilinear interpolation map ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.150418.nc has area == 0.0 20150710: Tempest regionally refined grids like bilinearly interpolated CONUS for ACME RRM has area_out == 0 20150821: ESMF always outputs area_out == 0.0 for bilinear interpolation Check whether NCO must diagnose and provide its own area_out */ /* If area_out contains any zero... */ for(idx=0;idx<(long)grd_sz_out;idx++) if(area_out[idx] == 0.0) break; if(idx != (long)grd_sz_out){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Output grid detected with zero-valued output area(s) at idx = %ld (and likely others, too).\n",nco_prg_nm_get(),idx); } /* !zero */ for(idx=0;idx<(long)grd_sz_out;idx++) if(area_out[idx] != 0.0) break; if(idx == (long)grd_sz_out){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports area_out from mapfile is everywhere zero. This is expected for bilinearly interpolated output maps produced by ESMF_RegridWeightGen. ",nco_prg_nm_get(),fnc_nm); if(flg_grd_out_2D && flg_grd_out_rct && (bnd_nbr_out == 2 || bnd_nbr_out == 4)){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses quadrilateral area for rectangular output grids from a formula that assumes that cell boundaries follow arcs of constant latitude and longitude. This differs from the area of cells with boundaries that follow great circle arcs (used by, e.g., ESMF_RegridWeightGen and TempestRemap). Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. 
If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else if(flg_grd_out_2D && flg_grd_out_crv && (bnd_nbr_out == 2 || bnd_nbr_out == 4)){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses quadrilateral area for curvilinear output grids from formulae that assume that cell boundaries follow great circle arcs (as do, e.g., ESMF_RegridWeightGen and TempestRemap). This differs from the area of cells with boundaries that follow lines of constant latitude or longitude. Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else if(flg_grd_out_1D && flg_bnd_1D_usable){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses spherical polygon area for unstructured output grids from formulae that assume that cell boundaries follow great circle arcs (as do, e.g., ESMF_RegridWeightGen and TempestRemap). This differs from the area of cells with boundaries that follow lines of constant latitude or longitude. Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else{ /* !1D */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"However, NCO cannot find enough boundary information, or it is too stupid about spherical trigonometry, to diagnose area_out. NCO will output an area variable (named \"%s\") copied from the input mapfile. This area will be everywhere zero.\n",rgr->area_nm); } /* !2D */ } /* !area */ if(flg_dgn_area_out){ if(flg_grd_out_1D && flg_bnd_1D_usable){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"INFO: Diagnosing area_out for 1D grid\n"); /* Area of unstructured grids requires spherical trigonometry */ nco_sph_plg_area(rgr,lat_bnd_out,lon_bnd_out,col_nbr_out,bnd_nbr_out,area_out); } /* !1D */ if(flg_grd_out_crv){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"INFO: Diagnosing area_out for curvilinear grid\n"); /* Area of curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,lat_crn_out,lon_crn_out,grd_sz_out,bnd_nbr_out,area_out); } /* !flg_grd_out_crv */ if(flg_grd_out_rct && nco_grd_2D_typ != nco_grd_2D_unk){ /* Mr. Enenstein and George O.
Abell taught me the area of spherical zones Spherical zone area is exact and faithful to underlying rectangular equi-angular grid However, ESMF and Tempest approximate spherical polygons as connected by great circle arcs fxm: Distinguish spherical zone shapes (e.g., equi-angular) from great circle arcs (e.g., unstructured polygons) */ for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) area_out[lat_idx*lon_nbr_out+lon_idx]=fabs(dgr2rdn*(lon_bnd_out[2*lon_idx+1]-lon_bnd_out[2*lon_idx])*(sin(dgr2rdn*lat_bnd_out[2*lat_idx+1])-sin(dgr2rdn*lat_bnd_out[2*lat_idx]))); /* fabs() ensures positive area in n2s grids */ } /* !spherical zones */ } /* !flg_dgn_area_out */ if(rgr->tst == -1){ /* Passing --rgr tst=-1 causes regridder to fail here This failure should cause host climo script to abort */ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports regridder instructed to fail here. This tests failure mode in climo scripts...\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !tst */ /* Verify that frc_out is sometimes non-zero ESMF: "The grid frac arrays (frac_a and frac_b) are calculated by ESMF_RegridWeightGen. For conservative remapping, the grid frac array returns the area fraction of the grid cell which participates in the remapping. For bilinear and patch remapping, the destination grid frac array [frac_b] is one where the grid point participates in the remapping and zero otherwise. For bilinear and patch remapping, the source grid frac array is always set to zero." SCRIP: Similar to ESMF For both ESMF+SCRIP frac_[ab] are computed by the weight-generation algorithm and are not specified as part of the input grids How does an input ocean grid indicate that, say, half the gridcell is land and half ocean? Does it use the area variable to tell the weight generation algorithm that a gridcell is fractional? In other words does it use grid_imask=1 and grid_area=0.5*full_gridcell_area and, e.g., T=273.0? */ for(idx=0;idx<(long)grd_sz_out;idx++) if(frc_out[idx] != 0.0) break; if(idx == (long)grd_sz_out){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports frc_out == frac_b contains all zeros\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !always zero */ /* Test whether frc_out is ever zero... */ for(idx=0;idx<(long)grd_sz_out;idx++) if(frc_out[idx] == 0.0) break; if(nco_dbg_lvl_get() >= nco_dbg_std) if(idx != (long)grd_sz_out) (void)fprintf(stdout,"%s: INFO %s reports frc_out == frac_b contains zero-elements (e.g., at 1D idx = %ld)\n",nco_prg_nm_get(),fnc_nm,idx); /* Normalizing by frc_out is redundant iff frc_out == 1.0, so we can save time without sacrificing accuracy However, frc_out is often (e.g., for CS <-> RLL maps) close but not equal to unity (ESMF_RegridWeightGen issue?) 
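To make the stakes concrete (a schematic sketch, using names var_in/var_out for the fields and the weight arrays wgt_raw, row_dst_adr, col_src_adr read below): regridding accumulates var_out[row_dst_adr[lnk_idx]]+=wgt_raw[lnk_idx]*var_in[col_src_adr[lnk_idx]] over all links, after which destarea-style normalization divides each var_out[dst_idx] by frc_out[dst_idx], a division that is a no-op wherever frc_out == 1.0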
Hence, decide whether to normalize by frc_out by diagnosing the furthest excursion of frc_out from unity */ nco_bool flg_frc_out_one=True; /* [flg] Destination gridcell fraction frc_out == frac_b is in [1-epsilon,frc_out,1+epsilon] */ nco_bool flg_frc_out_wrt=False; /* [flg] Write destination gridcell fraction frc_out == frac_b to regridded files */ double frc_out_dff_one; /* [frc] Deviation of frc_out from 1.0 */ double frc_out_dff_one_max=0.0; /* [frc] Maximum deviation of frc_out from 1.0 */ long idx_max_dvn; /* [idx] Index of maximum deviation from 1.0 */ for(idx=0;idx<(long)grd_sz_out;idx++){ frc_out_dff_one=fabs(frc_out[idx]-1.0); if(frc_out_dff_one > frc_out_dff_one_max){ frc_out_dff_one_max=frc_out_dff_one; idx_max_dvn=idx; } /* !max */ } /* !idx */ if(frc_out_dff_one_max > eps_rlt) flg_frc_out_one=False; nco_bool flg_frc_nrm=False; /* [flg] Must normalize by frc_out == frac_b because frc_out is not always unity and specified normalization is destarea or none */ if(!flg_frc_out_one && /* If fraction is sometimes "far" from 1.0 and ... */ ((nco_rgr_mpf_typ == nco_rgr_mpf_ESMF && nco_rgr_mth_typ == nco_rgr_mth_conservative && (nco_rgr_nrm_typ == nco_rgr_nrm_destarea || nco_rgr_nrm_typ == nco_rgr_nrm_none)) || /* ESMF map-file specifies conservative regridding with "destarea" or "none" or ... */ (nco_rgr_mpf_typ != nco_rgr_mpf_ESMF)) /* 20191003: Weight-generator does not adhere to ESMF "normalization type" convention */ && True){ flg_frc_nrm=True; /* Avoid writing frc_out unless discrepancies are particularly egregious Otherwise would write frc_out for standard remaps like ne30->fv129x256 for which eps=2.46e-13 */ double eps_rlt_wrt_thr=3.0e-13; /* 20181104: Never write frac_b for CMIP6! */ /* if(frc_out_dff_one_max > eps_rlt_wrt_thr) flg_frc_out_wrt=True; */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s reports global metadata specifies conservative remapping with normalization of type = %s. Furthermore, destination fractions frc_dst = dst_frac = frac_b = frc_out contain non-unity elements (maximum deviation from unity of %g exceeds hard-coded (in variable eps_rlt) relative-epsilon threshold of %g for frc_out[%ld] = %g). Thus normalization issues will be explicitly treated.
Will apply \'destarea\' normalization (i.e., divide by non-zero frc_out[dst_idx]) to all regridded arrays.\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ),frc_out_dff_one_max,eps_rlt,idx_max_dvn,frc_out[idx_max_dvn]); if(nco_dbg_lvl_get() >= nco_dbg_std && flg_frc_out_wrt) (void)fprintf(stdout,"%s: INFO %s Maximum deviation %g exceeds threshold of %g that triggers automatic writing of fractional destination area as variable named frac_b in regridded output.\n",nco_prg_nm_get(),fnc_nm,frc_out_dff_one_max,eps_rlt_wrt_thr); } /* !sometimes non-unity */ if(nco_dbg_lvl_get() >= nco_dbg_std && flg_frc_nrm && rgr->flg_rnr){ // 20190918: Weaken from WARNING to INFO because NCO no longer renormalizes when using "destarea" maps unless specifically requested to with --rnr_thr (void)fprintf(stdout,"%s: INFO %s reports manual request to renormalize partially overlapped destination gridcells (i.e., gridcells with non-unity frc_dst = dst_frac = frac_b) to preserve mean-value of valid fraction of source gridcells (usually most useful for state variables), rather than dilute valid-fraction mean over total destination gridcell area to preserve area-integral of source data (the default, often most useful for ensuring global conservation of fluxes).\n",nco_prg_nm_get(),fnc_nm); //(void)fprintf(stdout,"%s: INFO %s reports manual request (with --rnr) to renormalize fields with non-unity frc_dst = dst_frac = frac_b at same time global metadata specifies normalization type = %s. Normalizing twice can be an error, depending on intent of each. Charlie is all ears on how NCO should handle this :)\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ)); //nco_exit(EXIT_FAILURE); } /* !flg_rnr */ /* Detailed summary of 2D grids now available including quality-checked coordinates and area */ if(flg_grd_out_2D && nco_dbg_lvl_get() >= nco_dbg_sbr){ lat_wgt_ttl=0.0; area_out_ttl=0.0; if(flg_grd_out_rct){ (void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm); for(idx=0;idx<lat_nbr_out;idx++) lat_wgt_ttl+=lat_wgt_out[idx]; } /* !flg_grd_out_rct */ for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) area_out_ttl+=area_out[lat_idx*lon_nbr_out+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_out_ttl,area_out_ttl/(4.0*M_PI)); if(flg_grd_out_rct){ for(idx=0;idx<lon_nbr_out;idx++) (void)fprintf(stdout,"lon[%li] = [%g, %g, %g]\n",idx,lon_bnd_out[2*idx],lon_ctr_out[idx],lon_bnd_out[2*idx+1]); for(idx=0;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li] = [%g, %g, %g]\n",idx,lat_bnd_out[2*idx],lat_ctr_out[idx],lat_bnd_out[2*idx+1]); for(idx=0;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li], wgt[%li] = %20.15f, %20.15f\n",idx,idx,lat_ctr_out[idx],lat_wgt_out[idx]); } /* !flg_grd_out_rct */ if(nco_dbg_lvl_get() > nco_dbg_crr) for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) (void)fprintf(stdout,"lat[%li] = %g, lon[%li] = %g, area[%li,%li] = %g\n",lat_idx,lat_ctr_out[lat_idx],lon_idx,lon_ctr_out[lon_idx],lat_idx,lon_idx,area_out[lat_idx*lon_nbr_out+lon_idx]); assert(area_out_ttl > 0.0); assert(area_out_ttl <= 4.0*M_PI + 5.0e-15); } /* !flg_grd_out_2D && !dbg */ /* Allocate space for and obtain weights and addresses */ wgt_raw=(double *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_DOUBLE),fnc_nm,"Unable to malloc() value buffer for remapping weights"); col_src_adr=(int 
*)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() value buffer for remapping addresses"); row_dst_adr=(int *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() value buffer for remapping addresses"); /* Obtain remap matrix addresses and weights from map file */ dmn_srt[0]=0L; dmn_cnt[0]=mpf.num_links; rcd=nco_get_vara(in_id,col_src_adr_id,dmn_srt,dmn_cnt,col_src_adr,NC_INT); rcd=nco_get_vara(in_id,row_dst_adr_id,dmn_srt,dmn_cnt,row_dst_adr,NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=mpf.num_links; if(nco_rgr_mpf_typ != nco_rgr_mpf_SCRIP){ rcd=nco_get_vara(in_id,wgt_raw_id,dmn_srt,dmn_cnt,wgt_raw,NC_DOUBLE); }else{ /* SCRIP mapfiles store 2D weight array remap_matrix[num_links,num_wgts] Apply only first weight for first-order conservative accuracy (i.e., area overlap) Apply all three weights for second-order conservative accuracy (by including gradients from centroid to vertices) */ dmn_srd[0]=1L; dmn_srt[1]=0L; dmn_cnt[1]=1L; dmn_srd[1]=mpf.num_wgts; rcd=nco_get_vars(in_id,wgt_raw_id,dmn_srt,dmn_cnt,dmn_srd,wgt_raw,NC_DOUBLE); } /* !SCRIP */ /* Pre-subtract one from row/column addresses (stored, by convention, as Fortran indices) to optimize later access with C indices */ size_t lnk_nbr; /* [nbr] Number of links */ size_t lnk_idx; /* [idx] Link index */ lnk_nbr=mpf.num_links; for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) row_dst_adr[lnk_idx]--; for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) col_src_adr[lnk_idx]--; if(nco_dbg_lvl_get() >= nco_dbg_io){ (void)fprintf(stdout,"idx row_dst col_src wgt_raw\n"); for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) (void)fprintf(stdout,"%li %d %d %g\n",lnk_idx,row_dst_adr[lnk_idx],col_src_adr[lnk_idx],wgt_raw[lnk_idx]); } /* endif dbg */ /* Free memory associated with input file */ if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt) dmn_cnt=(long *)nco_free(dmn_cnt); if(dmn_srd) dmn_srd=(long *)nco_free(dmn_srd); /* Close input netCDF file */ nco_close(in_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); /* Above this line, fl_in and in_id refer to map file Below this line, fl_in and in_id refer to input file to be regridded */ /* Initialize */ in_id=rgr->in_id; out_id=rgr->out_id; /* Sanity check that input data file matches expectations from mapfile */ char *col_nm_in=rgr->col_nm_in; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ char *lat_nm_in=rgr->lat_nm_in; /* [sng] Name of input dimension to recognize as latitude */ char *lon_nm_in=rgr->lon_nm_in; /* [sng] Name of input dimension to recognize as longitude */ int dmn_id_col=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lat; /* [id] Dimension ID */ int dmn_id_lon; /* [id] Dimension ID */ /* 20160503 Discover coordinates via CF Convention if indicated This copies method used in nco_grd_nfr() */ /* Begin CF-coordinates block */ cf_crd_sct *cf=NULL; char *rgr_var; /* [sng] Variable for special regridding treatment */ nco_bool flg_cf=False; /* [flg] Follow CF Coordinates convention to find and infer grid */ rgr_var=rgr->var_nm; if(rgr_var){ /* Infer grid from special variable Intended to be variable that has both horizontal dimensions and "coordinates" attribute, e.g., ncks --cdl -m ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc | grep coordinates 4LFTX_221_SPDY_S113:coordinates = "gridlat_221 gridlon_221" ; Usage: ncks -O -D 3 --rgr infer --rgr_var=ALBDO_221_SFC_S113 --rgr grid=${HOME}/grd_narr.nc ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc ~/foo.nc */ char 
crd_sng[]="coordinates"; /* CF-standard coordinates attribute name */ cf=(cf_crd_sct *)nco_malloc(sizeof(cf_crd_sct)); cf->crd=False; /* [flg] CF coordinates information is complete */ cf->crd_id[0]=NC_MIN_INT; /* [id] Coordinate ID, first */ cf->crd_id[1]=NC_MIN_INT; /* [id] Coordinate ID, second */ cf->crd_nm[0]=NULL; /* [sng] Coordinate name, first */ cf->crd_nm[1]=NULL; /* [sng] Coordinate name, second */ cf->crd_sng=NULL; /* [sng] Coordinates attribute value */ cf->dmn_id[0]=NC_MIN_INT; /* [id] Dimension ID, first */ cf->dmn_id[1]=NC_MIN_INT; /* [id] Dimension ID, second */ cf->dmn_nm[0]=NULL; /* [sng] Dimension name, first */ cf->dmn_nm[1]=NULL; /* [sng] Dimension name, second */ cf->unt_sng[0]=NULL; /* [sng] Units string, first coordinate */ cf->unt_sng[1]=NULL; /* [sng] Units string, second coordinate */ cf->var_id=NC_MIN_INT; /* [id] Coordinate variable ID */ cf->var_nm=NULL; /* [sng] Coordinates variable name */ cf->var_type=NC_NAT; /* [enm] Coordinates variable type */ if((rcd=nco_inq_varid_flg(in_id,rgr_var,&cf->var_id)) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports special \"coordinates\" variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd */ cf->crd_sng=nco_char_att_get(in_id,cf->var_id,crd_sng); if(cf->crd_sng){ cf->crd=True; }else{ /* !rcd && att_typ */ (void)fprintf(stderr,"%s: WARNING %s reports coordinates variable %s does not have character-valued \"coordinates\" attribute. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd && att_typ */ /* Valid coordinates attribute requires two coordinate names separated by space character */ char *crd_nm[NCO_MAX_CRD_PER_VAR]; /* [sng] Coordinate name start position */ char *crd_dpl; /* [sng] Modifiable duplicate of coordinates string */ char *spc_ptr; /* [sng] Pointer to space character (' ') */ int crd_nbr=0; /* [nbr] Number of names in coordinates attribute */ int crd_spt=0; /* [nbr] Number of "spatial-like" (that include "degree" in units) coordinates */ int crd_idx=0; /* [idx] Counter for coordinate names */ for(crd_idx=0;crd_idx<NCO_MAX_CRD_PER_VAR;crd_idx++) crd_nm[crd_idx]=NULL; crd_dpl=(char *)strdup(cf->crd_sng); /* Search for spaces starting from end of string */ while((spc_ptr=strrchr(crd_dpl,' '))){ crd_nm[crd_nbr]=spc_ptr+1L; crd_nbr++; /* NUL-terminate so next search ends here */ *spc_ptr='\0'; } /* !sbs_ptr */ /* Final coordinate name begins where coordinate string starts */ crd_nm[crd_nbr]=crd_dpl; /* Change crd_nbr from 0-based index to actual coordinate number */ crd_nbr++; if(crd_nbr < 2){ (void)fprintf(stderr,"%s: WARNING %s found only %d coordinate(s) in \"coordinates\" attribute \"%s\", at least two are required. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,crd_nbr,cf->crd_sng); goto skp_cf; } /* !crd_nbr */ /* If more than two coordinate names are present, choose first two (searching backwards from end) with "degree" in units attributes, otherwise just choose first two */ crd_idx=crd_spt=0; while(crd_spt < 2 && crd_idx < crd_nbr){ cf->crd_nm[crd_spt]=crd_nm[crd_idx]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[crd_spt],&cf->crd_id[crd_spt])) == NC_NOERR){ cf->unt_sng[crd_spt]=nco_char_att_get(in_id,cf->crd_id[crd_spt],unt_sng); if(cf->unt_sng[crd_spt]){ if(strcasestr(cf->unt_sng[crd_spt],"degree")){ /* Increment count of spatial-like coordinates... 
*/ crd_spt++; }else{ /* ...or free() memory allocated during search */ cf->unt_sng[crd_spt]=(char *)nco_free(cf->unt_sng[crd_spt]); } /* !strcasestr() */ } /* !rcd && att_typ */ } /* !rcd */ crd_idx++; } /* !crd_spt */ /* If while()-loop above was successful, our search is over Otherwise, use first two coordinate names regardless of units, and print more diagnostics */ if(crd_spt < 2){ cf->crd_nm[0]=crd_nm[0]; cf->crd_nm[1]=crd_nm[1]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[0],&cf->crd_id[0])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s not found. Turning-off CF coordinates search for this file.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0]); goto skp_cf; } /* !rcd */ if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[1],&cf->crd_id[1])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s not found. Turning-off CF coordinates search for this file.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1]); goto skp_cf; } /* !rcd */ cf->unt_sng[0]=nco_char_att_get(in_id,cf->crd_id[0],unt_sng); if(cf->unt_sng[0]){ if(!strcasestr(cf->unt_sng[0],"degrees_")) (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->unt_sng[0]); } /* !rcd && att_typ */ cf->unt_sng[1]=nco_char_att_get(in_id,cf->crd_id[1],unt_sng); if(cf->unt_sng[1]){ if(!strcasestr(cf->unt_sng[1],"degrees_")) (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1],cf->unt_sng[1]); } /* !rcd && att_typ */ } /* !crd_spt */ int crd_rnk; /* [nbr] Coordinate rank */ rcd=nco_inq_varndims(in_id,cf->crd_id[0],&crd_rnk); if(crd_rnk != 2){ (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s has %i dimension(s). Skipping CF coordinates method.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],crd_rnk); goto skp_cf; } /* !crd_rnk */ rcd=nco_inq_vardimid(in_id,cf->crd_id[0],cf->dmn_id); cf->dmn_nm[0]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); cf->dmn_nm[1]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); rcd=nco_inq_dimname(in_id,cf->dmn_id[0],cf->dmn_nm[0]); rcd=nco_inq_dimname(in_id,cf->dmn_id[1],cf->dmn_nm[1]); /* "coordinates" convention does not guarantee lat, lon are specified in that order Use "units" values, if any, to determine order In absence of "units", assume order is lat, lon */ nco_bool crd0_is_lat=False; /* [flg] First coordinate is latitude */ nco_bool crd0_is_lon=False; /* [flg] First coordinate is longitude */ nco_bool crd1_is_lat=False; /* [flg] Second coordinate is latitude */ nco_bool crd1_is_lon=False; /* [flg] Second coordinate is longitude */ if(cf->unt_sng[0]){ if(!strcasecmp(cf->unt_sng[0],"degrees_north")) crd0_is_lat=True; if(!strcasecmp(cf->unt_sng[0],"degrees_east")) crd0_is_lon=True; } /* endif */ if(cf->unt_sng[1]){ if(!strcasecmp(cf->unt_sng[1],"degrees_north")) crd1_is_lat=True; if(!strcasecmp(cf->unt_sng[1],"degrees_east")) crd1_is_lon=True; } /* endif */ assert((crd0_is_lat && crd1_is_lon) || (crd0_is_lon && crd1_is_lat)); int idx_lat; int idx_lon; if(crd0_is_lat && crd1_is_lon){ idx_lat=0; idx_lon=1; }else{ idx_lat=1; idx_lon=0; } /* endif */ /* Dimensions and coordinates have been vetted. Store as primary lookup names.
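(Schematic example: an OMI L2 swath coordinate lat(nTimes,nXtrack) would vet to dmn_nm[0]=nTimes and dmn_nm[1]=nXtrack)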
Dimensions are always returned in order [LRV,MRV]=[0,1] LRV is along-track direction, and MRV is across-track (at least in NASA data) Internally we label LRV as "lat" and MRV as "lon" so that code looks similar for curvilinear and rectangular grids */ dmn_id_lat=cf->dmn_id[0]; dmn_id_lon=cf->dmn_id[1]; /* Subtlety: lat_nm_in is coordinate (variable+dimension) name when specified from command-line (as in nco_grd_nfr()), dimension name when found through CF-method (as in nco_rgr_wgt()). This confusing distinction could be avoided by passing command-line dimension names through to nco_rgr_wgt(). However, that route would require complex priorities for what to do when passing command-line coordinate names not dimension names and vice-versa. */ lat_nm_in=strdup(cf->dmn_nm[0]); lon_nm_in=strdup(cf->dmn_nm[1]); //lat_nm_in=strdup(cf->crd_nm[idx_lat]); //lon_nm_in=strdup(cf->crd_nm[idx_lon]); /* Next four lines unnecessary in nco_rgr_wgt() which only needs dimension names (it reads input coordinates from map-file not data-file) */ //lat_ctr_id=cf->crd_id[idx_lat]; //lon_ctr_id=cf->crd_id[idx_lon]; //lat_dmn_nm=strdup(cf->dmn_nm[0]); //lon_dmn_nm=strdup(cf->dmn_nm[1]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s \"coordinates\" attribute \"%s\" points to coordinates %s and %s. Latitude coordinate \"%s\" has dimensions \"%s\" and \"%s\". Longitude coordinate \"%s\" has dimensions \"%s\" and \"%s\".\n",nco_prg_nm_get(),fnc_nm,rgr_var,cf->crd_sng,cf->crd_nm[0],cf->crd_nm[1],cf->crd_nm[idx_lat],cf->dmn_nm[idx_lat],cf->dmn_nm[idx_lon],cf->crd_nm[idx_lon],cf->dmn_nm[idx_lat],cf->dmn_nm[idx_lon]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s Coordinates %s and %s \"units\" values are \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->crd_nm[1],cf->unt_sng[0] ? cf->unt_sng[0] : "(non-existent)",cf->unt_sng[1] ? cf->unt_sng[1] : "(non-existent)"); /* Clean-up CF coordinates memory */ if(crd_dpl) crd_dpl=(char *)nco_free(crd_dpl); if(cf->crd_sng) cf->crd_sng=(char *)nco_free(cf->crd_sng); if(cf->dmn_nm[0]) cf->dmn_nm[0]=(char *)nco_free(cf->dmn_nm[0]); if(cf->dmn_nm[1]) cf->dmn_nm[1]=(char *)nco_free(cf->dmn_nm[1]); if(cf->unt_sng[0]) cf->unt_sng[0]=(char *)nco_free(cf->unt_sng[0]); if(cf->unt_sng[1]) cf->unt_sng[1]=(char *)nco_free(cf->unt_sng[1]); // if(foo) foo=(char *)nco_free(foo); } /* !rgr_var */ /* goto skp_cf */ skp_cf: /* free() any abandoned cf structure now */ if(!flg_cf) if(cf) cf=(cf_crd_sct *)nco_free(cf); rcd=NC_NOERR; /* End CF-coordinates block */ if(flg_grd_in_1D){ long col_nbr_in_dat; /* [nbr] Number of columns in input datafile */ /* Check default or command-line option first, then search usual suspects, and if that fails then guess unstructured dimension is dimension in input file with size n_a expected by input map file, suggested by PJCS Using internal database names first ensures users can pick between multiple dimensions of size n_a 20180313: fxm New PJCS algorithm is superior, should eliminate internal database for unstructured grids?
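The PJCS size-match itself is easy to illustrate: given a hypothetical map-file with src_grid_dims[0] = n_a = 48602 (ne30np4-sized), any input-file dimension of length 48602 would be accepted as the unstructured dimension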
Database is necessary for 2D grids because otherwise no good way to disambiguate latitude from longitude */ if(col_nm_in && (rcd=nco_inq_dimid_flg(in_id,col_nm_in,&dmn_id_col)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"lndgrid",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */ else if((rcd=nco_inq_dimid_flg(in_id,"nCells",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("nCells"); /* MPAS-O/I */ else if((rcd=nco_inq_dimid_flg(in_id,"nEdges",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("nEdges"); /* MPAS-O/I */ else if((rcd=nco_inq_dimid_flg(in_id,"ncol_d",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("ncol_d"); /* EAM dynamics grid */ else if((rcd=nco_inq_dimid_flg(in_id,"ncol_p",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("ncol_p"); /* EAM physics grid */ else if((rcd=nco_inq_dimid_flg(in_id,"sounding_id",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("sounding_id"); /* OCO2 */ /* 20180605: Database matches to above names may be false-positives ALM/CLM/CTSM/ELM store all possible dimension names that archived variables could use NCO only prints dimensions used in variables, while ncdump prints all dimensions From ncdump we find usually unused ALM/CLM/CTSM/ELM dimensions: gridcell, lndunit, column, pft, levurb, numrad, levsno Check that matched dimension has expected size: */ if(dmn_id_col != NC_MIN_INT){ rcd=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr_in_dat); if(col_nbr_in != col_nbr_in_dat){ dmn_id_col=NC_MIN_INT; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s database-prioritized unstructured dimension candidate \"%s\" has size not expected by supplied map-file: mapfile col_nbr_in = %ld != %ld = col_nbr_in from datafile. HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,col_nm_in,col_nbr_in,col_nbr_in_dat); } /* !col_nbr_in */ }else{ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s expects data on an unstructured grid yet cannot find a dimension name that matches the usual suspects for unstructured dimensions (ncol, gridcell, lndgrid, nCells, nEdges, sounding_id).
Consider specifying horizontal dimension name to ncks with \"--rgr col_nm=foo\" or to ncremap with \"ncremap -R '--rgr col_nm=foo'\", and consider requesting the NCO project to add this horizontal dimension name to its internal database.\n",nco_prg_nm_get(),fnc_nm); } /* !dmn_id_col */ if(dmn_id_col == NC_MIN_INT){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s Proceeding with fallback algorithm to guess unstructured dimension as first dimension in data file of equal size to that expected by supplied map-file...\n",nco_prg_nm_get(),fnc_nm); /* 20180312: Unstructured dimension must have same size as input map file, suggested by PJCS */ int *dmn_ids_in; /* [nbr] Input file dimension IDs */ int dmn_nbr_in; /* [nbr] Number of dimensions in input file */ const int flg_prn=0; /* [enm] Parent flag */ rcd=nco_inq_dimids(in_id,&dmn_nbr_in,NULL,flg_prn); dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); rcd=nco_inq_dimids(in_id,NULL,dmn_ids_in,flg_prn); /* Find dimension, if any, with same size as map "a" src_grid_dims[0] = n_a dimension */ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ dmn_id_col=dmn_ids_in[dmn_idx]; rcd=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr_in_dat); if(col_nbr_in == col_nbr_in_dat){ rcd=nco_inq_dimname(in_id,dmn_id_col,col_nm_in); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s found that dimension %s in datafile has same size (n_a = %ld) expected by map-file. Assuming %s is the unstructured dimension.\n",nco_prg_nm_get(),fnc_nm,col_nm_in,col_nbr_in,col_nm_in); break; } /* !col_nbr_in */ } /* !dmn_idx */ if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in); if(dmn_idx == dmn_nbr_in){ dmn_id_col=NC_MIN_INT; (void)fprintf(stdout,"%s: WARNING received a map-file constructed to process data on an unstructured (one-dimensional) grid, but %s (aka \"the regridder\") cannot find a dimension in the input data file (or, with ncremap, a possibly already subsetted intermediate file) that matches the size of the unstructured dimension in the supplied map-file = src_grd_dims[0] = n_a = %ld.\nHINT: Ensure at least one member of the variable extraction list has a spatial dimension of size = %ld\n",nco_prg_nm_get(),fnc_nm,col_nbr_in,col_nbr_in); (void)fprintf(stdout,"%s: INFO %s reports a third, last-ditch (aka \"Hail Mary\") workaround may work. 
The Hail-Mary allows logically 1D map-files to regrid logically 2D datasets, so long as the product of the horizontal dimension sizes in the 2D input data file equals the map-file 1D dimension size.\n",nco_prg_nm_get(),fnc_nm); /* Hail Mary algorithm: Use following 2D input grid block to identify horizontal coordinates and dimensions */ flg_grd_in_1D_dat_in_2D=True; flg_grd_in_2D=True; //nco_exit(EXIT_FAILURE); } /* !dmn_idx */ } /* !col_nm_in */ } /* !1D */ if(flg_grd_in_2D){ long lat_nbr_in_dat; /* [nbr] Number of latitudes in input datafile */ if(lat_nm_in && (rcd=nco_inq_dimid_flg(in_id,lat_nm_in,&dmn_id_lat)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"lat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("lat"); else if((rcd=nco_inq_dimid_flg(in_id,"Latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"Lat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Lat"); else if((rcd=nco_inq_dimid_flg(in_id,"south_north",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("south_north"); /* WRF */ else if((rcd=nco_inq_dimid_flg(in_id,"south_north_stag",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("south_north_stag"); else if((rcd=nco_inq_dimid_flg(in_id,"YDim:location",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("YDim:location"); /* AIRS L3 */ else if((rcd=nco_inq_dimid_flg(in_id,"YDim:MOD_Grid_monthly_CMG_VI",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("YDim:MOD_Grid_monthly_CMG_VI"); /* MODIS MOD13C2 */ else if((rcd=nco_inq_dimid_flg(in_id,"natrack",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("natrack"); /* MODIS DeepBlue SeaWiFS L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"nj",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nj"); /* CICE RTM */ else if((rcd=nco_inq_dimid_flg(in_id,"lsmlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("lsmlat"); /* CISM/CLM/ELM */ else if((rcd=nco_inq_dimid_flg(in_id,"nlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nlat"); /* POP */ else if((rcd=nco_inq_dimid_flg(in_id,"rlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("rlat"); /* RACMO */ else if((rcd=nco_inq_dimid_flg(in_id,"nscan",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nscan"); /* AMSR, TRMM */ else if((rcd=nco_inq_dimid_flg(in_id,"nTimes",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nTimes"); /* OMI L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"number_of_lines",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("number_of_lines"); /* DSCOVR L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoTrack",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("GeoTrack"); /* AIRS L2 DAP NC */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoTrack:L2_Standard_atmospheric&surface_product",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("GeoTrack:L2_Standard_atmospheric&surface_product"); /* AIRS L2 HDF */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Along_Swath:mod04",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Cell_Along_Swath:mod04"); /* MODIS MOD04 L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Along_Swath_mod04",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Cell_Along_Swath_mod04"); /* MODIS MOD04 L2 (ncl_convert2nc changes colon to underscore) */ else if((rcd=nco_inq_dimid_flg(in_id,"CO_Latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("CO_Latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"j",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("j"); /* CMIP5 NorESM1 ocean */ else if((rcd=nco_inq_dimid_flg(in_id,"latitude0",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("latitude0"); /* 
Oxford */ else if((rcd=nco_inq_dimid_flg(in_id,"y",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("y"); /* NEMO */ else if((rcd=nco_inq_dimid_flg(in_id,"x",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("x"); /* NSIDC polar stereographic (NB: unfortunate incompatible conflict between NEMO & NSIDC names) */ else if((rcd=nco_inq_dimid_flg(in_id,"y1",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("y1"); /* NSIDC EASE */ else if((rcd=nco_inq_dimid_flg(in_id,"ygrid",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("ygrid"); /* SSM/I */ else if((rcd=nco_inq_dimid_flg(in_id,"ygrid_0",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("ygrid_0"); /* NWS HRRR */ else{ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports unable to find latitude dimension in input file. Tried the usual suspects. HINT: Inform regridder of input latitude dimension name with \"ncks --rgr lat_nm_in=name\" or \"ncremap -R '--rgr lat_nm_in=name'\"\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lat */ rcd=nco_inq_dimlen(in_id,dmn_id_lat,&lat_nbr_in_dat); if(lat_nbr_in != lat_nbr_in_dat && !flg_grd_in_1D_dat_in_2D){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports mapfile and data file dimension sizes disagree: mapfile lat_nbr_in = %ld != %ld = lat_nbr_in from datafile. HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,lat_nbr_in,lat_nbr_in_dat); nco_exit(EXIT_FAILURE); } /* !err */ long lon_nbr_in_dat; /* [nbr] Number of longitudes in input datafile */ if(lon_nm_in && (rcd=nco_inq_dimid_flg(in_id,lon_nm_in,&dmn_id_lon)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"longitude",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("longitude"); else if((rcd=nco_inq_dimid_flg(in_id,"lon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("lon"); else if((rcd=nco_inq_dimid_flg(in_id,"Longitude",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Longitude"); else if((rcd=nco_inq_dimid_flg(in_id,"Lon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Lon"); else if((rcd=nco_inq_dimid_flg(in_id,"west_east",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("west_east"); /* WRF */ else if((rcd=nco_inq_dimid_flg(in_id,"west_east_stag",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("west_east_stag"); else if((rcd=nco_inq_dimid_flg(in_id,"XDim:location",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("XDim:location"); /* AIRS L3 */ else if((rcd=nco_inq_dimid_flg(in_id,"XDim:MOD_Grid_monthly_CMG_VI",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("XDim:MOD_Grid_monthly_CMG_VI"); /* MODIS MOD13C2 */ else if((rcd=nco_inq_dimid_flg(in_id,"ni",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("ni"); /* CICE RTM */ else if((rcd=nco_inq_dimid_flg(in_id,"lsmlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("lsmlon"); /* CISM/CLM/ELM */ else if((rcd=nco_inq_dimid_flg(in_id,"nlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nlon"); /* POP */ else if((rcd=nco_inq_dimid_flg(in_id,"rlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("rlon"); /* POP */ else if((rcd=nco_inq_dimid_flg(in_id,"npix",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("npix"); /* AMSR */ else if((rcd=nco_inq_dimid_flg(in_id,"npixel",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("npixel"); /* TRMM */ else if((rcd=nco_inq_dimid_flg(in_id,"nxtrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nxtrack"); /* MODIS DeepBlue SeaWiFS L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"nXtrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nXtrack"); /* OMI L2 */ else 
if((rcd=nco_inq_dimid_flg(in_id,"number_of_pixels",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("number_of_pixels"); /* DSCOVR L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoXTrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("GeoXTrack"); /* AIRS L2 DAP NC */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoXTrack:L2_Standard_atmospheric&surface_product",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("GeoXTrack:L2_Standard_atmospheric&surface_product"); /* AIRS L2 HDF */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Across_Swath:mod04",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Cell_Across_Swath:mod04"); /* MODIS MOD04 L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Across_Swath_mod04",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Cell_Across_Swath_mod04"); /* MODIS MOD04 L2 (ncl_convert2nc changes colon to underscore) */ else if((rcd=nco_inq_dimid_flg(in_id,"i",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("i"); /* CMIP5 NorESM1 ocean */ else if((rcd=nco_inq_dimid_flg(in_id,"longitude0",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("longitude0"); /* Oxford */ else if((rcd=nco_inq_dimid_flg(in_id,"x",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("x"); /* NEMO */ else if((rcd=nco_inq_dimid_flg(in_id,"y",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("y"); /* NSIDC polar stereographic (NB: unfortunate incompatible conflict between NEMO & NSIDC names) */ else if((rcd=nco_inq_dimid_flg(in_id,"x1",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("x1"); /* NSIDC EASE */ else if((rcd=nco_inq_dimid_flg(in_id,"xgrid",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("xgrid"); /* SSM/I */ else if((rcd=nco_inq_dimid_flg(in_id,"xgrid_0",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("xgrid_0"); /* NWS HRRR */ else{ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports unable to find longitude dimension in input file. Tried the usual suspects. HINT: Inform regridder of input longitude dimension name with \"ncks --rgr lon_nm_in=name\" or \"ncremap -R '--rgr lon_nm_in=name'\"\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lat */ rcd=nco_inq_dimlen(in_id,dmn_id_lon,&lon_nbr_in_dat); if(lon_nbr_in != lon_nbr_in_dat && !flg_grd_in_1D_dat_in_2D){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports mapfile and data file dimension sizes disagree: mapfile lon_nbr_in = %ld != %ld = lon_nbr_in from datafile. 
HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,lon_nbr_in,lon_nbr_in_dat); nco_exit(EXIT_FAILURE); } /* !err */ if(flg_grd_in_1D_dat_in_2D){ if(lon_nbr_in_dat*lat_nbr_in_dat == col_nbr_in){ (void)fprintf(stdout,"%s: INFO %s Hail Mary algorithm reports tentative success in that product of identified horizontal dimension sizes in the 2D input data file equals the map-file 1D dimension size = %ld.\n",nco_prg_nm_get(),fnc_nm,col_nbr_in); lat_nbr_in=lat_nbr_in_dat; lon_nbr_in=lon_nbr_in_dat; }else{ /* !col_nbr_in */ (void)fprintf(stdout,"%s: ERROR %s Hail Mary algorithm reports final failure since product of identified horizontal dimension sizes in the 2D input data file does not equal the map-file 1D dimension size = %ld.\n",nco_prg_nm_get(),fnc_nm,col_nbr_in); nco_exit(EXIT_FAILURE); } /* !col_nbr_in */ } /* !flg_grd_in_1D_dat_in_2D */ } /* !2D */ /* Do not extract grid variables (that are also extensive variables) like lon, lat, area, and masks If necessary, use remap data to diagnose them from scratch Other extensive variables (like counts, population) will be extracted and summed not averaged */ /* Exception list source: ALM/CLM: landmask (20170504: Debatable, including erroneous mask may be better than completely excluding an expected mask) (20170504: must keep landfrac since regridded by ncremap for SGS option) AMSR: Latitude, Longitude CAM, CERES, CMIP5: lat, lon CAM, CMIP5: gw, lat_bnds, lon_bnds CAM-FV: slon, slat, w_stag (w_stag is weights for slat grid, analogous to gw for lat grid) CAM-SE, EAM, MOSART: area CICE: latt_bounds, lont_bounds, latu_bounds, lonu_bounds, TLAT, TLON, ULAT, ULON (NB: CICE uses ?LON and POP uses ?LONG) (aice is ice area, tmask is state-variable mask, both not currently excluded, although all binary masks like tmask should be recomputed on new grid) CISM/CLM/ELM: LATIXY, LONGXY (glacier mask files) DSCOVR L2: latitude, longitude ESMF: gridcell_area GPM: S1_Latitude, S1_Longitude HIRDLS: Latitude MAR/RACMO: LAT, LON MLS: CO_Latitude MPAS-O/I/LI: areaCell, latCell, lonCell and others that are all handled by separate MPAS convention implementation below NCO: lat_vertices, lon_vertices NEMO: nav_lat, nav_lon NWS HRRR: gridlat_0, gridlon_0 OCO2: latitude_bnds, longitude_bnds OMI DOMINO: Latitude, LatitudeCornerpoints, Longitude, LongitudeCornerpoints Oxford: global_latitude0, global_longitude0, latitude0, longitude0 POP: TLAT, TLONG, ULAT, ULONG (NB: CICE uses ?LON and POP uses ?LONG) (POP does not archive spatial bounds) RACMO: rlat, rlon TRMM: Latitude, Longitude UV-CDAT regridder: bounds_lat, bounds_lon Unknown: XLAT_M, XLONG_M WRF: XLAT, XLONG */ const int var_xcl_lst_nbr=53; /* [nbr] Number of objects on exclusion list */ const char *var_xcl_lst[]={"/area","/gridcell_area","/gw","/LAT","/lat","/Latitude","/latitude","/nav_lat","/global_latitude0","/gridlat_0","/latitude0","/rlat","/slat","/LATIXY","/LONGXY","/TLAT","/ULAT","/XLAT","/XLAT_M","/CO_Latitude","/S1_Latitude","/lat_bnds","/lat_vertices","/latt_bounds","/latu_bounds","/latitude_bnds","/LatitudeCornerpoints","/bounds_lat","/LON","/lon","/Longitude","/longitude","/nav_lon","/global_longitude0","/gridlon_0","/longitude0","/rlon","/slon","/TLON","/TLONG","/ULON","/ULONG","/XLONG","/XLONG_M","/CO_Longitude","/S1_Longitude","/lon_bnds","/lon_vertices","/lont_bounds","/lonu_bounds","/longitude_bnds","/LongitudeCornerpoints","/bounds_lon","/w_stag"}; int var_cpy_nbr=0; /* [nbr] Number of copied variables 
*/ int var_rgr_nbr=0; /* [nbr] Number of regridded variables */ int var_xcl_nbr=0; /* [nbr] Number of deleted variables */ int var_crt_nbr=0; /* [nbr] Number of created variables */ int var_xtn_nbr=0; /* [nbr] Number of extensive variables */ unsigned int idx_tbl; /* [idx] Counter for traversal table */ const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */ for(idx=0;idx<var_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,var_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* endif */ } /* !idx */ cnv_sct *cnv; /* [sct] Convention structure */ /* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */ cnv=nco_cnv_ini(in_id); if(cnv->MPAS){ /* 20160228: MPAS has a host of mysterious grid and extensive variables that should probably not be regridded 20180206: Add from MPAS-LI xCell, yCell, zCell, and [xyz]Edge, and [xyz]Vertex 20180917: Restrict exclusion list to a subset of variables with nCells-dimension Six nCells-variables may be valuable when regridded to lat/lon mpas_xcl_lst in nco_rgr_wgt() and MPAS var_xcl_lst in nco_var_is_fix() differ by these six variables: areaCell for comparison to area(lat,lon) cellMask for area-weighted mask maxLevelCell for area-weighted underwater topographic mask xCell, yCell, zCell for area-weighted cartesian coordinates 20180918: Regridder currently only works on cell-based coordinates Decided regridder will omit not copy fields on vertex- or edge-based coordinates until it can regrid them Regridding vertex- or edge-based fields would require new sparse matrix for vertices or edges How would ERWG or TempestRemap handle that? 
MPAS geophysical variables on vertex-based (not cell-based) coordinates include: avg_airStressVertexUGeo_1, avg_airStressVertexVGeo_1, uOceanVelocityVertexGeo_1, uVelocityGeo_1, vOceanVelocityVertexGeo_1, vVelocityGeo_1 MPAS geophysical variables on edge-based (not cell-based) coordinates include: principalStress1Var_1, principalStress2Var_1 */ const int mpas_xcl_lst_nbr=35; const char *mpas_xcl_lst[]={"/angleEdge","/areaTriangle","/cellsOnCell","/cellsOnEdge","/cellsOnVertex","/dcEdge","/dvEdge","/edgeMask","/edgesOnCell","/edgesOnEdge","/edgesOnVertex","/indexToCellID","/indexToEdgeID","/indexToVertexID","/kiteAreasOnVertex","/latCell","/latEdge","/latVertex","/lonCell","/lonEdge","/lonVertex","/maxLevelEdgeTop","/meshDensity","/nEdgesOnCell","/nEdgesOnEdge","/vertexMask","/verticesOnCell","/verticesOnEdge","/weightsOnEdge","/xEdge","/yEdge","/zEdge","/xVertex","/yVertex","/zVertex"}; for(idx=0;idx<mpas_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,mpas_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined MPAS exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* endif */ } /* !idx */ } /* !MPAS */ char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */ int dmn_nbr_in; /* [nbr] Number of dimensions in input variable */ int dmn_nbr_out; /* [nbr] Number of dimensions in output variable */ nco_bool has_lon; /* [flg] Contains longitude dimension */ nco_bool has_lat; /* [flg] Contains latitude dimension */ trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */ /* Define regridding flag for each variable */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ has_lon=False; has_lat=False; if(flg_grd_in_2D){ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ /* Pre-determine flags necessary during next loop */ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* fxm: Generalize to include any variable containing two coordinates with "standard_name" = "latitude" and "longitude" */ if(!has_lon) has_lon=!strcmp(dmn_nm_cp,lon_nm_in); if(!has_lat) has_lat=!strcmp(dmn_nm_cp,lat_nm_in); } /* end loop over dimensions */ } /* !flg_grd_in_2D */ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* Regrid variables containing the horizontal spatial dimension on 1D grids, and both latitude and longitude on 2D grids */ if(!strcmp(dmn_nm_cp,col_nm_in) || (has_lon && has_lat)){ trv_tbl->lst[idx_tbl].flg_rgr=True; var_rgr_nbr++; break; } /* endif */ } /* end loop over dimensions */ if(dmn_idx == dmn_nbr_in){ /* Not regridded, so must be omitted or copied... 
*/ if(flg_grd_in_2D && (has_lon || has_lat)){ /* Single spatial dimensional variables on 2D input grids are likely extensive (e.g., grd_mrd_lng from bds) These could be salvaged with explicit rules or implicit assumptions */ trv_tbl->lst[idx_tbl].flg_xtr=False; var_xcl_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) extensive-seeming (e.g., 1D spatial variable in 2D input grid, or 2D spatial variable without primary grid dimensions from multi-grid file (e.g., west_east_stag or south_north_stag instead of west_east or south_north)) variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); }else{ /* !omitted */ /* Copy all variables that are not regridded or omitted */ var_cpy_nbr++; } /* !omitted */ } /* endif not regridded */ } /* end nco_obj_typ_var */ } /* end idx_tbl */ if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit regridding criteria. The regridder expects something to regrid, and variables not regridded are copied straight to output. HINT: If the name(s) of the input horizontal spatial dimensions to be regridded (e.g., latitude and longitude or column) do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"latitude\", \"longitude\", and \"ncol\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#regrid, e.g., \"ncks --rgr col=lndgrid --rgr lat=north\" or \"ncremap -R '--rgr col=lndgrid --rgr lat=north'\".\n",nco_prg_nm_get(),fnc_nm); for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.flg_rgr){ for(int xtn_idx=0;xtn_idx<rgr->xtn_nbr;xtn_idx++){ /* 20150927: Extensive variable treatments are still in alpha-development Currently testing on AIRS TSurfStd_ct (by summing not averaging) In future may consider variables that need more complex (non-summing) extensive treatment MPAS-O/I has a zillion of these [xyz]Cell, cellsOnCell, fCell, indexToCellID, maxLevelCell, meshDensity Not to mention the variables that depend on nEdges and nVertices... */ if(!strcmp(trv.nm,rgr->xtn_var[xtn_idx])){ trv_tbl->lst[idx_tbl].flg_xtn=True; var_xtn_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO Variable %s will be treated as extensive (summed not averaged)\n",nco_prg_nm_get(),trv.nm_fll); } /* !strcmp */ } /* !xtn_idx */ } /* !flg_rgr */ } /* !idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_sbr){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Regrid %s? %s\n",trv.nm,trv.flg_rgr ? 
"Yes" : "No"); } /* end idx_tbl */ } /* end dbg */ /* Lay-out regridded file */ aed_sct aed_mtd; char *area_nm_out; char *att_nm; char *bnd_nm_out; char *bnd_tm_nm_out; char *col_nm_out; char *frc_nm_out; char *lat_bnd_nm_out; char *lat_dmn_nm_out; char *lat_nm_out; char *lat_wgt_nm; char *lon_bnd_nm_out; char *lon_dmn_nm_out; char *lon_nm_out; char *msk_nm_out; char *slat_nm_out=NULL; char *slat_wgt_nm_out=NULL; char *slon_nm_out=NULL; int dmn_id_bnd; /* [id] Dimension ID */ int dmn_id_bnd_tm; /* [id] Dimension ID */ int dmn_id_slat; /* [id] Dimension ID */ int dmn_id_slon; /* [id] Dimension ID */ int area_out_id; /* [id] Variable ID for area */ int frc_out_id; /* [id] Variable ID for fraction */ int lon_out_id; /* [id] Variable ID for longitude */ int lat_out_id; /* [id] Variable ID for latitude */ int lat_wgt_id; /* [id] Variable ID for latitude weight */ int lon_bnd_id; /* [id] Variable ID for lon_bnds/lon_vertices */ int lat_bnd_id; /* [id] Variable ID for lat_bnds/lat_vertices */ int msk_out_id; /* [id] Variable ID for mask */ int slat_out_id; /* [id] Variable ID for staggered latitude */ int slat_wgt_id; /* [id] Variable ID for staggered latitude weight */ int slon_out_id; /* [id] Variable ID for staggered longitude */ int dmn_ids_out[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */ long dmn_srt_out[dmn_nbr_grd_max]; long dmn_cnt_tuo[dmn_nbr_grd_max]; /* Name output dimensions/variables */ area_nm_out=rgr->area_nm; bnd_tm_nm_out=rgr->bnd_tm_nm; frc_nm_out=rgr->frc_nm; lat_bnd_nm_out=rgr->lat_bnd_nm; lat_wgt_nm=rgr->lat_wgt_nm; lon_bnd_nm_out=rgr->lon_bnd_nm; msk_nm_out=rgr->msk_nm; /* Use explicitly specified output names, if any, otherwise use input names (either explicitly specified or discovered by fuzzing) */ if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=col_nm_in; if(rgr->lat_dmn_nm) lat_dmn_nm_out=rgr->lat_dmn_nm; else lat_dmn_nm_out=lat_nm_in; if(rgr->lon_dmn_nm) lon_dmn_nm_out=rgr->lon_dmn_nm; else lon_dmn_nm_out=lon_nm_in; if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=lat_nm_in; if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=lon_nm_in; if(flg_grd_out_1D){ bnd_nm_out=rgr->vrt_nm; lat_bnd_nm_out=rgr->lat_vrt_nm; lon_bnd_nm_out=rgr->lon_vrt_nm; } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ bnd_nm_out=rgr->bnd_nm; } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ bnd_nm_out=rgr->bnd_tm_nm; /* NB: default to bnd_tm_nm for spatial bounds */ } /* !flg_grd_out_rct */ if(flg_grd_out_2D){ lat_bnd_nm_out=rgr->lat_bnd_nm; lon_bnd_nm_out=rgr->lon_bnd_nm; } /* !flg_grd_out_2D */ if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ slat_nm_out=strdup("slat"); slat_wgt_nm_out=strdup("w_stag"); slon_nm_out=strdup("slon"); } /* !nco_grd_lat_fv */ /* Ensure temporal bounds dimension name is distinct from spatial bounds when their sizes differ */ if(bnd_nbr_out != bnd_tm_nbr_out){ if(!strcmp(bnd_nm_out,bnd_tm_nm_out)){ (void)fprintf(stdout,"%s: INFO %s reports spatial and temporal output bounds dimensions are identical (and named \"%s\") by default for rectangular output grids because both can be stored as 2D arrays. That cannot work for this mapping because temporal and spatial bounds dimensions sizes differ (bnd_nbr_out = %d, bnd_tm_nbr_out = %d). Using fall-back spatial bounds name \"%s\" instead. 
HINT: You may change one or both manually with \"ncks --rgr bnd_nm=name\" or \"ncks --rgr bnd_tm_nm=name\", or, using ncremap, with \"ncremap -R '--rgr bnd_nm=name'\" or \"ncremap -R '--rgr bnd_tm_nm=name'\"\n",nco_prg_nm_get(),fnc_nm,bnd_tm_nm_out,bnd_nbr_out,bnd_tm_nbr_out,bnd_nm_out); } /* !strcmp() */ } /* !bnd_nbr_out */ /* Persistent metadata */ aed_sct aed_mtd_crd; char *att_val_crd=NULL; char *att_nm_crd=NULL; att_nm_crd=strdup("coordinates"); aed_mtd_crd.att_nm=att_nm_crd; if(flg_grd_out_1D || flg_grd_out_crv) aed_mtd_crd.mode=aed_overwrite; else aed_mtd_crd.mode=aed_delete; aed_mtd_crd.type=NC_CHAR; aed_mtd_crd.sz=strlen(lat_nm_out)+strlen(lon_nm_out)+1L; att_val_crd=(char *)nco_malloc((aed_mtd_crd.sz+1L)*nco_typ_lng(aed_mtd_crd.type)); (void)sprintf(att_val_crd,"%s %s",lat_nm_out,lon_nm_out); aed_mtd_crd.val.cp=att_val_crd; /* Reminder: Regridder area_out options, e.g., --rgr area_out, set flg_area_out to control adding "area" variable to regridded output Regridder cll_msr options, --rgr cll_msr, set flg_cll_msr to control adding "cell_measures" attribute to regridded output ncks & ncra cll_msr options, --cll_msr, set EXTRACT_CLL_MSR to control adding "cell_measures" variables (e.g., area) to extraction list of input file EXTRACT_CLL_MSR supersedes --rgr area_out in determining whether to add "area" to regridded output */ nco_bool flg_area_out=rgr->flg_area_out; /* [flg] Add area to output */ nco_bool flg_cll_msr=rgr->flg_cll_msr; /* [flg] Add cell_measures attribute */ aed_sct aed_mtd_cll_msr; char *att_nm_cll_msr=NULL; char *att_val_cll_msr=NULL; if(flg_cll_msr){ att_nm_cll_msr=strdup("cell_measures"); aed_mtd_cll_msr.att_nm=att_nm_cll_msr; aed_mtd_cll_msr.mode=aed_overwrite; aed_mtd_cll_msr.type=NC_CHAR; att_val_cll_msr=(char *)nco_malloc((strlen(area_nm_out)+6L+1L)*nco_typ_lng(aed_mtd_cll_msr.type)); (void)sprintf(att_val_cll_msr,"area: %s",area_nm_out); aed_mtd_cll_msr.sz=strlen(att_val_cll_msr); aed_mtd_cll_msr.val.cp=att_val_cll_msr; } /* !flg_cll_msr */ /* Define new horizontal dimensions before all else */ if(flg_grd_out_1D){ rcd+=nco_def_dim(out_id,col_nm_out,col_nbr_out,&dmn_id_col); } /* !flg_grd_out_1D */ if(flg_grd_out_2D){ rcd+=nco_def_dim(out_id,lat_dmn_nm_out,lat_nbr_out,&dmn_id_lat); rcd+=nco_def_dim(out_id,lon_dmn_nm_out,lon_nbr_out,&dmn_id_lon); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd+=nco_def_dim(out_id,slat_nm_out,slat_nbr_out,&dmn_id_slat); rcd+=nco_def_dim(out_id,slon_nm_out,slon_nbr_out,&dmn_id_slon); } /* !nco_grd_lat_fv */ } /* !flg_grd_out_2D */ /* If dimension has not been defined, define it */ rcd=nco_inq_dimid_flg(out_id,bnd_tm_nm_out,&dmn_id_bnd_tm); if(rcd != NC_NOERR) rcd=nco_def_dim(out_id,bnd_tm_nm_out,bnd_tm_nbr_out,&dmn_id_bnd_tm); /* If dimension has not been defined, define it */ rcd=nco_inq_dimid_flg(out_id,bnd_nm_out,&dmn_id_bnd); if(rcd != NC_NOERR) rcd=nco_def_dim(out_id,bnd_nm_out,bnd_nbr_out,&dmn_id_bnd); char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */ char *var_nm; /* [sng] Variable name */ int *dmn_id_in=NULL; /* [id] Dimension IDs */ int *dmn_id_out=NULL; /* [id] Dimension IDs */ int var_id_in; /* [id] Variable ID */ int var_id_out; /* [id] Variable ID */ nc_type var_typ_out; /* [enm] Variable type to write to disk */ nc_type var_typ_rgr; /* [enm] Variable type used during regridding */ nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */ int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; 
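  /* Illustrative sketch (disabled, not NCO code): the definition blocks below all follow the standard netCDF define-then-write pattern of the raw C API. A dimension must exist before any variable that uses it, and values may be written only after nc_enddef(). Filename "out.nc" and the _xmp names/sizes are hypothetical placeholders */
#if 0
  int ncid_xmp,dmn_id_xmp,var_id_xmp;
  double lat_xmp[2]={-45.0,45.0};
  (void)nc_create("out.nc",NC_NETCDF4,&ncid_xmp); /* Create file, enter define mode */
  (void)nc_def_dim(ncid_xmp,"lat",2L,&dmn_id_xmp); /* Define dimension first... */
  (void)nc_def_var(ncid_xmp,"lat",NC_DOUBLE,1,&dmn_id_xmp,&var_id_xmp); /* ...then variable on it */
  (void)nc_def_var_deflate(ncid_xmp,var_id_xmp,NC_SHUFFLE,1,1); /* Optional compression, netCDF4 only */
  (void)nc_enddef(ncid_xmp); /* Leave define mode */
  (void)nc_put_var_double(ncid_xmp,var_id_xmp,lat_xmp); /* Write values last */
  (void)nc_close(ncid_xmp);
#endif /* !0 */
  /* NCO wraps these calls (nco_def_dim(), nco_def_var(), ...) to add error handling, which is why dimensions are defined before any variable below */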
dfl_lvl=rgr->dfl_lvl; fl_out_fmt=rgr->fl_out_fmt; /* Define new coordinates and grid variables in regridded file */ if(flg_grd_out_1D){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_col; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_col; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_col,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids_out,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; dmn_ids_out[2]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_3D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_3D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lat,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; 
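  /* Aside, a hypothetical spot-check: once this output file is complete, the grid variables defined in this section can be inspected by dumping metadata with, e.g., ncks -m -v lat,lon,lat_bnds,lon_bnds out.nc (filename and variable names are placeholders) */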
rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lon,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd+=nco_def_var(out_id,slat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slat,&slat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,slat_wgt_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slat,&slat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slat_wgt_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,slon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slon,&slon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !nco_grd_lat_fv */ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_lon; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lat_wgt_nm,crd_typ_out,dmn_nbr_1D,&dmn_id_lat,&lat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_wgt_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids_out,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ } /* !flg_grd_out_rct */ /* Add _FillValue to empty destination cells, if requested */ nco_bool flg_add_fll=rgr->flg_add_fll; /* [flg] Add _FillValue to fields with empty destination cells */ nco_bool flg_dst_mpt=False; /* [flg] At least one destination cell is empty */ size_t dst_idx; /* [idx] Index on destination grid */ /* Determine whether any destination cells are, in fact, empty Logic here could be replaced by examining frac_b variable, if we trust input frac_b... ...and we do trust input frac_b since it is already used for renormalization */ if(flg_add_fll){ if(flg_msk_apl){ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(msk_out[dst_idx] == 0) break; if(dst_idx < grd_sz_out) flg_dst_mpt=True; if(flg_dst_mpt && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports at least one destination cell, Fortran (1-based) row index %lu, is empty. User requested (with --msk_apl) that masked cells receive _FillValue, so regridder will ensure that all regridded fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm,dst_idx+1L); }else{ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ /* For each destination cell... */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ /* ...does any weight... */ if(row_dst_adr[lnk_idx] == dst_idx){ /* ...contribute to that cell? 
*/ /* If so, break lnk_idx loop and continue to next iteration of dst_idx loop */ break; } /* !row_dst_adr */ } /* !lnk_idx */ /* If weight loop reached end without a match, then this destination cell is empty */ if(lnk_idx == lnk_nbr){ flg_dst_mpt=True; break; } /* !lnk_idx */ } /* !dst_idx */ if(flg_dst_mpt && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports at least one destination cell, Fortran (1-based) row index %lu, is empty. User requested (with --add_fll) that empty cells receive _FillValue, so regridder will ensure that all regridded fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm,dst_idx+1L); } /* !flg_msk_apl */ } /* !flg_add_fll */ /* Pre-allocate dimension ID and cnt/srt space */ int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */ int dmn_in_fst; /* [idx] Offset of input- relative to output-dimension due to non-MRV dimension insertion */ int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */ int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */ rcd+=nco_inq_ndims(in_id,&dmn_nbr_max); dmn_nbr_max++; /* Safety in case regridding adds dimension */ dmn_id_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* Identify all record-dimensions in input file */ rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ int flg_pck; /* [flg] Variable is packed on disk */ nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */ double mss_val_dbl; double mss_val_cmp_dbl; /* Missing value for comparison to double precision values */ /* Define regridded and copied variables in output file */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv_tbl->lst[idx_tbl].flg_mrv=True; trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ var_nm=trv.nm; /* Preserve input type in output type */ var_typ_out=trv.var_typ; /* Demote DP to SP to save space. fxm: missing value type will then be inconsistent if copied without demotion */ //if(trv.var_typ == NC_DOUBLE) var_typ_out=NC_FLOAT; else var_typ_out=trv.var_typ; dmn_nbr_in=trv.nbr_dmn; dmn_nbr_out=trv.nbr_dmn; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out); /* If variable has not been defined, define it */ if(rcd != NC_NOERR){ if(trv.flg_rgr){ /* Regrid */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); dmn_in_fst=0; rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports variable \"%s\" is packed so results unpredictable. HINT: If regridded values seem weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); /* Is horizontal dimension last, i.e., most-rapidly-varying? */ if(flg_grd_in_1D && !strcmp(dmn_nm,col_nm_in)){ if(dmn_idx != dmn_nbr_in-1){ /* Unstructured input grid has col in non-MRV location (expect this with, e.g., MPAS-O/I native grid dimension-ordering) */ (void)fprintf(stdout,"%s: WARNING %s reports unstructured grid spatial coordinate %s is (zero-based) dimension %d of input variable to be regridded %s which has %d dimensions. 
The NCO regridder does not support unstructured spatial dimensions that are not the last (i.e., most rapidly varying) dimension of an input variable, so results are likely garbage.\nHINT: Re-arrange input file dimensions to place horizontal dimension(s) last with, e.g., \'ncpdq -a time,lev,%s in.nc out.nc\' prior to calling the regridder. E3SM users: If this is an MPAS dataset with a new (unknown to ncremap) dimension, please ask Charlie to add the dimension to the ncremap dimension permutation list.\n",nco_prg_nm_get(),fnc_nm,dmn_nm,dmn_idx,var_nm,dmn_nbr_in,dmn_nm); trv_tbl->lst[idx_tbl].flg_mrv=False; } /* !dmn_idx */ } /* !flg_grd_in_1D */ if(flg_grd_in_2D && (!strcmp(dmn_nm,lat_nm_in) || !strcmp(dmn_nm,lon_nm_in))){ /* Are horizontal dimensions most-rapidly-varying? */ if(dmn_idx != dmn_nbr_in-1 && dmn_idx != dmn_nbr_in-2){ /* NB: Lat/lon input grid has lat/lon in non-MRV location (expect this with, e.g., AIRS L2 grid dimension-ordering) */ (void)fprintf(stdout,"%s: WARNING %s reports lat-lon grid spatial coordinate %s is (zero-based) dimension %d of input variable to be regridded %s which has %d dimensions. The NCO regridder does not support rectangular lat-lon dimension(s) that are not the last two (i.e., most rapidly varying) dimensions of an input variable, so results are likely garbage.\nHINT: Re-arrange input file dimensions to place horizontal dimensions last with, e.g., \'ncpdq -a time,lev,lat,lon in.nc out.nc\' prior to calling the regridder.\n",nco_prg_nm_get(),fnc_nm,dmn_nm,dmn_idx,var_nm,dmn_nbr_in); trv_tbl->lst[idx_tbl].flg_mrv=False; } /* !dmn_idx */ } /* !flg_grd_in_2D */ if(flg_grd_out_1D){ if((nco_rgr_typ == nco_rgr_grd_2D_to_1D) && (!strcmp(dmn_nm,lat_nm_in) || !strcmp(dmn_nm,lon_nm_in))){ /* Replace orthogonal horizontal dimensions by unstructured horizontal dimension already defined */ if(!strcmp(dmn_nm,lat_nm_in)){ /* Replace lat with col */ dmn_id_out[dmn_idx]=dmn_id_col; dmn_cnt[dmn_idx]=col_nbr_out; } /* endif lat */ if(!strcmp(dmn_nm,lon_nm_in)){ /* Assume non-MRV dimensions are ordered lat/lon. Replace lat with col. Shift MRV dimensions to left after deleting lon. 
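     Worked example (hypothetical ordering): an input variable dimensioned (time,lat,lon,lev) regridded 2D->1D becomes (time,col,lev): the lat slot is re-used for col, the lon slot is deleted, and each later dimension (here lev) shifts one position left, which is what the dmn_in_fst=-1 offset below encodes.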
*/ dmn_id_out[dmn_idx]=NC_MIN_INT; dmn_cnt[dmn_idx]=NC_MIN_INT; dmn_nbr_out--; /* Reduce output dimension position of all subsequent input dimensions by one */ if(!trv_tbl->lst[idx_tbl].flg_mrv) dmn_in_fst=-1; } /* endif lon */ }else{ /* Dimension col_nm_in has already been defined as col_nm_out, replicate all other dimensions */ if(!strcmp(dmn_nm,col_nm_in)) rcd=nco_inq_dimid_flg(out_id,col_nm_out,dmn_id_out+dmn_idx); else rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx+dmn_in_fst); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx+dmn_in_fst); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx+dmn_in_fst]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx+dmn_in_fst],dmn_id_out+dmn_idx+dmn_in_fst); } /* !rcd */ } /* !lat && !lon */ } /* !flg_grd_out_1D */ if(flg_grd_out_2D){ if(nco_rgr_typ == nco_rgr_grd_1D_to_2D && !strcmp(dmn_nm,col_nm_in)){ /* Replace unstructured horizontal dimension by orthogonal horizontal dimensions already defined */ dmn_id_out[dmn_idx]=dmn_id_lat; dmn_id_out[dmn_idx+1]=dmn_id_lon; dmn_cnt[dmn_idx]=lat_nbr_out; dmn_cnt[dmn_idx+1]=lon_nbr_out; dmn_nbr_out++; /* Increase output dimension position of all subsequent input dimensions by one */ if(!trv_tbl->lst[idx_tbl].flg_mrv) dmn_in_fst=1; }else{ /* Dimensions lat/lon_nm_in have already been defined as lat/lon_nm_out, replicate all other dimensions */ if(!strcmp(dmn_nm,lat_nm_in)) rcd=nco_inq_dimid_flg(out_id,lat_dmn_nm_out,dmn_id_out+dmn_idx); else if(!strcmp(dmn_nm,lon_nm_in)) rcd=nco_inq_dimid_flg(out_id,lon_dmn_nm_out,dmn_id_out+dmn_idx); else rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx+dmn_in_fst); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx+dmn_in_fst); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx+dmn_in_fst]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx+dmn_in_fst],dmn_id_out+dmn_idx+dmn_in_fst); } /* !rcd */ } /* !col */ } /* !1D_to_2D */ } /* !dmn_idx */ }else{ /* !flg_rgr */ /* Replicate non-regridded variables */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ } /* !flg_rgr */ rcd=nco_def_var(out_id,var_nm,var_typ_out,dmn_nbr_out,dmn_id_out,&var_id_out); /* Duplicate netCDF4 settings when possible */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){ /* Deflation */ if(dmn_nbr_out > 0){ int dfl_lvl_in; /* [enm] Deflate level [0..9] */ rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in); /* Copy original deflation settings */ if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in); /* Overwrite HDF Lempel-Ziv compression level, if requested */ if(dfl_lvl == 0) deflate=(int)False; else 
deflate=(int)True; /* Turn-off shuffle when uncompressing otherwise chunking requests may fail */ if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE; /* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */ if(dfl_lvl >= 0) shuffle=NC_SHUFFLE; if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl); } /* !dmn_nbr_out */ } /* !NC_FORMAT_NETCDF4 */ (void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY); if(trv.flg_rgr){ aed_mtd_crd.var_nm=var_nm; aed_mtd_crd.id=var_id_out; (void)nco_aed_prc(out_id,var_id_out,aed_mtd_crd); if(flg_cll_msr){ aed_mtd_cll_msr.var_nm=var_nm; aed_mtd_cll_msr.id=var_id_out; (void)nco_aed_prc(out_id,var_id_out,aed_mtd_cll_msr); } /* !flg_cll_msr */ /* 20210602: Ensure all regridded variables have _FillValue if user requested _FillValue in empty cells and there are empty cells */ if(flg_add_fll && flg_dst_mpt){ /* Check for _FillValue here iff user requests non-default behavior */ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,(double *)NULL); if(!has_mss_val){ val_unn mss_val_dfl; /* [] Default _FillValue */ mss_val_dfl=nco_mss_val_dfl_get(var_typ_out); rcd=nco_put_att(out_id,var_id_out,"_FillValue",var_typ_out,1L,(void *)(&mss_val_dfl)); } /* !has_mss_val */ } /* !flg_add_fll */ } /* !flg_rgr */ } /* !rcd */ } /* !var */ } /* !idx_tbl */ /* Free pre-allocated array space */ /* col_nm_in will not otherwise be free'd if it was guessed as usual suspect */ if(col_nm_in != rgr->col_nm_in) col_nm_in=(char *)nco_free(col_nm_in); if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt) dmn_cnt=(long *)nco_free(dmn_cnt); if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); /* Define new metadata in regridded file */ if(flg_area_out){ rcd=nco_char_att_put(out_id,area_nm_out,"long_name","Solid angle subtended by gridcell"); rcd=nco_char_att_put(out_id,area_nm_out,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm_out,"units","steradian"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd); att_val=(char *)nco_calloc((strlen(lat_dmn_nm_out)+strlen(lon_dmn_nm_out)+8L),sizeof(char)); (void)sprintf(att_val,"%s, %s: sum",lat_dmn_nm_out,lon_dmn_nm_out); rcd=nco_char_att_put(out_id,area_nm_out,"cell_methods",att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd=nco_char_att_put(out_id,frc_nm_out,"long_name","Fraction of gridcell valid on destination grid"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,frc_nm_out,att_nm_crd,att_val_crd); att_val=(char *)nco_calloc((strlen(lat_dmn_nm_out)+strlen(lon_dmn_nm_out)+8L),sizeof(char)); (void)sprintf(att_val,"%s, %s: sum",lat_dmn_nm_out,lon_dmn_nm_out); rcd=nco_char_att_put(out_id,frc_nm_out,"cell_methods",att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd=nco_char_att_put(out_id,msk_nm_out,"long_name","Mask (0 = invalid destination, 1 = valid destination)"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,msk_nm_out,att_nm_crd,att_val_crd); } /* !flg_msk_out */ rcd=nco_char_att_put(out_id,lat_nm_out,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lat_nm_out,"standard_name","latitude"); rcd=nco_char_att_put(out_id,lat_nm_out,"units","degrees_north"); // 20200205: Attach "axis" attribute to single-dimensional geospatial coordinates not to two-dimensional coordinate 
variables per CF Conventions section 5.2 if(!flg_grd_out_crv) rcd=nco_char_att_put(out_id,lat_nm_out,"axis","Y"); double vld_min; vld_min=-90.0; att_nm=strdup("valid_min"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lat_nm_out; aed_mtd.id=lat_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_min; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lat_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); double vld_max; vld_max=90.0; att_nm=strdup("valid_max"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lat_nm_out; aed_mtd.id=lat_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_max; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lat_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lat_nm_out,"bounds",lat_bnd_nm_out); if(flg_grd_out_rct) att_val=strdup("Gridcell latitude interfaces"); else att_val=strdup("Gridcell latitude vertices"); rcd=nco_char_att_put(out_id,lat_bnd_nm_out,"long_name",att_val); rcd=nco_char_att_put(out_id,lon_nm_out,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lon_nm_out,"standard_name","longitude"); rcd=nco_char_att_put(out_id,lon_nm_out,"units","degrees_east"); // 20200205: Attach "axis" attribute to single-dimensional geospatial coordinates not to two-dimensional coordinate variables per CF Conventions section 5.2 if(!flg_grd_out_crv) rcd=nco_char_att_put(out_id,lon_nm_out,"axis","X"); /* UGRID Conventions define "topology" and "modulo" attributes https://github.com/ugrid-conventions/ugrid-conventions My understanding is these should only be utilized for global grids */ if(nco_rgr_typ == nco_rgr_grd_2D_to_2D){ /* fxm: change this to check whether lon_spn >= 360 or nco_grd_xtn == global */ att_nm=strdup("modulo"); double modulo=360.0; aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&modulo; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lon_nm_out,"topology","circular"); } /* !nco_rgr_grd_2D_to_2D */ if(lon_ctr_out[0] >= 0.0) vld_min=0.0; else vld_min=-180.0; att_nm=strdup("valid_min"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_min; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); if(lon_ctr_out[0] >= 0.0) vld_max=360.0; else vld_max=180.0; att_nm=strdup("valid_max"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_max; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lon_nm_out,"bounds",lon_bnd_nm_out); att_nm=strdup("bounds"); att_val=lon_bnd_nm_out; aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=strlen(att_val); aed_mtd.type=NC_CHAR; aed_mtd.val.cp=att_val; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); if(flg_grd_out_rct) att_val=strdup("Gridcell longitude interfaces"); else att_val=strdup("Gridcell longitude vertices"); rcd=nco_char_att_put(out_id,lon_bnd_nm_out,"long_name",att_val); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd=nco_char_att_put(out_id,slat_nm_out,"long_name","Latitude for staggered FV grid"); 
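  /* A minimal sketch (disabled, not NCO code) of what the aed_mtd/nco_aed_prc() calls above accomplish, expressed with the raw netCDF C API; the aed_sct layer adds the create/overwrite/delete modes and error handling on top. All _xmp names are hypothetical */
#if 0
  int ncid_xmp=0,lat_id_xmp=0; /* Hypothetical file and variable IDs */
  double vld_rng_xmp[2]={-90.0,90.0};
  (void)nc_put_att_double(ncid_xmp,lat_id_xmp,"valid_min",NC_DOUBLE,1L,vld_rng_xmp);
  (void)nc_put_att_double(ncid_xmp,lat_id_xmp,"valid_max",NC_DOUBLE,1L,vld_rng_xmp+1);
#endif /* !0 */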
rcd=nco_char_att_put(out_id,slat_nm_out,"units","degrees_north"); rcd=nco_char_att_put(out_id,slat_wgt_nm_out,"long_name","Latitude weights for staggered FV grid"); rcd=nco_char_att_put(out_id,slon_nm_out,"long_name","Longitude for staggered FV grid"); rcd=nco_char_att_put(out_id,slon_nm_out,"units","degrees_east"); } /* !nco_grd_lat_fv */ if(flg_grd_out_rct) rcd=nco_char_att_put(out_id,lat_wgt_nm,"long_name","Latitude quadrature weights (normalized to sum to 2.0 on global grids)"); rcd=nco_char_att_put(out_id,NULL,"map_file",fl_in); rcd=nco_char_att_put(out_id,NULL,"input_file",rgr->fl_in); /* Annotate persistent metadata that should appear last in attribute list */ if(flg_grd_out_1D){ if(flg_area_out) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd); if(flg_frc_out_wrt) rcd=nco_char_att_put(out_id,frc_nm_out,att_nm_crd,att_val_crd); if(flg_msk_out) rcd=nco_char_att_put(out_id,msk_nm_out,att_nm_crd,att_val_crd); } /* !flg_grd_out_1D */ /* Persistent metadata */ if(att_nm_crd) att_nm_crd=(char *)nco_free(att_nm_crd); if(att_val_crd) att_val_crd=(char *)nco_free(att_val_crd); if(flg_cll_msr){ if(att_nm_cll_msr) att_nm_cll_msr=(char *)nco_free(att_nm_cll_msr); if(att_val_cll_msr) att_val_cll_msr=(char *)nco_free(att_val_cll_msr); } /* !flg_cll_msr */ if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ if(slat_nm_out) slat_nm_out=(char *)nco_free(slat_nm_out); if(slat_wgt_nm_out) slat_wgt_nm_out=(char *)nco_free(slat_wgt_nm_out); if(slon_nm_out) slon_nm_out=(char *)nco_free(slon_nm_out); } /* !nco_grd_lat_fv */ /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Begin data mode */ (void)nco_enddef(out_id); /* Write new coordinates and variables to regridded file */ if(flg_grd_out_1D){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=col_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=col_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_bnd_out,crd_typ_out); if(flg_area_out){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_msk_out){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,(nc_type)NC_INT); } /* !flg_msk_out */ } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); if(flg_area_out){ (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_frc_out_wrt){ (void)nco_put_vara(out_id,frc_out_id,dmn_srt_out,dmn_cnt_tuo,frc_out,crd_typ_out); } /* !flg_frc_out_wrt */ if(flg_msk_out){ (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,(nc_type)NC_INT); } /* !flg_msk_out */ dmn_srt_out[0]=dmn_srt_out[1]=dmn_srt_out[2]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; 
dmn_cnt_tuo[2]=bnd_nbr_out; /* NB: 20160803 Semantically confusing---curvilinear grids must write *_crn_out data into *_bnd_out arrays */ (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_crn_out,crd_typ_out); (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_crn_out,crd_typ_out); } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lat_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lon_nbr_out; (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=slat_nbr_out; (void)nco_put_vara(out_id,slat_out_id,dmn_srt_out,dmn_cnt_tuo,slat_ctr_out,crd_typ_out); (void)nco_put_vara(out_id,slat_wgt_id,dmn_srt_out,dmn_cnt_tuo,slat_wgt_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=slon_nbr_out; (void)nco_put_vara(out_id,slon_out_id,dmn_srt_out,dmn_cnt_tuo,slon_ctr_out,crd_typ_out); if(slat_ctr_out) slat_ctr_out=(double *)nco_free(slat_ctr_out); if(slat_wgt_out) slat_wgt_out=(double *)nco_free(slat_wgt_out); if(slon_ctr_out) slon_ctr_out=(double *)nco_free(slon_ctr_out); } /* !nco_grd_lat_fv */ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lat_nbr_out; (void)nco_put_vara(out_id,lat_wgt_id,dmn_srt_out,dmn_cnt_tuo,lat_wgt_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lon_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; if(flg_area_out){ (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_frc_out_wrt){ (void)nco_put_vara(out_id,frc_out_id,dmn_srt_out,dmn_cnt_tuo,frc_out,crd_typ_out); } /* !flg_frc_out_wrt */ if(flg_msk_out){ (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,(nc_type)NC_INT); } /* !flg_msk_out */ } /* !flg_grd_out_rct */ /* Regrid or copy variable values */ const double wgt_vld_thr=rgr->wgt_vld_thr; /* [frc] Weight threshold for valid destination value */ const nco_bool flg_rnr=rgr->flg_rnr; /* [flg] Renormalize destination values by valid area */ char *sgs_frc_nm=NULL; char *sgs_msk_nm=NULL; double *sgs_frc_in=NULL; double *sgs_frc_out=NULL; double *var_val_dbl_in=NULL; double *var_val_dbl_out=NULL; double *wgt_vld_out=NULL; double var_val_crr; int *tally=NULL; /* [nbr] Number of valid (non-missing) values */ int lvl_idx; /* [idx] Level index */ int lvl_nbr; /* [nbr] Number of levels */ int thr_idx; /* [idx] Thread index */ size_t idx_in; /* [idx] Input grid index */ size_t idx_out; /* [idx] Output grid index */ size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t val_in_fst; /* [nbr] Number of elements by which current N-D slab input values are offset from origin */ size_t val_out_fst; /* [nbr] Number of elements by which current N-D slab output values are offset from origin */ /* 20190322: Prior to entering OpenMP loop, collect specified SGS information */ const double sgs_nrm=rgr->sgs_nrm; /* [frc] Sub-gridscale normalization */ if(rgr->sgs_frc_nm){ /* Normalization test: 
fl_in=20181217.CNTL_CNPCTC1850_OIBGC.ne30_oECv3.edison.clm2.h0.2000-12.nc /bin/cp -f ${DATA}/hdf/${fl_in} ~/elm_raw.nc ncremap -P sgs -v FSDS,TBOT,GPP -a aave -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/cmip6_180x360_scrip.20181001.nc ~/elm_raw.nc ~/elm_sgs.nc # Original SGS method ncks -A -v grid_area ${DATA}/grids/ne30np4_pentagons.091226.nc ~/elm_sgs.nc ncremap -P gsg -v FSDS,TBOT,GPP -m ${DATA}/maps/map_ne30np4_to_cmip6_180x360_aave.20181001.nc ~/elm_raw.nc ~/elm_gsg.nc # New SGS method */ if(rgr->sgs_msk_nm) sgs_msk_nm=(char *)strdup(rgr->sgs_msk_nm); sgs_frc_nm=(char *)strdup(rgr->sgs_frc_nm); var_nm=sgs_frc_nm; var_typ_rgr=NC_DOUBLE; /* NB: Regrid in double precision */ var_typ_out=NC_DOUBLE; /* NB: sgs_frc_out must be double precision */ var_sz_in=1L; /* Compute from scratch to be sure it matches grd_sz_in */ var_sz_out=grd_sz_out; /* Assume this holds */ char *fl_sgs=NULL; /* [sng] External sub-gridscale file name */ int sgs_id; /* [id] netCDF file ID for external sub-gridscale file */ sgs_id=in_id; if((rcd=nco_inq_varid_flg(sgs_id,var_nm,&var_id_in)) != NC_NOERR){ /* If sgs_frc_nm is not in input file then search for it in external area file */ #ifdef WIN32 const char sls_chr='\\'; /* [chr] Slash character */ #else /* !WIN32 */ const char sls_chr='/'; /* [chr] Slash character */ #endif /* !WIN32 */ char *sls_ptr; /* [sng] Pointer to last slash character (' ') */ sls_ptr=strrchr(var_nm,sls_chr); if(!sls_ptr){ (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unable to find sgs_frc_nm = %s in current input file, and unable to identify filename (ending with slash '/' or backslash '\\', as appropriate) portion of that string to serve as local external file for sgs_frc input, exiting\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm); nco_exit(EXIT_FAILURE); } /* !sls_ptr */ sgs_frc_nm=(char *)strdup(sls_ptr+1L); /* Copy variable-name portion of string */ *sls_ptr='\0'; /* NULL-terminate filename */ fl_sgs=(char *)strdup(var_nm); var_nm=sgs_frc_nm; /* NB: too tricky? */ rcd=nco_open(fl_sgs,NC_NOWRITE,&sgs_id); if((rcd=nco_inq_varid_flg(sgs_id,var_nm,&var_id_in)) != NC_NOERR){ (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unable to find sgs_frc_nm = \"%s\" in local external file %s, exiting\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm,fl_sgs); nco_exit(EXIT_FAILURE); } /* !rcd */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s obtaining sgs_frc = %s from file %s\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm,fl_sgs); } /* !rcd */ rcd=nco_inq_varndims(sgs_id,var_id_in,&dmn_nbr_in); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? 
dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(sgs_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(sgs_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); var_sz_in*=dmn_cnt_in[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ if(var_sz_in != grd_sz_in){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") requires that sgs_frc = %s be same size as spatial grid but var_sz_in = %lu != %lu = grd_sz_in\n",nco_prg_nm_get(),fnc_nm,var_nm,var_sz_in,grd_sz_in); nco_exit(EXIT_FAILURE); } /* !var_sz_in */ /* Missing value setup (NB: ELM landfrac has _FillValue and is _FillValue where masked) */ has_mss_val=nco_mss_val_get_dbl(sgs_id,var_id_in,&mss_val_dbl); if(has_mss_val) mss_val_cmp_dbl=mss_val_dbl; else mss_val_cmp_dbl=NC_FILL_DOUBLE; sgs_frc_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() sgs_frc_in value buffer"); rcd=nco_get_vara(sgs_id,var_id_in,dmn_srt,dmn_cnt_in,sgs_frc_in,var_typ_rgr); /* If sgs_frc comes from external local file, close it now */ if(fl_sgs){ rcd=nco_close(sgs_id); fl_sgs=(char *)nco_free(fl_sgs); } /* !fl_sgs */ /* Initialize output */ sgs_frc_out=(double *)nco_malloc_dbg(grd_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() sgs_frc_out value buffer"); /* Initialize and regrid sgs_frc_out 20190907: sgs_frc_in (landfrac) is _FillValue (1.0e36) for ELM datasets in all masked gridcells, and is always positive definite (never zero) in all unmasked gridcells because it is a true area. ELM sgs_frc_out is always positive definite gridcell area everywhere, with no missing values and no zero values. 20190910: MPAS-Seaice datasets have no mask, and sgs_frc_in (timeMonthly_avg_iceAreaCell) is never equal to its (ncatted-appended) _FillValue (-9.99999979021477e+33) and is usually zero because it is time-mean area-fraction of sea ice which only exists in polar regions. MPAS-Seaice sgs_frc_out is zero in all gridcells without sea-ice.
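In outline, the initialization and regrid of sgs_frc_out below is a sparse matrix-vector multiply with the map stored in COO (coordinate) form: link l adds wgt_raw[l] times the source value at col_src_adr[l] into destination cell row_dst_adr[l].
A minimal standalone sketch of that idiom (illustrative helper only, not an NCO API; names are hypothetical):

  // Apply a COO-form map: dst[row[l]] += wgt[l]*src[col[l]]
  // Destination cells touched by no link keep their 0.0 initialization
  static void map_apply(long lnk_nbr,const int *row,const int *col,const double *wgt,const double *src,double *dst,long dst_sz)
  {
    for(long dst_idx=0;dst_idx<dst_sz;dst_idx++) dst[dst_idx]=0.0;
    for(long lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) dst[row[lnk_idx]]+=wgt[lnk_idx]*src[col[lnk_idx]];
  }

The sgs_frc_out loop below follows this pattern, with the additional guard that links whose source value equals mss_val_cmp_dbl contribute nothing.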
Regardless of input source, following blocks guarantee that sgs_frc_out is defined everywhere, is never a missing value (sgs_frc_out is zero where sgs_frc_in may have been _FillValue), and that it is always safe to multiply by and normalize by sgs_frc_out in the main regridding loop */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) sgs_frc_out[dst_idx]=0.0; for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) if((var_val_crr=sgs_frc_in[col_src_adr[lnk_idx]]) != mss_val_cmp_dbl) sgs_frc_out[row_dst_adr[lnk_idx]]+=var_val_crr*wgt_raw[lnk_idx]; /* Sanity check sgs_frc_out */ if(nco_dbg_lvl_get() >= nco_dbg_fl){ /* 20190326: sgs_frc expressed as a fraction must never exceed sgs_nrm CICE expresses sgs_frc (aice) in percent, i.e., sgs_nrm=100.0 Sum total value of sgs_frc (as opposed to gridcell_area) depends on grid resolution */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ /* 20190907: Approximate comparison because rounding causes frequent exceedances of sgs_nrm by epsilon ~ 1.0e-15 */ if((float)sgs_frc_out[dst_idx] > sgs_nrm) (void)fprintf(stdout,"%s: INFO %s reports sgs_frc_out[%lu] = %19.15f > %g = sgs_nrm\n",nco_prg_nm_get(),fnc_nm,dst_idx,sgs_frc_out[dst_idx],sgs_nrm); } /* !dst_idx */ } /* !dbg */ // for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ // (void)fprintf(stdout,"%s: INFO %s reports sgs_frc_out[%lu] = %19.15f\n",nco_prg_nm_get(),fnc_nm,dst_idx,sgs_frc_out[dst_idx]); // } /* !dst_idx */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); } /* !sgs_frc_nm */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"Regridding progress: # means regridded, ~ means copied\n"); /* Using naked stdin/stdout/stderr in parallel region generates warnings, so copy appropriate filehandle to variable scoped as shared in parallel clause */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ /* OpenMP notes: default(none): GCC9.x does not accept this (https://github.com/nco/nco/issues/114) perhaps because of fp_stdout/stderr? Intel accepts it. firstprivate(): Pointers that could be inadvertently free()'d if they lost their NULL-initialization private(): Almost everything else shared(): uggh...shared clause depends on both compiler and compiler-version 1. Const variables (e.g., flg_rnr,fnc_nm,wgt_vld_thr) are default shared for gcc >= 4.9.2, 2. fnc_nm (only!) must be explicit shared for g++ 4.6.3 (travis) 3. flg_rnr,fnc_nm,wgt_vld_thr must be explicit shared for icc 13.1.3 (rhea) 4. assert() cannot be used in OpenMP blocks 5.
Good discussion of "const" variables in shared() clause here http://jakascorner.com/blog/2016/07/omp-default-none-and-const.html 20200221: fxm Revisit default(none) in light of above article */ #ifdef __GNUG__ # define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ ) # if GCC_LIB_VERSION < 490 # define GXX_OLD_OPENMP_SHARED_TREATMENT 1 # endif /* 480 */ # if GCC_LIB_VERSION >= 900 # define GXX_WITH_OPENMP5_GPU_SUPPORT 1 # endif /* 900 */ #endif /* !__GNUC__ */ #if defined( __INTEL_COMPILER) # pragma omp parallel for default(none) firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_cmp_dbl,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_add_fll,flg_frc_nrm,flg_msk_apl,flg_msk_out,flg_rnr,fnc_nm,frc_out,lnk_nbr,msk_out,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw,wgt_vld_thr) #else /* !__INTEL_COMPILER */ # ifdef GXX_OLD_OPENMP_SHARED_TREATMENT # pragma omp parallel for default(none) firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_cmp_dbl,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_add_fll,flg_frc_nrm,flg_msk_apl,flg_msk_out,flg_rnr,fnc_nm,frc_out,lnk_nbr,msk_out,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw) # else /* !old g++ */ # if defined(GXX_WITH_OPENMP5_GPU_SUPPORT) && 0 # pragma omp target teams distribute parallel for firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_cmp_dbl,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_add_fll,flg_frc_nrm,flg_msk_apl,flg_msk_out,flg_rnr,frc_out,lnk_nbr,msk_out,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw) # else # pragma omp parallel for firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_cmp_dbl,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_add_fll,flg_frc_nrm,flg_msk_apl,flg_msk_out,frc_out,lnk_nbr,msk_out,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw) # endif /* !GCC >= 9.0 */ # endif /* !GCC < 4.9 */ #endif /* !__INTEL_COMPILER */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; thr_idx=omp_get_thread_num(); in_id=trv_tbl->in_id_arr[thr_idx]; #ifdef _OPENMP if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d 
thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? "s" : ""); if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm); #endif /* !_OPENMP */ if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm); if(trv.flg_rgr){ /* Regrid variable */ var_nm=trv.nm; var_typ_rgr=NC_DOUBLE; /* NB: Perform regridding in double precision */ var_typ_out=trv.var_typ; /* NB: Output type in file is same as input type */ var_sz_in=1L; var_sz_out=1L; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid(out_id,var_nm,&var_id_out); rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in); rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_vardimid(out_id,var_id_out,dmn_id_out); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); var_sz_in*=dmn_cnt_in[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ rcd=nco_inq_dimlen(out_id,dmn_id_out[dmn_idx],dmn_cnt_out+dmn_idx); if(dmn_cnt_out[dmn_idx] == 0L){ /* No records have been written, so overwrite zero output record size with input record size */ char dmn_rec_nm[NC_MAX_NAME]; /* [sng] Record dimension name */ int dmn_rec_id_in; rcd=nco_inq_dimname(out_id,dmn_id_out[dmn_idx],dmn_rec_nm); rcd=nco_inq_dimid(in_id,dmn_rec_nm,&dmn_rec_id_in); rcd=nco_inq_dimlen(in_id,dmn_rec_id_in,dmn_cnt_out+dmn_idx); } /* !dmn_cnt_out */ var_sz_out*=dmn_cnt_out[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ /* Compute number and size of non-lat/lon or non-col dimensions (e.g., level, time, species, wavelength) Denote their convolution by level or 'lvl' for shorthand There are lvl_nbr elements for each lat/lon or col position 20151011: Until today assume lat/lon and col are most-rapidly varying dimensions 20151011: Until today lvl_nbr missed last non-spatial dimension for 1D output */ lvl_nbr=1; /* Simple prescription of lvl_nbr works when horizontal dimension(s) is/are MRV */ for(dmn_idx=0;dmn_idx<dmn_nbr_out-dmn_nbr_hrz_crd;dmn_idx++) lvl_nbr*=dmn_cnt_out[dmn_idx]; /* Determining whether an individual field _uses_ missing values is important because memory requirements of next four malloc's (i.e., exclusive of wgt_raw) can sum to ~7*sizeof(uncompressed var) for NC_FLOAT and ~3.5*sizeof(uncompressed var) for NC_DOUBLE. Traditionally has_mss_val answers "does this variable _have_ and explicit missing value?" As of 20210909, we expand the meaning of has_mss_val, though only in nco_rgr_wgt() Now has_mss_val means does the variable use the explicitly defined missing value, or, failing that, does it use the implicitly defined missing value? 
Only variables that _use_ a missing value need tally and wgt_vld_out arrays mss_val_dbl is what nco_mss_val_get_dbl() returns---its meaning has not changed However, it is no longer intended to be used Instead we create mss_val_cmp_dbl, a more general value for comparison and assignment */ var_val_dbl_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() input value buffer"); var_val_dbl_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output value buffer"); /* Obtain input variable */ rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_dbl_in,var_typ_rgr); /* 20210909: Begin new missing value treatment */ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); /* NB: mss_val_cmp_dbl must be defined since it is now always used by regridder (even when has_mss_val is False) For instance flg_msk_apl block, below, uses mss_val_cmp_dbl for masked fields And test for _usage_ of missing values, below, necessarily compares to mss_val_cmp_dbl If missing value is not explicitly declared, use default missing value */ if(has_mss_val) mss_val_cmp_dbl=mss_val_dbl; else mss_val_cmp_dbl=NC_FILL_DOUBLE; /* Override float/double value with appropriate default missing value for integers */ if(!has_mss_val){ switch(var_typ_out){ case NC_BYTE: mss_val_cmp_dbl=NC_FILL_BYTE; break; case NC_CHAR: mss_val_cmp_dbl=NC_FILL_CHAR; break; case NC_SHORT: mss_val_cmp_dbl=NC_FILL_SHORT; break; case NC_INT: mss_val_cmp_dbl=NC_FILL_INT; break; case NC_FLOAT: mss_val_cmp_dbl=NC_FILL_FLOAT; break; case NC_DOUBLE: mss_val_cmp_dbl=NC_FILL_DOUBLE; break; case NC_UBYTE: mss_val_cmp_dbl=NC_FILL_UBYTE; break; case NC_USHORT: mss_val_cmp_dbl=NC_FILL_USHORT; break; case NC_UINT: mss_val_cmp_dbl=NC_FILL_UINT; break; /* 20210909: Implicit type conversion generates warnings: 'long long' to 'double' changes value from -9223372036854775806 to -9223372036854775808 'unsigned long long' to 'double' changes value from 18446744073709551614 to 18446744073709551616 Warnings can be fixed with -Wimplicit-const-int-float-conversion */ case NC_INT64: mss_val_cmp_dbl=NC_FILL_INT64; break; case NC_UINT64: mss_val_cmp_dbl=NC_FILL_UINT64; break; case NC_STRING: default: nco_dfl_case_nc_type_err(); break; } /* !var_typ_in */ } /* !has_mss_val */ /* Re-initialize Boolean to True and override with False if variable _uses_ missing values */ has_mss_val=True; for(idx_in=0;idx_in<var_sz_in;idx_in++){ if(var_val_dbl_in[idx_in] == mss_val_cmp_dbl) break; } /* !idx_in */ /* If neither implicit nor explicit missing value is present, treat all values as valid */ if(idx_in == var_sz_in) has_mss_val=False; /* 20210909: End new missing value treatment */ /* Memory allocation that depends on _FillValue and input variable contents */ if(has_mss_val) tally=(int *)nco_malloc_dbg(var_sz_out*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() tally buffer"); if(has_mss_val && flg_rnr) wgt_vld_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output renormalization weight buffer"); /* Initialize output */ (void)memset(var_val_dbl_out,0,var_sz_out*nco_typ_lng(var_typ_rgr)); if(has_mss_val) (void)memset(tally,0,var_sz_out*nco_typ_lng(NC_INT)); if(wgt_vld_out) (void)memset(wgt_vld_out,0,var_sz_out*nco_typ_lng(var_typ_rgr)); /* 20150914: Intensive variables require normalization, extensive do not Intensive variables (temperature, wind speed, mixing ratio) do not depend on gridcell boundaries Extensive variables (population, counts, numbers of things) 
depend on gridcell boundaries Extensive variables are the exception in models, yet are commonly used for sampling information, e.g., number of photons, number of overpasses Pass extensive variable list to NCO with, e.g., --xtn=TSurfStd_ct,... 20190420: Remove languishing, unfinished intensive variable code */ clock_t tm_srt; /* [us] Microseconds at start */ clock_t tm_end; /* [us] Microseconds at end */ float tm_drn; /* [s] Seconds elapsed */ if(nco_dbg_lvl_get() >= nco_dbg_var) tm_srt=clock(); /* This first block is for "normal" variables without sub-gridscale fractions */ if(!sgs_frc_out){ /* Apply weights */ if(!has_mss_val){ if(lvl_nbr == 1){ /* Weight single-level fields without missing values */ #ifdef ENABLE_GPU # pragma omp target data map(to:col_src_adr[0:lnk_nbr],row_dst_adr[0:lnk_nbr],var_val_dbl_in[0:var_sz_in],wgt_raw[0:lnk_nbr]) map(tofrom:var_val_dbl_out[0:var_sz_out]) # pragma omp target teams distribute parallel for simd schedule(static,1) #else /* !ENABLE_GPU */ # if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 ) # pragma omp simd # endif /* !__GNUC__ */ #endif /* !ENABLE_GPU */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]]+=var_val_dbl_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx]; }else{ val_in_fst=0L; val_out_fst=0L; /* Weight multi-level fields without missing values */ #ifdef ENABLE_GPU # pragma omp target data map(to:col_src_adr[0:lnk_nbr],row_dst_adr[0:lnk_nbr],var_val_dbl_in[0:var_sz_in],wgt_raw[0:lnk_nbr]) map(tofrom:var_val_dbl_out[0:var_sz_out]) # pragma omp parallel for reduction(+:val_in_fst,val_out_fst) #endif /* !ENABLE_GPU */ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ //if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(fp_stdout,"%s lvl_idx = %d val_in_fst = %li, val_out_fst = %li\n",trv.nm,lvl_idx,val_in_fst,val_out_fst); #ifdef ENABLE_GPU # pragma omp target teams distribute parallel for simd schedule(static,1) #else /* !ENABLE_GPU */ # if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 ) # pragma omp simd # endif /* !__GNUC__ */ #endif /* !ENABLE_GPU */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]+val_out_fst]+=var_val_dbl_in[col_src_adr[lnk_idx]+val_in_fst]*wgt_raw[lnk_idx]; val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ }else{ /* has_mss_val */ if(lvl_nbr == 1){ /* Weight single-level fields with missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]; if(wgt_vld_out) wgt_vld_out[idx_out]+=wgt_raw[lnk_idx]; tally[idx_out]++; } /* !mss_val_cmp_dbl */ } /* !lnk_idx */ }else{ /* lvl_nbr > 1 */ val_in_fst=0L; val_out_fst=0L; /* Weight multi-level fields with missing values */ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]+val_in_fst; idx_out=row_dst_adr[lnk_idx]+val_out_fst; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]; if(wgt_vld_out) wgt_vld_out[idx_out]+=wgt_raw[lnk_idx]; tally[idx_out]++; } /* !mss_val_cmp_dbl */ } /* !lnk_idx */ val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ } /* !has_mss_val */ if(!has_mss_val){ /* frc_dst = frc_out = dst_frac = frac_b contains non-unity elements and normalization type is "destarea" or "dstarea" or "none" When this occurs for conservative remapping, follow "destarea" 
normalization procedure; see SCRIP manual p. 11 and http://www.earthsystemmodeling.org/esmf_releases/public/last, specifically http://www.earthsystemmodeling.org/esmf_releases/public/last/ESMF_refdoc/node3.html#SECTION03029000000000000000 "frac_a: When a conservative regridding method is used, this contains the fraction of each source cell that participated in the regridding. When a non-conservative regridding method is used, this array is set to 0.0. frac_b: When a conservative regridding method is used, this contains the fraction of each destination cell that participated in the regridding. When a non-conservative regridding method is used, this array is set to 1.0 where the point participated in the regridding (i.e. was within the unmasked source grid), and 0.0 otherwise. If the first-order conservative interpolation method is specified ("-m conserve") then the destination field may need to be adjusted by the destination fraction (frac_b). This should be done if the normalization type is ``dstarea'' (sic, really "destarea") and if the destination grid extends outside the unmasked source grid. If it isn't known if the destination extends outside the source, then it doesn't hurt to apply the destination fraction. (If it doesn't extend outside, then the fraction will be 1.0 everywhere anyway.) The following code shows how to adjust an already interpolated destination field (dst_field) by the destination fraction. The variables n_b, and frac_b are from the weight file: ! Adjust destination field by fraction do i=1, n_b if (frac_b(i) .ne. 0.0) then dst_field(i)=dst_field(i)/frac_b(i) endif enddo" NB: Non-conservative interpolation methods (e.g., bilinear) should NOT apply this normalization (theoretically there is no danger in doing so because frc_out == 1 always for all gridcells that participate in bilinear remapping and frc_out == 0 otherwise) NCO's renormalization procedure below is similar to the ESMF-recommended procedure above. However, users can control NCO renormalization with, e.g., --rnr_thr=0.1, or override it completely with --rnr_thr=none. Moreover, frac_b == frc_dst is determined solely by gridcell binary mask overlaps during weight generation. It is time-invariant and 2D. Missing values (e.g., AOD) can vary in time and can be 3D (or N-D) and so can wgt_vld_out. Hence NCO renormalization is more flexible. flg_frc_nrm (i.e., ESMF-recommended) normalization makes fields pretty for graphics, yet is non-conservative because e.g., MPAS Ocean gridcells projected onto global uniform grids would have their SSTs normalized for prettiness on coastal gridpoints, which is inherently non-conservative. 20190912: Make "ESMF renormalization" of fields without missing values (i.e., "destarea") opt-in rather than default "destarea" and frac_b = frc_dst together set flg_frc_nrm Formerly flg_frc_nrm triggered ESMF renormalization by default Now flg_frc_nrm and user-explicitly-set --rnr_thr to [0.0,1.0] must both be true to trigger it This keeps conservative maps conservative by default NB: This "ESMF renormalization" normalizes by frac_b == frc_dst (not by wgt_vld_out) regardless of rnr_thr 20151018: Avoid double-normalizing by only executing fractional normalization (flg_frc_nrm) block when !has_mss_val, and valid area normalization when has_mss_val */ if(flg_frc_nrm){ /* Only renormalize when frac_b < 1.0 (because frac_b == 1.0 does nothing) */ if(flg_rnr){ /* 20190912: Only renormalize when user explicitly requests it (because renormalization is non-conservative).
Prior to today, renormalization was by default, henceforth it is opt-in. */ if(lvl_nbr == 1){ /* Fractionally renormalize single-level fields without missing values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=frc_out[dst_idx]; }else{ /* Fractionally renormalize multi-level fields without missing values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ if(frc_out[dst_idx] != 0.0){ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]/=frc_out[dst_idx]; } /* !lvl_idx */ } /* !frc_out */ } /* !dst_idx */ } /* lvl_nbr > 1 */ } /* !flg_rnr */ } /* !flg_frc_nrm */ } /* !has_mss_val */ if(has_mss_val){ /* NCL and ESMF treatment of weights and missing values described at https://www.ncl.ucar.edu/Applications/ESMF.shtml#WeightsAndMasking http://earthsystemmodeling.org/esmf_releases/non_public/ESMF_6_1_1/ESMF_refdoc/node5.html#SECTION05012600000000000000 NCO implements one of two procedures: "conservative" or "renormalized" The "conservative" algorithm uses all valid data from the input grid on the output grid Destination cells receive the weighted valid values of the source cells This is conservative because the global integrals of the source and destination fields are equal The "renormalized" algorithm divides the destination value by the sum of the valid weights This returns "reasonable" values, i.e., the mean of the valid input values However, renormalization is equivalent to extrapolating valid data to missing regions Hence the input and output integrals are unequal and the regridding is not conservative */ /* In fields with missing values, destination cells with no accumulated weight are missing value */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(!tally[dst_idx]) var_val_dbl_out[dst_idx]=mss_val_cmp_dbl; if(flg_rnr){ // if(nco_dbg_lvl_get() >= nco_dbg_quiet) (void)fprintf(fp_stdout,"%s: DEBUG renormalization for %s uses flg_rnr block\n",nco_prg_nm_get(),var_nm); if(wgt_vld_thr == 0.0){ /* Renormalize cells with no threshold by valid accumulated weight */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(tally[dst_idx]) var_val_dbl_out[dst_idx]/=wgt_vld_out[dst_idx]; }else{ /* Renormalize cells with threshold by valid accumulated weight if weight exceeds threshold */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(wgt_vld_out[dst_idx] >= wgt_vld_thr){var_val_dbl_out[dst_idx]/=wgt_vld_out[dst_idx];}else{var_val_dbl_out[dst_idx]=mss_val_cmp_dbl;} } /* !wgt_vld_thr */ } /* !flg_rnr */ } /* !has_mss_val */ } /* !sgs_frc_out */ /* Variables with sub-gridscale fractions require "double-weighting" and normalization */ if(sgs_frc_out){ if(!strcmp(var_nm,sgs_frc_nm)){ /* Copy shared variable sgs_frc_out that was regridded before OpenMP loop 20190911: Reasons to copy sgs_frc_out into sgs_frc_nm data include speed, consistency, and well-definedness of sgs_frc_out. One reason to regrid sgs_frc_nm here is consistency with original, raw dataset: ELM landfrac is masked so regridding it here (rather than using sgs_frc_out) would produce a regridded dataset more identical to raw ELM output. The same can be said for CICE (I think). MPAS cellMask and timeMonthly_avg_iceAreaCell are not masked, and so should produce the same values as sgs_frc_out if regridded here. 
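For the remaining SGS variables handled further below, the "double-weighting" is, in outline, for destination cell d with contributing links l (a sketch that ignores missing values):

  var_val_dbl_out[d] = Sum_l wgt_raw[l]*sgs_frc_in[col(l)]*var_val_dbl_in[col(l)] / sgs_frc_out[d]

where sgs_frc_out[d] = Sum_l wgt_raw[l]*sgs_frc_in[col(l)] was accumulated before this loop, i.e., each destination value is a mean weighted by both overlap area and sub-gridscale fraction, so that, e.g., land-only fields average over only the land portion of each destination cell.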
*/ memcpy(var_val_dbl_out,sgs_frc_out,grd_sz_out*nco_typ_lng(var_typ_rgr)); }else if(sgs_msk_nm && !strcmp(var_nm,sgs_msk_nm)){ /* Compute binary mask directly from shared sgs_frc_out (guaranteed to be all valid values) */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]=1.0; }else{ /* !sgs_msk_nm */ /* "Double-weight" all other sub-gridscale input values by sgs_frc_in and overlap weight, normalize by sgs_frc_out */ if(!has_mss_val){ if(lvl_nbr == 1){ /* SGS-regrid single-level fields without missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]]+=var_val_dbl_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx]*sgs_frc_in[col_src_adr[lnk_idx]]; /* NB: MPAS-Seaice dataset sgs_frc_out is usually zero in non-polar regions */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=sgs_frc_out[dst_idx]; }else{ /* lvl_nbr > 1 */ /* SGS-regrid multi-level fields without missing values */ val_in_fst=0L; val_out_fst=0L; for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; var_val_dbl_out[idx_out+val_out_fst]+=var_val_dbl_in[idx_in+val_in_fst]*wgt_raw[lnk_idx]*sgs_frc_in[idx_in]; } /* !lnk_idx */ /* Normalize current level values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx+val_out_fst]/=sgs_frc_out[dst_idx]; val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ }else{ /* !has_mss_val */ if(lvl_nbr == 1){ /* SGS-regrid single-level fields with missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]*sgs_frc_in[idx_in]; tally[idx_out]++; } /* !mss_val_cmp_dbl */ } /* !lnk_idx */ /* NB: Normalization clause is complex to support sgs_frc_out from both ELM and MPAS-Seaice */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(!tally[dst_idx]){var_val_dbl_out[dst_idx]=mss_val_cmp_dbl;}else{if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=sgs_frc_out[dst_idx];} }else{ /* lvl_nbr > 1 */ /* SGS-regrid multi-level fields with missing values */ val_in_fst=0L; val_out_fst=0L; for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]+val_in_fst; idx_out=row_dst_adr[lnk_idx]+val_out_fst; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]*sgs_frc_in[col_src_adr[lnk_idx]]; tally[idx_out]++; } /* !mss_val_cmp_dbl */ } /* !lnk_idx */ /* Normalize current level values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ idx_out=dst_idx+val_out_fst; if(!tally[idx_out]){var_val_dbl_out[idx_out]=mss_val_cmp_dbl;}else{if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[idx_out]/=sgs_frc_out[dst_idx];} } /* dst_idx */ val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ } /* !has_mss_val */ } /* !sgs_msk_nm */ } /* !sgs_frc_out */ if(nco_typ_ntg(var_typ_out)){ /* 20210407: Round, with rint(), integer fields before sending to netCDF for output Otherwise implicit type conversion will truncate (rather than round) output values This is critical for masks where rounding errors produce near integer values (e.g., 0.999...) that could then be truncated to zero by implicit conversion instead of rounded up to 1. 
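For example, a regridded binary mask value may arrive as 0.999999999999: implicit conversion to NC_INT would truncate it to 0, whereas rint(0.999999999999) yields 1.0, which then converts to the intended 1.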
*/ if(has_mss_val){ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(var_val_dbl_out[dst_idx] != mss_val_cmp_dbl) var_val_dbl_out[dst_idx]=rint(var_val_dbl_out[dst_idx]); }else{ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) var_val_dbl_out[dst_idx]=rint(var_val_dbl_out[dst_idx]); } /* !has_mss_val */ } /* !nco_typ_ntg() */ if(flg_add_fll && !has_mss_val){ /* 20210604: Initialize fields without _FillValue in input file to default missing value in unmapped destination cells Otherwise empty destination cells will be zero (not _FillValue) in output file Fields with input _FillValue are already _FillValue in output where tally is zero */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ if(frc_out[dst_idx] == 0.0){ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]=NC_FILL_DOUBLE; } /* !lvl_idx */ } /* !frc_out */ } /* !dst_idx */ } /* !flg_add_fll */ if(flg_msk_apl){ /* 20210607: Overwrite output values with _FillValue where destination cell is masked Same procedure regardless of whether input variables already have _FillValue NB: This is separate, and presumably independent, from above flg_add_fll loop Fields with flg_msk_apl will (harmlessly?) go through both loops */ double mss_val_msk; /* [frc] Missing value to apply where mask is false */ //if(has_mss_val) mss_val_msk=mss_val_dbl; else mss_val_msk=NC_FILL_DOUBLE; mss_val_msk=mss_val_cmp_dbl; /* [frc] Missing value to apply where mask is false */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ if(msk_out[dst_idx] == 0){ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]=mss_val_msk; } /* !lvl_idx */ } /* !msk_out */ } /* !dst_idx */ } /* !flg_msk_apl */ if(nco_dbg_lvl_get() >= nco_dbg_var){ tm_end=clock(); tm_drn=(float)(tm_end-tm_srt)/CLOCKS_PER_SEC; (void)fprintf(fp_stdout,"%s: INFO Compute time for %s (thread %d/%d): %g s\n",nco_prg_nm_get(),trv.nm,thr_idx,omp_get_num_threads(),tm_drn); } /* !dbg */ #pragma omp critical { /* begin OpenMP critical */ // rcd=nco_put_var(out_id,var_id_out,var_val_dbl_out,var_typ_rgr); rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_dbl_out,var_typ_rgr); } /* end OpenMP critical */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(tally) tally=(int *)nco_free(tally); if(var_val_dbl_out) var_val_dbl_out=(double *)nco_free(var_val_dbl_out); if(var_val_dbl_in) var_val_dbl_in=(double *)nco_free(var_val_dbl_in); if(wgt_vld_out) wgt_vld_out=(double *)nco_free(wgt_vld_out); }else{ /* !trv.flg_rgr */ /* Use standard NCO copy routine for variables that are not regridded */ #pragma omp critical { /* begin OpenMP critical */ (void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl); } /* end OpenMP critical */ } /* !flg_rgr */ } /* !xtr */ } /* end (OpenMP parallel for) loop over idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n"); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables regridded = %d (%d extensive), copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_xtn_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr); /* Free memory allocated for grid reading/writing */ if(area_out) area_out=(double *)nco_free(area_out); if(col_src_adr) col_src_adr=(int *)nco_free(col_src_adr); if(dmn_sz_in_int)
dmn_sz_in_int=(int *)nco_free(dmn_sz_in_int); if(dmn_sz_out_int) dmn_sz_out_int=(int *)nco_free(dmn_sz_out_int); if(frc_out) frc_out=(double *)nco_free(frc_out); if(lat_bnd_out) lat_bnd_out=(double *)nco_free(lat_bnd_out); if(lat_crn_out) lat_crn_out=(double *)nco_free(lat_crn_out); if(lat_ctr_out) lat_ctr_out=(double *)nco_free(lat_ctr_out); if(lat_ntf_out) lat_ntf_out=(double *)nco_free(lat_ntf_out); if(lat_wgt_out) lat_wgt_out=(double *)nco_free(lat_wgt_out); if(lon_bnd_out) lon_bnd_out=(double *)nco_free(lon_bnd_out); if(lon_crn_out) lon_crn_out=(double *)nco_free(lon_crn_out); if(lon_ctr_out) lon_ctr_out=(double *)nco_free(lon_ctr_out); if(lon_ntf_out) lon_ntf_out=(double *)nco_free(lon_ntf_out); if(msk_out) msk_out=(int *)nco_free(msk_out); if(row_dst_adr) row_dst_adr=(int *)nco_free(row_dst_adr); if(sgs_frc_nm) sgs_frc_nm=(char *)nco_free(sgs_frc_nm); if(sgs_frc_in) sgs_frc_in=(double *)nco_free(sgs_frc_in); if(sgs_frc_out) sgs_frc_out=(double *)nco_free(sgs_frc_out); if(sgs_msk_nm) sgs_msk_nm=(char *)nco_free(sgs_msk_nm); if(wgt_raw) wgt_raw=(double *)nco_free(wgt_raw); return rcd; } /* end nco_rgr_wgt() */ void nco_bsl_zro /* Return Bessel function zeros */ (const int bsl_zro_nbr, /* I [nbr] Number of Bessel function zeros to return */ double * const bsl_zro) /* O [frc] Bessel zero */ { /* Purpose: Return Bessel function zeros Source: CCM code /fs/cgd/csm/models/atm/ccm3.5.8/src/ccmlsm_share/bsslzr.F Return bsl_zro_nbr zeros (or if bsl_zro_nbr > 50, approximate zeros), of the Bessel function j0 First 50 zeros are given exactly, and remaining zeros are computed by extrapolation, and therefore are not exact Original version: CCM1 Standardized: J. Rosinski, June 1992 Reviewed: J. Hack, D. Williamson, August 1992 Reviewed: J. Hack, D. Williamson, April 1996 Modified 19970123 by Jim Rosinski to use double precision arithmetic ~2000: Converted to Fortran9X by C. Zender, changed all real*16 statements to double precision (real*8) 20150530: Converted to C99 by C.
Zender */ const char fnc_nm[]="nco_bsl_zro()"; /* [sng] Function name */ const double pi=M_PI; // [frc] 3 const double bsl_zro_tbl[]={ // First 50 zeros of the Bessel function j0 (element zero is an unused sentinel) -1.e36, 2.4048255577, 5.5200781103, 8.6537279129, 11.7915344391, 14.9309177086, 18.0710639679, 21.2116366299, 24.3524715308, 27.4934791320, 30.6346064684, 33.7758202136, 36.9170983537, 40.0584257646, 43.1997917132, 46.3411883717, 49.4826098974, 52.6240518411, 55.7655107550, 58.9069839261, 62.0484691902, 65.1899648002, 68.3314693299, 71.4729816036, 74.6145006437, 77.7560256304, 80.8975558711, 84.0390907769, 87.1806298436, 90.3221726372, 93.4637187819, 96.6052679510, 99.7468198587, 102.8883742542, 106.0299309165, 109.1714896498, 112.3130502805, 115.4546126537, 118.5961766309, 121.7377420880, 124.8793089132, 128.0208770059, 131.1624462752, 134.3040166383, 137.4455880203, 140.5871603528, 143.7287335737, 146.8703076258, 150.0118824570, 153.1534580192, 156.2950342685}; const int bsl_zro_tbl_nbr_max=50; /* [nbr] Number of tabulated zeros */ int bsl_idx; /* [idx] Counting index */ /* Main Code */ if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stdout,"%s: DEBUG Entering %s\n",nco_prg_nm_get(),fnc_nm); /* NB: Initialize bsl_zro[0] but (in C) never use it Initialization prevents uninitialized memory warnings */ for(bsl_idx=0;bsl_idx<=bsl_zro_nbr;bsl_idx++) if(bsl_idx <= bsl_zro_tbl_nbr_max) bsl_zro[bsl_idx]=bsl_zro_tbl[bsl_idx]; if(bsl_zro_nbr > bsl_zro_tbl_nbr_max) for(bsl_idx=bsl_zro_tbl_nbr_max+1;bsl_idx<=bsl_zro_nbr;bsl_idx++) bsl_zro[bsl_idx]=bsl_zro[bsl_idx-1]+pi; if(nco_dbg_lvl_get() == nco_dbg_old){ (void)fprintf(stdout,"%s: DEBUG %s reports bsl_zro_nbr = %d\n",nco_prg_nm_get(),fnc_nm,bsl_zro_nbr); (void)fprintf(stdout,"idx\tbsl_zro\n"); for(bsl_idx=1;bsl_idx<=bsl_zro_nbr;bsl_idx++) (void)fprintf(stdout,"%d\t%g\n",bsl_idx,bsl_zro[bsl_idx]); } /* endif dbg */ return; } /* end nco_bsl_zro() */ void nco_lat_wgt_gss /* [fnc] Compute and return sine of Gaussian latitudes and their weights */ (const int lat_nbr, /* I [nbr] Latitude number */ const nco_bool flg_s2n, /* I [enm] Latitude grid-direction is South-to-North */ double * const lat_sin, /* O [frc] Sine of latitudes */ double * const wgt_Gss) /* O [frc] Gaussian weights */ { /* Purpose: Compute and return sine of Gaussian latitudes and their weights Returned arrays are ordered south-to-north (S->N), not (N->S) Source: CCM /fs/cgd/csm/models/atm/ccm3.5.8/src/ccmlsm_share/gauaw.F Calculate sine of latitudes lat_sin(lat_nbr) and weights wgt_Gss(lat_nbr) for Gaussian quadrature Algorithm described in Davis and Rabinowitz, Journal of Research of the NBS, V 56, Jan 1956 Zeros of Bessel function j0, obtained from nco_bsl_zro(), are first guess for abscissae Original version: CCM1 Standardized: L. Bath, Jun 1992 L. Buja, Feb 1996 Reviewed: D. Williamson, J. Hack, Aug 1992 D. Williamson, J. Hack, Feb 1996 19970123 Modified by Jim Rosinski to use real*16 arithmetic in order to achieve (nearly) identical weights and latitudes on all machines. ~2000: Converted to Fortran9X by C. Zender, changed all real*16 statements to double precision (real*8) 20150530: Converted to C99 by C.
Zender 20150725: Verified against tabulation at http://pomax.github.io/bezierinfo/legendre-gauss.html#n64 */ const char fnc_nm[]="nco_lat_wgt_gss()"; /* [sng] Function name */ const double eps_rlt=1.0e-16; // Convergence criterion (NB: Threshold was 1.0d-27 in real*16, 1.0e-15 fine for real*8, 1.0e-16 pushes double precision to the brink) const double pi=M_PI; // [frc] 3 const int itr_nbr_max=20; // [nbr] Maximum number of iterations double c_cff; // Constant combination coefficient double lat_idx_dbl; // Latitude index, double precision double lat_nnr_idx_dbl; // Inner latitude index, double precision double lat_nbr_dbl; // [nbr] Number of latitudes, double precision double pk=double_CEWI; // Polynomial double pkm1; // Polynomial double pkm2; // Polynomial double pkmrk; // Polynomial double sp; // Current iteration latitude increment double xz; // Abscissa estimate double cos_arg; // Intermediate parameter introduced while attempting to eliminate valgrind "uninitialised value" warnings int itr_cnt; // Iteration counter int lat_idx; // [idx] Counting index (latitude) int lat_sym_idx; // [idx] Counting index (symmetric latitude) int lat_nnr_idx; // [idx] Counting index (inner latitude loop) int lat_nbr_rcp2; // lat_nbr/2 (number of latitudes in hemisphere) double *lat_sin_p1; // Sine of Gaussian latitudes double precision double *wgt_Gss_p1; // Gaussian weights double precision /* Main Code */ if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stdout,"%s: DEBUG Entering %s\n",nco_prg_nm_get(),fnc_nm); /* Arrays with Fortran indexing (indicated by "plus one" = "_p1") keep numerical algorithm in C identical to Fortran */ lat_sin_p1=(double *)nco_malloc((lat_nbr+1)*sizeof(double)); // Sine of Gaussian latitudes double precision wgt_Gss_p1=(double *)nco_malloc((lat_nbr+1)*sizeof(double)); // Gaussian weights double precision /* Use Newton iteration to find abscissae */ c_cff=0.25*(1.0-4.0/(pi*pi)); lat_nbr_dbl=lat_nbr; lat_nbr_rcp2=lat_nbr/2; // NB: Integer arithmetic (void)nco_bsl_zro(lat_nbr_rcp2,lat_sin_p1); for(lat_idx=1;lat_idx<=lat_nbr_rcp2;lat_idx++){ // NB: Loop starts at 1 // 20150713: Introduce intermediate parameter cos_arg in attempt to eliminate valgrind "uninitialised value" warnings emitted by cos() (actually __cos_sse()) // Warnings occur with gcc-compiled code, not with clang-compiled code cos_arg=lat_sin_p1[lat_idx]/sqrt((lat_nbr_dbl+0.5)*(lat_nbr_dbl+0.5)+c_cff); xz=cos(cos_arg); /* First approximation to xz */ itr_cnt=0; /* goto label_73 */ label_73: pkm2=1.0; pkm1=xz; if(++itr_cnt > itr_nbr_max){ (void)fprintf(stdout,"%s: ERROR %s reports convergence only to %g after %d iterations for lat_idx = %d\n",nco_prg_nm_get(),fnc_nm,fabs(sp),itr_nbr_max,lat_idx); nco_exit(EXIT_FAILURE); } /* endif */ /* Compute Legendre polynomial */ for(lat_nnr_idx=2;lat_nnr_idx<=lat_nbr;lat_nnr_idx++){ lat_nnr_idx_dbl=lat_nnr_idx; pk=((2.0*lat_nnr_idx_dbl-1.0)*xz*pkm1-(lat_nnr_idx_dbl-1.0)*pkm2)/lat_nnr_idx_dbl; pkm2=pkm1; pkm1=pk; } /* end inner loop over lat_nnr */ pkm1=pkm2; pkmrk=(lat_nbr_dbl*(pkm1-xz*pk))/(1.0-xz*xz); sp=pk/pkmrk; xz=xz-sp; /* NB: Easy to introduce bug here by not replacing Fortran abs() with C fabs() */ if(fabs(sp) > eps_rlt) goto label_73; lat_sin_p1[lat_idx]=xz; wgt_Gss_p1[lat_idx]=(2.0*(1.0-xz*xz))/((lat_nbr_dbl*pkm1)*(lat_nbr_dbl*pkm1)); } /* end outer loop over lat */ if(lat_nbr != lat_nbr_rcp2*2){ /* When lat_nbr is odd, compute weight at Equator */ lat_sin_p1[lat_nbr_rcp2+1]=0.0; pk=2.0/(lat_nbr_dbl*lat_nbr_dbl); for(lat_idx=2;lat_idx<=lat_nbr;lat_idx+=2){
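/* Wallis-like product: pk enters this loop as 2/lat_nbr^2 and each even lat_idx multiplies it by lat_idx^2/(lat_idx-1)^2, accumulating the closed-form Gaussian weight at the Equator (abscissa 0.0) that the Newton iteration above does not visit when lat_nbr is odd */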
lat_idx_dbl=lat_idx; pk=pk*lat_idx_dbl*lat_idx_dbl/((lat_idx_dbl-1.0)*(lat_idx_dbl-1.0)); } /* end loop over lat */ wgt_Gss_p1[lat_nbr_rcp2+1]=pk; } /* endif lat_nbr is odd */ /* Complete sets of abscissas and weights, using symmetry properties */ for(lat_idx=1;lat_idx<=lat_nbr_rcp2;lat_idx++){ lat_sym_idx=lat_nbr-lat_idx+1; lat_sin_p1[lat_sym_idx]=-lat_sin_p1[lat_idx]; wgt_Gss_p1[lat_sym_idx]=wgt_Gss_p1[lat_idx]; } /* end loop over lat */ /* Shift by one to remove Fortran offset in p1 arrays */ //memcpy(lat_sin,lat_sin_p1,lat_nbr*sizeof(double)); //memcpy(wgt_Gss,wgt_Gss_p1,lat_nbr*sizeof(double)); /* Reverse and shift arrays because original CCM code algorithm computes latitudes from north-to-south Shift by one to remove Fortran offset in p1 arrays */ if(flg_s2n){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ lat_sin[lat_idx]=lat_sin_p1[lat_nbr-lat_idx]; wgt_Gss[lat_idx]=wgt_Gss_p1[lat_nbr-lat_idx]; } /* end loop over lat */ }else{ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ lat_sin[lat_idx]=lat_sin_p1[lat_idx+1]; wgt_Gss[lat_idx]=wgt_Gss_p1[lat_idx+1]; } /* end loop over lat */ } /* !flg_s2n */ if(nco_dbg_lvl_get() == nco_dbg_old){ (void)fprintf(stdout,"%s: DEBUG %s reports lat_nbr = %d\n",nco_prg_nm_get(),fnc_nm,lat_nbr); (void)fprintf(stdout,"idx\tasin\tngl_rad\tngl_dgr\tgw\n"); for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) (void)fprintf(stdout,"%d\t%g\t%g\t%g\t%g\n",lat_idx,lat_sin[lat_idx],asin(lat_sin[lat_idx]),180.0*asin(lat_sin[lat_idx])/pi,wgt_Gss[lat_idx]); } /* endif dbg */ if(wgt_Gss_p1) wgt_Gss_p1=(double *)nco_free(wgt_Gss_p1); if(lat_sin_p1) lat_sin_p1=(double *)nco_free(lat_sin_p1); return; } /* end nco_lat_wgt_gss() */ void nco_sph_plg_area /* [fnc] Compute area of spherical polygon */ (rgr_sct * const rgr, /* I [sct] Regridding structure */ const double * const lat_bnd, /* [dgr] Latitude boundaries of rectangular grid */ const double * const lon_bnd, /* [dgr] Longitude boundaries of rectangular grid */ const long col_nbr, /* [nbr] Number of columns in grid */ const int bnd_nbr, /* [nbr] Number of bounds in gridcell */ double * const area) /* [sr] Gridcell area */ { /* Purpose: Compute area of spherical polygon */ /* Computing triangular area accurately is hard in corner cases Spherical triangles suffer from at least as many issues as planar ones, which are described in "Miscalculating Area and Angles of a Needle-like Triangle" by W.
Kahan, UC Berkeley In particular, the Law of Cosines and Heron's formula can be ill-conditioned For spherical triangles L'Huilier's Theorem is superior to Girard's Formula: http://mathworld.wolfram.com/LHuiliersTheorem.html Girard's formula depends on pi-minus-angle and angle is usually quite small in our applications so precision would be lost L'Huilier's theorem depends only on angles (a,b,c) and semi-perimeter (s) and is well-conditioned for small angles semi-perimeter = half-perimeter of triangle = 0.5*(a+b+c) Spherical Excess (SE) is the difference between the sum of the interior angles of a spherical triangle and the sum of the interior angles of a planar triangle (pi) SE is also the solid angle subtended by the spherical triangle and that's, well, astonishing and pretty cool Wikipedia shows a better SE formula for triangles that are ill-conditioned for L'Huilier's formula because a = b ~ 0.5c https://en.wikipedia.org/wiki/Spherical_trigonometry#Area_and_spherical_excess See also interesting discussion of L'Huilier by Charles Karney who suggests his own alternative: http://osgeo-org.1560.x6.nabble.com/Area-of-a-spherical-polygon-td3841625.html The discussion mentions Mil94 Robert D. Miller, Computing the area of a spherical polygon, Graphic Gems IV, chapter II.4, pages 132-137. http://books.google.com/books?id=CCqzMm_-WucC&pg=PA132&lpg=PA132&dq=miller+area+spherical+polygon+gems&source=bl&ots=mrnvZ6NJcm&sig=CMg8eaD8dzP5snMaPeCQzgoFWUk&hl=sv&ei=4G-YTKv5GsWZOI-mmZQP&sa=X&oi=book_result&ct=result&resnum=1&ved=0CBQQ6AEwAA#v=onepage&q&f=false Mil94 contains similar ideas to my method for spherical polygons (decomposing into adjacent multiple triangles from single vertex) However, his method places single vertex at pole, then adds signed areas to obtain full polygon area His method may suffer from degraded precision because of roundoff error and long side-lengths So-called "proper" spherical triangles are those for which all angles are less than pi, so a+b+c<3*pi Cartesian coordinates of (lat,lon)=(theta,phi) are (x,y,z)=(cos(theta)*cos(phi),cos(theta)*sin(phi),sin(theta)) Dot-product rule for vectors gives interior angle/arc length between two points: cos(a)=u dot v=cos(theta1)*cos(phi1)*cos(theta2)*cos(phi2)+cos(theta1)*sin(phi1)*cos(theta2)*sin(phi2)+sin(theta1)*sin(theta2) Spherical law of cosines relates interior angles/arc-lengths (a,b,c) to surface angles (A,B,C) in spherical triangle: https://en.wikipedia.org/wiki/Spherical_law_of_cosines cos(a)=cos(b)*cos(c)+sin(b)*sin(c)*cos(A) cos(b)=cos(c)*cos(a)+sin(c)*sin(a)*cos(B) cos(c)=cos(a)*cos(b)+sin(a)*sin(b)*cos(C) cos(A)=[cos(a)-cos(b)*cos(c)]/[sin(b)*sin(c)] cos(B)=[cos(b)-cos(c)*cos(a)]/[sin(c)*sin(a)] cos(C)=[cos(c)-cos(a)*cos(b)]/[sin(a)*sin(b)] Bounds information on unstructured grids will use bounds_nbr=maximum(vertice_nbr) Unused vertices are stored as either repeated points (ACME does this) or, conceivably, as missing values Given (lat,lon) for N-points algorithm to find area of spherical polygon is: 1. Any decomposition, Girard areas: Loses precision due to mismatch between pi and small spherical excesses A. Find interior angles/arc-lengths (a,b,c,d...) using spherical law of cosines along each edge B. Apply generalized Girard formula SE_n = Sum(A_n) - (N-2) - pi 2.
CSZ decomposition (N-2 triangles) with L'Huilier areas: Convert polygon into triangles by cycling spoke through all sides from common apex This method requires computation of N-2 (not N) triangles, though fewer sides due to optimization It works on all convex polygons (interior angles less than 180) but not, in general, concave polygons Whether it works or not on concave polygons depends upon their exact shape and the choice of apex point A. First three non-identical points form first triangle with sides A,B,C (first+second point define A, etc.) i. First vertice anchors all triangles ii. Third vertice of preceding triangle becomes second vertice of next triangle iii. Next non-identical point becomes last vertice of next triangle iv. Side C of previous triangle is side A of next triangle B. For each triangle, compute area with L'Huilier formula unless A = B ~ 0.5*C then use SAS formula 3. Centroidal decomposition, N triangle version by Taylor, L'Huilier areas: Compute polygon centroid and treat this as hub from which spokes are drawn to all vertices This method requires computation of N triangles, though fewer sides due to optimization Moreover, it works on all convex polygons and on slightly concave polygons Centroid/hub has clear view of interior of most simple concave polygons 4. Any decomposition but with exact RLL grids by Zender and Agress 20160918 A. Decompose polygon into triangles via any method (e.g., method 2 or 3 above) B. Determine whether triangle is spherical or contains RLL (constant latitude) C. Spherical triangles use L'Huilier, RLL triangles use series expansion */ const char fnc_nm[]="nco_sph_plg_area()"; const double dgr2rdn=M_PI/180.0; int bnd_nbr_ttl; /* [nbr] Number of bounds in gridcell accounting for possibility of centroid information */ long idx; /* [idx] Counting index for unrolled grids */ short int bnd_idx; /* Shift to this method once we pass rgr into nco_sph_plg_area() */ nco_bool flg_mth_csz=False; /* [flg] Use CSZ's advancing polygon bisector method */ nco_bool flg_mth_ctr=False; /* [flg] Use centroid method to compute polygon area */ nco_edg_typ_enm edg_typ; /* [enm] Arc-type for triangle edges */ nco_ply_tri_mth_typ_enm ply_tri_mth; /* [enm] Polygon decomposition method */ if(rgr->edg_typ == nco_edg_nil) rgr->edg_typ=nco_edg_gtc; edg_typ=rgr->edg_typ; /* [enm] Arc-type for triangle edges */ ply_tri_mth=rgr->ply_tri_mth; /* [enm] Polygon decomposition method */ if(ply_tri_mth == nco_ply_tri_mth_csz) flg_mth_csz=True; if(ply_tri_mth == nco_ply_tri_mth_ctr) flg_mth_ctr=True; assert(flg_mth_ctr != flg_mth_csz); bnd_nbr_ttl=bnd_nbr; // Allocate space for one extra boundary to store centroid information if necessary if(flg_mth_ctr) bnd_nbr_ttl=bnd_nbr+1; double *lat_bnd_rdn=NULL_CEWI; /* [rdn] Latitude boundaries of rectangular destination grid */ double *lon_bnd_rdn=NULL_CEWI; /* [rdn] Longitude boundaries of rectangular destination grid */ double *lat_bnd_sin=NULL_CEWI; /* [frc] Sine of latitude boundaries of rectangular destination grid */ double *lon_bnd_sin=NULL_CEWI; /* [frc] Sine of longitude boundaries of rectangular destination grid */ double *lat_bnd_cos=NULL_CEWI; /* [frc] Cosine of latitude boundaries of rectangular destination grid */ double *lon_bnd_cos=NULL_CEWI; /* [frc] Cosine of longitude boundaries of rectangular destination grid */ /* Allocate one extra space for some arrays to store polygon centroid values for each column for ply_tri_mth=ctr */ lon_bnd_rdn=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lat_bnd_rdn=(double
*)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lon_bnd_cos=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); lat_bnd_cos=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lon_bnd_sin=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); lat_bnd_sin=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); memcpy(lat_bnd_rdn,lat_bnd,col_nbr*bnd_nbr*sizeof(double)); memcpy(lon_bnd_rdn,lon_bnd,col_nbr*bnd_nbr*sizeof(double)); for(idx=0;idx<col_nbr*bnd_nbr;idx++){ lon_bnd_rdn[idx]*=dgr2rdn; lat_bnd_rdn[idx]*=dgr2rdn; lon_bnd_cos[idx]=cos(lon_bnd_rdn[idx]); lat_bnd_cos[idx]=cos(lat_bnd_rdn[idx]); lon_bnd_sin[idx]=sin(lon_bnd_rdn[idx]); lat_bnd_sin[idx]=sin(lat_bnd_rdn[idx]); } /* !idx */ double area_smc_crc; /* [sr] Small-circle correction to spherical triangle area */ double area_smc; /* [sr] Gridcell area allowing for latitude-triangles */ double area_ttl; /* [sr] Total area of input polygon list assuming spherical triangles */ double area_smc_ttl; /* [sr] Total area of input polygon list allowing for latitude-triangles */ double area_smc_crc_ttl; /* [sr] Latitude-triangle correction (should be small) to total area of input polygon list */ double area_smc_crc_abs_ttl; /* [sr] Latitude-triangle absolute correction (no compensation of positive/negative contributions, should be no smaller than above) to total area of input polygon list */ double lat_ctr; /* [dgr] Latitude of polygon centroid */ double lon_ctr; /* [dgr] Longitude of polygon centroid */ double lat_ctr_rdn; /* [rdn] Latitude of polygon centroid */ double lon_ctr_rdn; /* [rdn] Longitude of polygon centroid */ double lat_ctr_cos; /* [frc] Cosine latitude of polygon centroid */ double lat_dlt; /* [rdn] Latitudinal difference */ double lon_dlt; /* [rdn] Longitudinal difference */ double ngl_a; /* [rdn] Interior angle/great circle arc a */ double ngl_b; /* [rdn] Interior angle/great circle arc b */ double ngl_c; /* [rdn] Interior angle/great circle arc c */ double ngl_ltr_a; /* [rdn] Interior angle/small circle arc a, canonical latitude-triangle geometry */ double ngl_ltr_b; /* [rdn] Interior angle/great circle arc b, canonical latitude-triangle geometry */ double ngl_ltr_c; /* [rdn] Interior angle/great circle arc c, canonical latitude-triangle geometry */ double prm_smi; /* [rdn] Semi-perimeter of triangle */ double sin_hlf_tht; /* [frc] Sine of half angle/great circle arc theta connecting two points */ double xcs_sph; /* [sr] Spherical excess */ int tri_nbr; /* [nbr] Number of triangles in polygon */ long bnd_vld_nbr=NC_MIN_INT; /* [nbr] Number of valid (non-duplicative) vertices in each polygon */ long *a_idx; /* [idx] Point A 1-D indices for each triangle in polygon */ long *b_idx; /* [idx] Point B 1-D indices for each triangle in polygon */ long *c_idx; /* [idx] Point C 1-D indices for each triangle in polygon */ long *vrt_vld=NULL; /* [idx] Absolute 1-D indices of valid vertices */ long idx_a; /* [idx] Point A 1-D index */ long idx_b; /* [idx] Point B 1-D index */ long idx_c; /* [idx] Point C 1-D index */ nco_bool flg_sas_ndl=False; /* [flg] L'Huilier's formula will fail due to needle where one side exceeds semi-perimeter */ nco_bool flg_sas_isc=False; /* [flg] L'Huilier's formula is ill-conditioned due to flat, near-isosceles triangle */ nco_bool flg_sas_a=False; /* [flg] Use SAS triangle formula with central angle a */ nco_bool flg_sas_b=False; /* [flg] Use SAS triangle formula with central angle b */ nco_bool flg_sas_c=False; /* [flg] Use SAS triangle formula with central angle c */ nco_bool flg_ply_has_smc; /*
[flg] Any triangle in polygon has small-circle edge */ nco_bool flg_tri_crr_smc; /* [flg] Current triangle has small_circle edge */ /* Initialize global accumulators */ area_ttl=0.0; area_smc_ttl=0.0; area_smc_crc_ttl=0.0; area_smc_crc_abs_ttl=0.0; for(long col_idx=0;col_idx<col_nbr;col_idx++){ /* Initialize local properties and accumulators for this cell/polygon */ flg_ply_has_smc=False; ngl_c=double_CEWI; /* Otherwise compiler unsure ngl_c is initialized first use */ area[col_idx]=0.0; area_smc=0.0; tri_nbr=0; if(col_idx == 0){ a_idx=(long *)nco_calloc(bnd_nbr,sizeof(long)); b_idx=(long *)nco_calloc(bnd_nbr,sizeof(long)); c_idx=(long *)nco_calloc(bnd_nbr,sizeof(long)); vrt_vld=(long *)nco_calloc(bnd_nbr,sizeof(long)); } /* !col_idx */ /* Safety re-initialization to ease debugging, not strictly necessary */ for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++){ vrt_vld[bnd_idx]=NC_MIN_INT; a_idx[bnd_idx]=NC_MIN_INT; b_idx[bnd_idx]=NC_MIN_INT; c_idx[bnd_idx]=NC_MIN_INT; } /* !bnd_idx */ if(flg_mth_ctr){ double lon_dff; /* [dgr] Longitude difference */ long bnd_srt_idx; /* [idx] Absolute starting index of vertices in polygon */ long bnd_idx; /* [idx] Offset of current valid vertex index from starting index */ long bnd_vld_idx; /* [idx] Absolute index of last valid vertex */ /* First vertice is always valid */ bnd_srt_idx=bnd_nbr*col_idx; bnd_vld_idx=bnd_srt_idx; vrt_vld[0]=bnd_vld_idx; lat_ctr=lat_bnd[bnd_srt_idx]; lon_ctr=lon_bnd[bnd_srt_idx]; bnd_vld_nbr=1; /* First guess for next valid index */ bnd_idx=1; /* bnd_idx labels offset from first vertex of next valid (i.e., non-duplicative) vertex */ while(bnd_idx<bnd_nbr){ /* Skip repeated points that must occur when polygon has fewer than allowed vertices */ while(lon_bnd[bnd_vld_idx] == lon_bnd[bnd_srt_idx+bnd_idx] && lat_bnd[bnd_vld_idx] == lat_bnd[bnd_srt_idx+bnd_idx]){ /* Next valid vertice must not duplicate previous valid vertex */ bnd_idx++; /* Have we already found all valid vertices? */ if(bnd_idx == bnd_nbr) break; } /* !while */ /* Jump to normalization when all valid vertices found */ if(bnd_idx == bnd_nbr) break; /* Current vertex is valid (non-duplicative) */ bnd_vld_idx=bnd_srt_idx+bnd_idx; vrt_vld[bnd_vld_nbr]=bnd_vld_idx; bnd_vld_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG %s reports centroidal decomposition col_idx=%lu, bnd_nbr=%d, bnd_idx=%ld, bnd_vld_idx=%ld, bnd_vld_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,col_idx,bnd_nbr,bnd_idx,bnd_vld_idx,bnd_vld_nbr); assert(bnd_vld_nbr <= bnd_nbr); lat_ctr+=lat_bnd[bnd_vld_idx]; lon_ctr+=lon_bnd[bnd_vld_idx]; lon_dff=lon_bnd[bnd_vld_idx]-lon_bnd[0]; if(lon_dff >= 180.0){ lon_ctr-=360.0; }else if(lon_dff <= -180.0){ lon_ctr+=360.0; } /* !lon_dff */ /* Search for next valid vertice in next iteration */ bnd_idx++; } /* !bnd_idx */ /* Compute centroid */ lat_ctr/=bnd_vld_nbr; lon_ctr/=bnd_vld_nbr; /* Centroid can become point A of bnd_nbr triangles, or optimize algorithm: 1. Skip sub-dividing polygon into centroid-based triangles for bnd_vld_nbr == 3 2. Split quadrilaterals into two (non-centroid) triangles for bnd_vld_nbr == 4 3.
Use full centroid-based triangle algorithm for bnd_vld_nbr >= 5 */ lat_ctr_rdn=lat_ctr*dgr2rdn; lon_ctr_rdn=lon_ctr*dgr2rdn; lat_ctr_cos=cos(lat_ctr_rdn); /* Place centroid values in extended arrays for easy access */ lat_bnd_rdn[(col_idx+1)*bnd_nbr_ttl-1L]=lat_ctr_rdn; lon_bnd_rdn[(col_idx+1)*bnd_nbr_ttl-1L]=lon_ctr_rdn; lat_bnd_cos[(col_idx+1)*bnd_nbr_ttl-1L]=lat_ctr_cos; /* Polygon centroid and valid vertices are now known */ assert(bnd_vld_nbr > 2); if(bnd_vld_nbr == 3){ /* Only three vertices means polygon is already a triangle */ tri_nbr=1; a_idx[0]=vrt_vld[0]; b_idx[0]=vrt_vld[1]; c_idx[0]=vrt_vld[2]; }else if(bnd_vld_nbr == 4){ /* Bisect quadrilateral into two triangles rather than use centroid and have four triangles */ tri_nbr=2; a_idx[0]=vrt_vld[0]; b_idx[0]=vrt_vld[1]; c_idx[0]=vrt_vld[2]; a_idx[1]=vrt_vld[0]; /* NB: Order is important. This way side C of triangle[0] = side A of triangle[1] */ b_idx[1]=vrt_vld[2]; c_idx[1]=vrt_vld[3]; }else if(bnd_vld_nbr >= 5){ /* Centroid method has as many triangles as valid vertices */ tri_nbr=bnd_vld_nbr; for(int tri_idx=0;tri_idx<tri_nbr;tri_idx++){ a_idx[tri_idx]=(col_idx+1)*bnd_nbr_ttl-1L; /* A is always centroid, store values at end of arrays */ b_idx[tri_idx]=vrt_vld[tri_idx]; c_idx[tri_idx]=vrt_vld[(tri_idx+1)%tri_nbr]; } /* !tri_idx */ } /* !bnd_vld_nbr */ } /* !flg_mth_ctr */ if(flg_mth_csz){ /* A is always the first vertex of all triangles */ idx_a=bnd_nbr*col_idx; /* Start search for B at next vertex */ bnd_idx=1; /* bnd_idx labels offset from point A of potential location of triangle points B and C We know that bnd_idx(A) == 0, bnd_idx(B) < bnd_nbr-1, bnd_idx(C) < bnd_nbr */ while(bnd_idx<bnd_nbr-1){ /* Only first triangle must search for B, subsequent triangles recycle previous C as current B */ if(tri_nbr == 0){ /* Skip repeated points that must occur when polygon has fewer than allowed vertices */ /* 20200115: Prior to today we never skipped polar points (same latitudes but different longitudes) That worked fine in practice for spherical triangles partly because triangles from CSZ decomposition (aka hub-and-spoke decomposition) are additive, even with multiple points on the same great circle, and partly due to luck (a starting vertex surrounded by points on the same geodesic would break it). Moreover, repeated polar points pose no issues for L'Huilier's (or Girard's) method which depends only on the interior angles and side lengths, not the longitudes of polar points. Small circles change that last part, and we must now eliminate repeated polar points. 
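Example: the vertices (lat,lon) = (90.0, 0.0) and (90.0, 120.0) are numerically distinct yet geometrically identical (both are the North Pole), so a small-circle base "joining" them would have zero length yet a 120-degree longitude span, and could contribute spurious area through the small-circle correction computed below. 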
*/ if(edg_typ == nco_edg_smc){ /* Skip repeated numerically identical points */ while(lon_bnd[idx_a] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_a] == lat_bnd[idx_a+bnd_idx]){ /* Next vertex may not duplicate A */ bnd_idx++; /* If there is no room for C then all triangles found */ if(bnd_idx == bnd_nbr-1) break; } /* !while */ /* Skip geometrically identical (i.e., repeated polar) points */ while((fabs(lat_bnd[idx_a]) == 90.0) && (fabs(lat_bnd[idx_a+bnd_idx]) == 90.0)){ bnd_idx++; if(bnd_idx == bnd_nbr-1) break; } /* !while */ }else if(edg_typ != nco_edg_smc){ /* Spherical polygons can use the simpler, pre-20200116 algorithm to eliminate repeated points */ while(lon_bnd[idx_a] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_a] == lat_bnd[idx_a+bnd_idx]){ /* Next vertex may not duplicate A */ bnd_idx++; /* If there is no room for C then all triangles found */ if(bnd_idx == bnd_nbr-1) break; } /* !while */ }else{ abort(); } /* !edg_typ */ /* Jump to next column when all triangles found */ if(bnd_idx == bnd_nbr-1) break; } /* !tri_nbr */ idx_b=idx_a+bnd_idx; /* Search for C at next vertex */ bnd_idx++; /* fxm */ while(lon_bnd[idx_b] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_b] == lat_bnd[idx_a+bnd_idx]){ /* Next vertex may not duplicate B */ bnd_idx++; /* If there is no room for C then all triangles found */ if(bnd_idx == bnd_nbr) break; } /* !while */ /* Jump to next column when all triangles found */ if(bnd_idx == bnd_nbr) break; idx_c=idx_a+bnd_idx; /* Valid triangle, vertices are known and labeled */ a_idx[tri_nbr]=idx_a; b_idx[tri_nbr]=idx_b; c_idx[tri_nbr]=idx_c; tri_nbr++; /* Begin search for next B at current C */ bnd_idx=idx_c-idx_a; } /* !bnd_idx */ } /* !flg_mth_csz */ /* Triangles are known for requested decomposition method Compute and accumulate their area Optimized algorithm recycles previous arc c as current arc a (after first triangle) */ for(int tri_idx=0;tri_idx<tri_nbr;tri_idx++){ idx_a=a_idx[tri_idx]; idx_b=b_idx[tri_idx]; idx_c=c_idx[tri_idx]; if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG %s reports triangle vertices: col_idx=%lu, tri_idx=%d, idx_a=%ld, idx_b=%ld, idx_c=%ld\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,idx_a,idx_b,idx_c); /* Compute interior angle/great circle arc a for first triangle; subsequent triangles recycle previous arc c */ if(tri_idx == 0){ /* 20150831: Test by computing ncol=0 area in conus chevrons grid, compare to MAT results ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/257x512_SCRIP.20150901.nc -m ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.20150901.nc ncremap -s ${DATA}/grids/257x512_SCRIP.20150901.nc -g ${DATA}/grids/conusx4v1np4_chevrons_scrip_c150815.nc -m ${DATA}/maps/map_fv257x512_to_conusx4v1np4_chevrons_bilin.20150901.nc ncks -O -D 5 -v FSNT --map ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.150418.nc ${DATA}/ne30/raw/famipc5_ne30_v0.3_00003.cam.h0.1979-01.nc ${DATA}/ne30/rgr/fv_FSNT.nc ncks -O -D 5 -v FSNT --rgr diagnose_area --map ${DATA}/maps/map_fv257x512_to_conusx4v1np4_chevrons_bilin.20150901.nc ${DATA}/ne30/rgr/fv_FSNT.nc ${DATA}/ne30/rgr/dogfood.nc ncks -O -D 1 --rgr infer#diagnose_area --rgr grid=${HOME}/grd.nc ${DATA}/ne30/rgr/dogfood.nc ~/foo.nc ncks -H -s %20.15e, -v area -d ncol,0 ${DATA}/ne30/rgr/dogfood.nc ncks -H -s %20.15e, -v grid_area -d grid_size,0 ${DATA}/grids/conusx4v1np4_chevrons_scrip_c150815.nc ncks -H -s %20.15e, -v grid_area -d grid_size,0 ${HOME}/grd.nc ncol=0 on conus chevrons file: 3.653857995295246e-05 raw GLL weight 3.653857995294305e-05 ESMF weight (area_b from 
map-file) 3.653857995294302e-05 matlab CSZ decomposition (N-2 triangles) computed at SNL by MAT 3.653857995294301e-05 matlab centroidal decomposition (N triangles) computed at SNL by MAT 3.653857995294258e-05 NCO CSZ _and_ centroidal decompositions (new haversine) 3.653857995289623e-05 NCO CSZ decomposition (old acos) 20191011: Tested this same polygon in ESMF and NCO weight-generator NCO maps begin with first destination gridcell, find next ESMF gridcell by searching for first col: ncks --trd -C -v col ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc | egrep "=1 " ncks -H --trd -s %20.15e -C -d n_b,0 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc 3.653857995294305e-05 ncks -H --trd -s '%20.15e, ' -C -d n_b,0 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 3.653857995295246e-05 ESMF and NCO weight-generators produce nearly identical S results to double-precision: ncks -H --trd -s '%20.15e, ' -C -d n_s,0,1 -v S ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 2.181999640069480e-03, 1.309571213636605e-02 ncks -H --trd -s %20.15e -C -d n_s,207436 -d n_s,209617 -v S ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc 2.181999640069454e-03, 1.309571213636510e-02 Compare first five polygon areas: ncks --trd -H -C -s '%20.15e, ' -d n_b,0,4 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc ncks --trd -H -C -s '%20.15e, ' -d n_b,0,4 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 3.653857995294305e-05, 1.250459284052488e-04, 1.448204605591709e-04, 8.223598867312266e-05, 8.585831933875070e-05, # aave 3.653857995294258e-05, 1.250459284052470e-04, 1.448204605591675e-04, 8.223598867312247e-05, 8.585831933875186e-05, Compare total areas: ncwa -O -y ttl -v area.? ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc ~/foo_aave.nc ncwa -O -y ttl -v area.? ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc ~/foo_nco.nc ncks --trd -H -C -s '%20.15e, ' -v area.? ~/foo_aave.nc ncks --trd -H -C -s '%20.15e, ' -v area.? ~/foo_nco.nc aave: 1.256637061435867e+01, 1.256637061435973e+01 nco: 1.256637061435857e+01, 1.256637061435955e+01 4*pi: 1.25663706143591729538e+01 Does (tru_glb_ttl/NCO_glb_ttl)*NCO_lcl = ESMF_lcl ? 
(1.25663706143591729538/1.256637061435857)*3.653857995294258=3.6538579952944333 No, normalization alone does not explain differences between ESMF and NCO It does not appear that ESMF does a global normalization of areas/weights */ /* Computing great circle arcs over small arcs requires care since central angle is near 0 degrees Cosine of small angles changes slowly for such angles, and leads to precision loss Use haversine formula instead of spherical law of cosines formula https://en.wikipedia.org/wiki/Great-circle_distance */ /* Interior angle/great circle arc a, spherical law of cosines formula (loses precision): cos_a=lat_bnd_cos[idx_a]*lon_bnd_cos[idx_a]*lat_bnd_cos[idx_b]*lon_bnd_cos[idx_b]+ lat_bnd_cos[idx_a]*lon_bnd_sin[idx_a]*lat_bnd_cos[idx_b]*lon_bnd_sin[idx_b]+ lat_bnd_sin[idx_a]*lat_bnd_sin[idx_b];ngl_a=acos(cos_a); */ /* Interior angle/great circle arc a, haversine formula: sin(tht/2) = sqrt(sin^2(dlt_lat/2) + cos(lat_a)*cos(lat_b)*sin^2(dlt_lon/2)), tht = 2*asin(sin(tht/2)) */ // 20160918: Use branch cut rules for longitude lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_a],lon_bnd_rdn[idx_b])); lat_dlt=fabs(lat_bnd_rdn[idx_a]-lat_bnd_rdn[idx_b]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_a]*lat_bnd_cos[idx_b]*pow(sin(0.5*lon_dlt),2)); ngl_a=2.0*asin(sin_hlf_tht); }else{ /* !tri_idx == 0 */ ngl_a=ngl_c; } /* !tri_idx == 0 */ /* Interior angle/great circle arc b */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_b],lon_bnd_rdn[idx_c])); lat_dlt=fabs(lat_bnd_rdn[idx_b]-lat_bnd_rdn[idx_c]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_b]*lat_bnd_cos[idx_c]*pow(sin(0.5*lon_dlt),2)); ngl_b=2.0*asin(sin_hlf_tht); /* Interior angle/great circle arc c */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_c],lon_bnd_rdn[idx_a])); lat_dlt=fabs(lat_bnd_rdn[idx_c]-lat_bnd_rdn[idx_a]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_c]*lat_bnd_cos[idx_a]*pow(sin(0.5*lon_dlt),2)); ngl_c=2.0*asin(sin_hlf_tht); /* Semi-perimeter */ prm_smi=0.5*(ngl_a+ngl_b+ngl_c); /* L'Huilier's formula results in NaN if any side exceeds semi-perimeter This can occur in needle-shaped triangles due to rounding errors in derived arc lengths a, b, c 20200203: Problematic needles occur a few dozen times in ne120pg2 -> cmip6 maps Problematic isosceles triangles are much rarer than problematic needles Therefore look for needle-issues first, then, if none found, look for isosceles issues Wikipedia recommends treating ill-conditioned triangles by Side-Angle-Side (SAS) formula https://en.wikipedia.org/wiki/Spherical_trigonometry Diagnose needles beforehand and call SAS routines as above to avoid NaN in L'Huilier Label problematic needle triangles by shortest side, e.g., "flg_sas_a" means (b ~ c) and a ~ 0.0 */ flg_sas_ndl=flg_sas_isc=flg_sas_a=flg_sas_b=flg_sas_c=False; if(ngl_a > prm_smi){if(ngl_b > ngl_c) flg_sas_c=True; else flg_sas_b=True;} /* a exceeds semi-perimeter */ else if(ngl_b > prm_smi){if(ngl_c > ngl_a) flg_sas_a=True; else flg_sas_c=True;} /* b exceeds semi-perimeter */ else if(ngl_c > prm_smi){if(ngl_a > ngl_b) flg_sas_b=True; else flg_sas_a=True;} /* c exceeds semi-perimeter */ if(flg_sas_a || flg_sas_b || flg_sas_c) flg_sas_ndl=True; if(!flg_sas_ndl){ /* L'Huilier's formula becomes ill-conditioned when two sides are one half the third side This occurs for flat, isosceles-shaped triangles Label problematic isosceles triangles by longest side, e.g., "flg_sas_a" means (b ~ c) ~ 0.5*a */ /* Sensitivity tests on ~20191014 showed that triangular ill-conditioning treatment (i.e., switching to SAS method) does not improve (and may degrade) accuracy for eps_ill_cnd > 
1.0e-15 */ const double eps_ill_cnd=1.0e-15; /* [frc] Ill-conditioned tolerance for interior angle/great circle arcs in triangle */ const double eps_ill_cnd_dbl=2.0*eps_ill_cnd; /* [frc] Ill-conditioned tolerance for interior angle/great circle arcs in triangle */ if((fabs(ngl_a-ngl_b) < eps_ill_cnd) && (fabs(ngl_a-0.5*ngl_c) < eps_ill_cnd_dbl)) flg_sas_c=True; /* c is twice a and b */ else if((fabs(ngl_b-ngl_c) < eps_ill_cnd) && (fabs(ngl_b-0.5*ngl_a) < eps_ill_cnd_dbl)) flg_sas_a=True; /* a is twice b and c */ else if((fabs(ngl_c-ngl_a) < eps_ill_cnd) && (fabs(ngl_c-0.5*ngl_b) < eps_ill_cnd_dbl)) flg_sas_b=True; /* b is twice c and a */ if(flg_sas_a || flg_sas_b || flg_sas_c) flg_sas_isc=True; } /* !flg_sas_ndl */ if(flg_sas_isc || flg_sas_ndl){ /* Compute area using SAS formula */ double cos_hlf_C; /* [frc] Cosine of half of canonical surface angle C */ //double sin_hlf_C; /* [frc] Sine of half of canonical surface angle C */ double ngl_sfc_ltr_C; /* [rdn] Canonical surface angle/great circle arc C */ double tan_hlf_a_tan_hlf_b; /* [frc] Product of tangents of one-half of nearly equal canonical sides */ double xcs_sph_hlf_tan; /* [frc] Tangent of one-half the spherical excess */ /* Transform sides into canonical order for formula where C is surface angle between arcs a and b */ if(flg_sas_c){ ngl_ltr_a=ngl_a; ngl_ltr_b=ngl_b; ngl_ltr_c=ngl_c; } /* !flg_sas_c */ if(flg_sas_a){ ngl_ltr_a=ngl_b; ngl_ltr_b=ngl_c; ngl_ltr_c=ngl_a; } /* !flg_sas_a */ if(flg_sas_b){ ngl_ltr_a=ngl_c; ngl_ltr_b=ngl_a; ngl_ltr_c=ngl_b; } /* !flg_sas_b */ if(flg_sas_ndl && (nco_dbg_lvl_get() >= nco_dbg_scl)) (void)fprintf(stdout,"%s: INFO %s reports col_idx = %li triangle %d is a needle-shaped triangle with a side that exceeds semi-perimeter = %0.16e. Eschew L'Huilier's formula for spherical excess to avoid NaN. Could use SAS formula with canonical central interior arc c = %0.16e.\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,prm_smi,ngl_ltr_c); if(flg_sas_isc && (nco_dbg_lvl_get() >= nco_dbg_scl)) (void)fprintf(stdout,"%s: INFO %s reports col_idx = %li triangle %d is a nearly flat isosceles-shaped triangle. Canonical arcs a and b differ by %0.16e. Eschew L'Huilier's formula for spherical excess to avoid low precision. Could use SAS formula.\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,fabs(ngl_ltr_a-ngl_ltr_b)); /* Determine canonical surface angle C To find any angle given three spherical triangle sides, Wikipedia opines: "The cosine rule may be used to give the angles A, B, and C but, to avoid ambiguities, the half-angle formulae are preferred." Half-angle formulae include two applicable variants that yield the sine or cosine of half C Then C is determined as twice the asin() or acos() function, respectively For needle-shaped triangles, RHS sin formula is ~ sin^2(0)/sin(a)*sin(b) ~ 0.0 For needle-shaped triangles, RHS cos formula is ~ sin^2(s)/sin(a)*sin(b) ~ 0.5 For flat isosceles triangles, RHS sin formula is ~ sin^2(0)/sin(a)*sin(b) ~ 0.0 For flat isosceles triangles, RHS cos formula is ~ sin(s)*sin(0)/sin(a)*sin(b) ~ 0.0 Use sin formula since both needle- and isosceles-shaped triangles have RHS ~ 0.0 where arcsin() is most precise 20200203: Half-angle sine formula gives NaNs, and half-angle cosine formula works on ne120pg2->cmip. Why? 
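A plausible explanation, not verified here: for the needle cases that dominate, canonical relabeling makes c the short side, so prm_smi-ngl_ltr_a and prm_smi-ngl_ltr_b are tiny and rounding can drive them slightly negative; the sine variant then takes sqrt() of a negative product and returns NaN, whereas the cosine variant's argument sin(prm_smi)*sin(prm_smi-ngl_ltr_c) keeps both factors well away from zero. 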
Adopting cosine formula because it works */ //sin_hlf_C=sqrt(sin(prm_smi-ngl_ltr_a)*sin(prm_smi-ngl_ltr_b)/(sin(ngl_ltr_a)*sin(ngl_ltr_b))); // Half-angle sine formula cos_hlf_C=sqrt(sin(prm_smi)*sin(prm_smi-ngl_ltr_c)/(sin(ngl_ltr_a)*sin(ngl_ltr_b))); // Half-angle cosine formula //ngl_sfc_ltr_C=2.0*asin(sin_hlf_C); ngl_sfc_ltr_C=2.0*acos(cos_hlf_C); /* SAS formula: tan(E/2) = tan(a/2)*tan(b/2)*sin(C)/(1 + tan(a/2)*tan(b/2)*cos(C)) */ tan_hlf_a_tan_hlf_b=tan(0.5*ngl_ltr_a)*tan(0.5*ngl_ltr_b); xcs_sph_hlf_tan=tan_hlf_a_tan_hlf_b*sin(ngl_sfc_ltr_C)/(1.0+tan_hlf_a_tan_hlf_b*cos(ngl_sfc_ltr_C)); assert(fabs(xcs_sph_hlf_tan) != M_PI_2); xcs_sph=2.0*atan(xcs_sph_hlf_tan); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO SAS area formula for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e). Spherical excess = %0.16e.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c,xcs_sph); // Single-line version // xcs_sph=2.0*atan(tan(0.5*ngl_ltr_a)*tan(0.5*ngl_ltr_b)*sin(2.0*acos(sqrt(sin(prm_smi)*sin(prm_smi-ngl_c)/(sin(ngl_a)*sin(ngl_b)))))/(1.0+tan_hlf_a_tan_hlf_b*cos(2.0*acos(sqrt(sin(prm_smi)*sin(prm_smi-ngl_c)/(sin(ngl_a)*sin(ngl_b))))))); /* Above procedure for problematic needle-shaped and isosceles-shaped triangles degrades statistics For ne30pg2, ne120pg2 -> cmip, setting area = 0.0 _greatly_ improves area statistics (Why?) Set spherical excess to zero for problematic needle-shaped and isosceles-shaped triangles */ /* fxm: Make zeroing skinny needle/isosceles-shaped triangle-areas a command-line option? */ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO Setting SAS area = 0.0\n",nco_prg_nm_get()); xcs_sph=0.0; /* !flg_sas */ }else{ double xcs_sph_qtr_tan; /* [frc] Tangent of one-quarter the spherical excess */ /* L'Huilier's theorem: tan(E/4) = sqrt(tan(s/2)*tan((s-a)/2)*tan((s-b)/2)*tan((s-c)/2)) where s is the semi-perimeter */ xcs_sph_qtr_tan=sqrt(tan(0.5*prm_smi)*tan(0.5*(prm_smi-ngl_a))*tan(0.5*(prm_smi-ngl_b))*tan(0.5*(prm_smi-ngl_c))); assert(fabs(xcs_sph_qtr_tan) != M_PI_2); xcs_sph=4.0*atan(xcs_sph_qtr_tan); /* 20191014: Aggregate all previous area-related commands into one, gigantic, unreadable, possibly more precise command (tested and it is more obfuscated but not more precise) */ // xcs_sph=4.0*atan(sqrt(tan(0.5*0.5*(ngl_a+ngl_b+ngl_c))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_a))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_b))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_c)))); } /* !flg_sas */ if(isnan(xcs_sph)){ const double eps_ngl_skn=1.0e-13; /* [frc] Angles skinnier than this form needles whose area ~ 0.0 */ /* Categorize reason for NaN */ (void)fprintf(stdout,"%s: WARNING Unexpected NaN for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e).\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); if( /* Side exceeds semi-perimeter */ (ngl_a > prm_smi) || (ngl_b > prm_smi) || (ngl_c > prm_smi) ){ (void)fprintf(stdout,"%s: WARNING Triangle side exceeds semi-perimeter = %0.16e for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e). 
Assigned triangle area = 0.0.\n",nco_prg_nm_get(),prm_smi,col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); }else if( /* Are angles too skinny? Quite often on ne30pg2, ne120pg2 */ (ngl_a < eps_ngl_skn) || (ngl_b < eps_ngl_skn) || (ngl_c < eps_ngl_skn) ){ (void)fprintf(stdout,"%s: WARNING Triangle has at least one skinny angle < %g [rdn] for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16f, %0.16f, %0.16f). Assigned triangle area = 0.0.\n",nco_prg_nm_get(),eps_ngl_skn,col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); }else if( /* Are two vertices identical to double-precision? Never on ne30pg2, ne120pg2 */ ((lat_bnd[idx_a] == lat_bnd[idx_b]) && (lon_bnd[idx_a] == lon_bnd[idx_b])) || ((lat_bnd[idx_b] == lat_bnd[idx_c]) && (lon_bnd[idx_b] == lon_bnd[idx_c])) || ((lat_bnd[idx_c] == lat_bnd[idx_a]) && (lon_bnd[idx_c] == lon_bnd[idx_a])) ){ (void)fprintf(stdout,"%s: WARNING Triangle has repeated points for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g). Assigned triangle area = 0.0.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); }else{ (void)fprintf(stdout,"%s: WARNING Triangle area formula yields NaN for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16f, %0.16f, %0.16f). Are points co-linear? Assigned triangle area = 0.0.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); } /* !co-linear */ xcs_sph=0.0; } /* !NaN */ area[col_idx]+=xcs_sph; /* Accumulate spherical triangle area into reported polygon area and adjust below */ area_smc+=xcs_sph; /* Accumulate spherical triangle area into small-circle polygon area and adjust below */ area_ttl+=xcs_sph; /* Accumulate spherical triangle area into spherical polygon area */ area_smc_ttl+=xcs_sph; /* Accumulate spherical triangle area into total polygon area and adjust below */ /* 20160918 from here to end of loop is non-spherical work 20170217: Temporarily turn off latitude circle diagnostics because Sungduk's POP case breaks them Canonical latitude-triangle geometry has point A at apex and points B and C at same latitude ncremap --dbg=1 --alg_typ=nco --grd_src=${DATA}/grids/ne30np4_pentagons.091226.nc --grd_dst=${DATA}/grids/cmip6_180x360_scrip.20181001.nc --map=${DATA}/maps/map_ne30np4_to_cmip6_180x360_nco.20190601.nc ncremap --dbg=1 -R 'edg_typ=smc' --alg_typ=nco --grd_src=${DATA}/grids/ne30np4_pentagons.091226.nc --grd_dst=${DATA}/grids/cmip6_180x360_scrip.20181001.nc --map=${DATA}/maps/map_ne30np4_to_cmip6_180x360_smc.20190601.nc */ flg_tri_crr_smc=False; if(lat_bnd_rdn[idx_a] == lat_bnd_rdn[idx_b] || lat_bnd_rdn[idx_b] == lat_bnd_rdn[idx_c] || lat_bnd_rdn[idx_c] == lat_bnd_rdn[idx_a]){ /* Set flag only if triangle is not degenerate. 
Degenerate triangles (3 points on a geodesic) have zero area */ if(xcs_sph != 0.0) flg_ply_has_smc=flg_tri_crr_smc=True; if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG Found small circle triangle with vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); } /* endif */ if((edg_typ == nco_edg_smc) && flg_tri_crr_smc){ double ngl_plr; /* [rdn] Polar angle (co-latitude) */ long idx_ltr_a; /* [idx] Point A (apex) of canonical latitude-triangle geometry, 1-D index */ long idx_ltr_b; /* [idx] Point B (base) of canonical latitude-triangle geometry, 1-D index */ long idx_ltr_c; /* [idx] Point C (base) of canonical latitude-triangle geometry, 1-D index */ /* Rotate labels to standard position with vertex A, equi-latitude points B and C */ if(lat_bnd_rdn[idx_a] == lat_bnd_rdn[idx_b]){ idx_ltr_a=idx_c; idx_ltr_b=idx_a; idx_ltr_c=idx_b; ngl_ltr_a=ngl_c; ngl_ltr_b=ngl_a; ngl_ltr_c=ngl_b; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_a]); }else if(lat_bnd_rdn[idx_b] == lat_bnd_rdn[idx_c]){ idx_ltr_a=idx_a; idx_ltr_b=idx_b; idx_ltr_c=idx_c; ngl_ltr_a=ngl_a; ngl_ltr_b=ngl_b; ngl_ltr_c=ngl_c; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_b]); }else if(lat_bnd_rdn[idx_c] == lat_bnd_rdn[idx_a]){ idx_ltr_a=idx_b; idx_ltr_b=idx_c; idx_ltr_c=idx_a; ngl_ltr_a=ngl_b; ngl_ltr_b=ngl_c; ngl_ltr_c=ngl_a; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_c]); }else{ (void)fprintf(stdout,"%s: ERROR latitudes not equal in small circle section. Vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); abort(); } /* endif */ /* 20160918: Compute exact area of latitude triangle wedge */ double xpn_x; /* [frc] Expansion parameter */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_ltr_b],lon_bnd_rdn[idx_ltr_c])); assert(lon_dlt != 0.0); // Latitude triangles must have bases with distinct longitudes if(lon_dlt != M_PI){ /* Normal clause executed for small-circle triangles */ /* Numeric conditioning uncertain. Approaches divide-by-zero when lon_dlt << 1 */ xpn_x=lat_bnd_sin[idx_ltr_b]*(1.0-cos(lon_dlt))/sin(lon_dlt); assert(fabs(xpn_x) != M_PI_2); area_smc_crc=2.0*atan(xpn_x); /* 20170217: Sungduk's POP regrid triggers following abort(): ncremap -D 1 -i ~/pop_g16.nc -d ~/cam_f19.nc -o ~/foo.nc */ //assert(xpn_x >= 0.0); //if(lat_bnd[idx_ltr_b] > 0.0) area_smc_crc+=-lon_dlt*lat_bnd_sin[idx_ltr_b]; else area_smc_crc+=+lon_dlt*lat_bnd_sin[idx_ltr_b]; area_smc_crc+=-lon_dlt*lat_bnd_sin[idx_ltr_b]; }else{ /* 20200228: Latitude triangles may have bases with longitudes that differ by 180 degrees Consider a quadrilateral with four equidistant vertices in longitude, and that caps a pole: CSZ decomposition technique divides this into two triangles each with three co-latitudinal points and no vertex at pole Solution candidates: 1. Divide such quadrilaterals using centroid technique Just realized current implementation of centroid decomposition fails on polar caps Failure occurs because centroid latitude is +/- ~90 not mean of vertices' latitudes Must impute "pseudo-centroid" with latitude +/- 90 instead of averaging vertex latitudes Requires testing each polygon to determine if it contains pole <- Too difficult/expensive 2. 
Assume latitude triangles whose base is 180 degrees are at pole Compute area exactly using analytic formula for annular lune */ (void)fprintf(stdout,"%s: INFO longitudes differ by pi in small circle section. Vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_ltr_a],lon_bnd[idx_ltr_a],lat_bnd[idx_ltr_b],lon_bnd[idx_ltr_b],lat_bnd[idx_ltr_c],lon_bnd[idx_ltr_c]); (void)fprintf(stdout,"%s: DEBUG col_nbr=%lu, bnd_nbr=%d, col_idx=%ld, area=%g. Vertices [0..bnd_nbr-1] in format idx (lat,lon)\n",nco_prg_nm_get(),col_nbr,bnd_nbr,col_idx,xcs_sph); for(int bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%2d (%g, %g)\n",bnd_idx,lat_bnd[bnd_nbr*col_idx+bnd_idx],lon_bnd[bnd_nbr*col_idx+bnd_idx]); (void)fprintf(stdout,"%s: INFO Assuming this triangle is decomposed from polar cap polygon. Treating area with analytic formula for annular lune\n",nco_prg_nm_get()); /* Compute small circle correction as difference between spherical triangle area and standard annular lune formula Small circle correction is positive-definite for polar triangles so use fabs(lat_bnd_sin) */ area_smc_crc=lon_dlt*fabs(lat_bnd_sin[idx_ltr_b])-area_smc; } /* !lon_dlt */ // Adjust diagnostic areas by small-circle area correction area_smc+=area_smc_crc; area_smc_ttl+=area_smc_crc; area_smc_crc_ttl+=area_smc_crc; area_smc_crc_abs_ttl+=fabs(area_smc_crc); // 20200109: Adjust area reported to calling code by small-circle area correction area[col_idx]+=area_smc_crc; if(0){ /* 20160918: Approximate area of latitude triangle wedge. Use truncated power expansion of exact formula. */ double xpn_x_sqr; /* [frc] Expansion parameter squared */ double xpn_sum=0.0; /* [frc] Expansion sum */ double xpn_nmr; /* [frc] Expansion term numerator */ double xpn_trm; /* [frc] Expansion term */ double xpn_dnm; /* [frc] Expansion term denominator */ const unsigned short int rdr_xpn=3; /* [nbr] Order of N in trigonometric series expansion */ unsigned short int idx_xpn; /* [idx] Index in series expansion */ xpn_x=cos(ngl_plr)*(1.0-cos(lon_dlt))/sin(lon_dlt); xpn_x_sqr=xpn_x*xpn_x; xpn_nmr=xpn_x; xpn_dnm=1.0; xpn_trm=xpn_nmr/xpn_dnm; xpn_sum+=xpn_trm; for(idx_xpn=3;idx_xpn<=rdr_xpn;idx_xpn+=2){ xpn_nmr*=xpn_x_sqr; xpn_dnm*=(idx_xpn-1)*idx_xpn; xpn_trm=xpn_nmr/xpn_dnm; xpn_sum+=xpn_trm; } /* !idx_xpn */ (void)fprintf(stdout,"%s: Small-circle area using series approximation...not implemented yet\n",nco_prg_nm_get()); } /* !0 */ if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stdout,"%s: INFO %s col_idx = %li triangle %d spherical area, latitude-triangle area, %% difference: %g, %g, %g%%\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,xcs_sph,xcs_sph+area_smc_crc,100.0*area_smc_crc/xcs_sph); if(fabs(area_smc_crc/xcs_sph) > 0.1){ (void)fprintf(stdout,"%s: DEBUG Non-spherical correction exceeds 10%% for current triangle with vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_ltr_a],lon_bnd[idx_ltr_a],lat_bnd[idx_ltr_b],lon_bnd[idx_ltr_b],lat_bnd[idx_ltr_c],lon_bnd[idx_ltr_c]); } /* !fabs */ } /* !dbg */ } /* !edg_typ && flg_tri_crr_smc */ } /* !tri_idx */ if(edg_typ == nco_edg_smc && flg_ply_has_smc){ /* Current gridcell contained at least one latitude-triangle */ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO %s col_idx = %li spherical area, small circle area, %% difference: %g, %g, %g%%\n",nco_prg_nm_get(),fnc_nm,col_idx,area[col_idx],area_smc,100.0*(area_smc-area[col_idx])/area[col_idx]); } /* !edg_typ && !flg_ply_has_smc */ } /* 
!col_idx */ if(edg_typ == nco_edg_smc && nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO %s total spherical area, small circle area, %% difference, crc_ttl, crc_abs_ttl: %g, %g, %g%%, %g, %g\n",nco_prg_nm_get(),fnc_nm,area_ttl,area_smc_ttl,100.0*(area_smc_ttl-area_ttl)/area_ttl,area_smc_crc_ttl,area_smc_crc_abs_ttl); if(vrt_vld) vrt_vld=(long *)nco_free(vrt_vld); if(a_idx) a_idx=(long *)nco_free(a_idx); if(b_idx) b_idx=(long *)nco_free(b_idx); if(c_idx) c_idx=(long *)nco_free(c_idx); if(lat_bnd_rdn) lat_bnd_rdn=(double *)nco_free(lat_bnd_rdn); if(lon_bnd_rdn) lon_bnd_rdn=(double *)nco_free(lon_bnd_rdn); if(lat_bnd_cos) lat_bnd_cos=(double *)nco_free(lat_bnd_cos); if(lon_bnd_cos) lon_bnd_cos=(double *)nco_free(lon_bnd_cos); if(lat_bnd_sin) lat_bnd_sin=(double *)nco_free(lat_bnd_sin); if(lon_bnd_sin) lon_bnd_sin=(double *)nco_free(lon_bnd_sin); } /* !nco_sph_plg_area() */ int /* O [enm] Return code */ nco_rgr_tps /* [fnc] Regrid using TempestRemap library */ (rgr_sct * const rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Regrid fields using TempestRemap "library" (more precisely, executables) Routine was originally written to call Tempest executables However, that functionality was all placed into the ncremap shell script Thus this C-interface is currently unused TempestRemap2 has a library that may be accessed on-line Test Tempest library: no way to activate yet export DATA_TEMPEST='/data/zender/rgr';ncks -O --rgr=Y ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc */ const char fnc_nm[]="nco_rgr_tps()"; const int fmt_chr_nbr=6; const char *cmd_rgr_fmt; char *cmd_rgr; char fl_grd_dst[]="/tmp/foo_outRLLMesh.g"; char *fl_grd_dst_cdl; int rcd_sys; int lat_nbr_rqs=180; int lon_nbr_rqs=360; nco_rgr_tps_cmd nco_tps_cmd; /* [enm] TempestRemap command enum */ char *nvr_DATA_TEMPEST; /* [sng] Directory where Tempest grids, meshes, and weights are stored */ nvr_DATA_TEMPEST=getenv("DATA_TEMPEST"); rgr->drc_tps= (nvr_DATA_TEMPEST && strlen(nvr_DATA_TEMPEST) > 0L) ? (char *)strdup(nvr_DATA_TEMPEST) : (char *)strdup("/tmp"); if(nco_dbg_lvl_get() >= nco_dbg_crr){ (void)fprintf(stderr,"%s: INFO %s reports\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stderr,"drc_tps = %s, ",rgr->drc_tps ? 
rgr->drc_tps : "NULL"); (void)fprintf(stderr,"\n"); } /* endif dbg */ /* Allow for whitespace characters in fl_grd_dst Assume CDL translation results in acceptable name for shell commands */ fl_grd_dst_cdl=nm2sng_fl(fl_grd_dst); /* Construct and execute regridding command */ nco_tps_cmd=nco_rgr_GenerateRLLMesh; cmd_rgr_fmt=nco_tps_cmd_fmt_sng(nco_tps_cmd); cmd_rgr=(char *)nco_malloc((strlen(cmd_rgr_fmt)+strlen(fl_grd_dst_cdl)-fmt_chr_nbr+1UL)*sizeof(char)); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stderr,"%s: %s reports generating %d by %d RLL mesh in %s...\n",nco_prg_nm_get(),fnc_nm,lat_nbr_rqs,lon_nbr_rqs,fl_grd_dst); (void)sprintf(cmd_rgr,cmd_rgr_fmt,lat_nbr_rqs,lon_nbr_rqs,fl_grd_dst_cdl); rcd_sys=system(cmd_rgr); if(rcd_sys == -1){ (void)fprintf(stdout,"%s: ERROR %s unable to complete TempestRemap regridding command \"%s\"\n",nco_prg_nm_get(),fnc_nm,cmd_rgr); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"done\n"); /* Clean-up memory */ if(fl_grd_dst_cdl) fl_grd_dst_cdl=(char *)nco_free(fl_grd_dst_cdl); if(cmd_rgr) cmd_rgr=(char *)nco_free(cmd_rgr); return NCO_NOERR; } /* end nco_rgr_tps() */ const char * /* O [sng] String describing two-dimensional grid-type */ nco_grd_2D_sng /* [fnc] Convert two-dimensional grid-type enum to string */ (const nco_grd_2D_typ_enm nco_grd_2D_typ) /* I [enm] Two-dimensional grid-type enum */ { /* Purpose: Convert two-dimensional grid-type enum to string */ switch(nco_grd_2D_typ){ case nco_grd_2D_unk: return "Unknown, unclassified, or unrepresentable 2D grid type (e.g., unstructured, curvilinear, POP displaced-pole)"; case nco_grd_2D_gss: return "Gaussian latitude grid. Used by spectral transform models, e.g., CCM 1-3, CAM 1-3, ECMWF Forecast, LSM, MATCH, NCEP (R1, R2), UCICTM."; case nco_grd_2D_fv: return "Cap-latitude grid, aka FV-scalar grid (in Lin-Rood representation). When global (not regional) in extent and with odd number of latitudes, poles are considered at (and labeled as) centers of first and last gridcells. For example lat_ctr=-90,-89,-88,... and lat_crn=-89.5,-88.5,-87.5,... Thus pole-gridcells span half the equi-angular latitude increment of the rest of the grid. Used by CAM FV (i.e., CAM 4-6), ECMWF (ERA-I, ERA40, ERA5), GEOS-CHEM, UCICTM, UKMO."; case nco_grd_2D_eqa: return "Uniform/Equi-Angular latitude grid. Uniform/Equi-angle (everywhere) latitude grid. When global (not regional) in extent and with even number of latitudes, poles are at corners/edges of first and last gridcells. For example lat_ctr=-89.5,-88.5,-87.5,... and lat_crn=-90,-89,-88,.... When global, forms valid FV-staggered (aka FV-velocity, aka offset) grid (for Lin-Rood representation). 
Used by CIESIN/SEDAC, IGBP-DIS, NASA CMG, TOMS AAI, WOCE."; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_2D_sng() */ const char * /* O [sng] String describing latitude grid-type */ nco_grd_lat_sng /* [fnc] Convert latitude grid-type enum to string */ (const nco_grd_lat_typ_enm nco_grd_lat_typ) /* I [enm] Latitude grid-type enum */ { /* Purpose: Convert latitude grid-type enum to string */ switch(nco_grd_lat_typ){ case nco_grd_lat_unk: return "Unknown, unclassified, or unrepresentable latitude grid type (e.g., unstructured, curvilinear, POP3)"; case nco_grd_lat_gss: return "Gaussian latitude grid used by global spectral models: CCM 1-3, CAM 1-3, ECMWF Forecast, LSM, MATCH, NCEP (R1, R2), UCICTM."; case nco_grd_lat_fv: return "Cap-latitude grid, aka FV-scalar grid (in Lin-Rood representation). When global (not regional) in extent and with odd number of latitudes, poles are considered at (and labeled as) centers of first and last gridcells. For example lat_ctr=-90,-89,-88,... and lat_crn=-89.5,-88.5,-87.5,... Thus pole-gridcells span half the equi-angular latitude increment of the rest of the grid. Used by CAM FV (i.e., CAM 4-6), ECMWF (ERA-I, ERA40, ERA5), GEOS-CHEM, UCICTM, UKMO."; case nco_grd_lat_eqa: return "Uniform/Equi-Angular latitude grid. Uniform/Equi-angle (everywhere) latitude grid. When global (not regional) in extent and with even number of latitudes, poles are at corners/edges of first and last gridcells. For example lat_ctr=-89.5,-88.5,-87.5,... and lat_crn=-90,-89,-88,.... When global, forms valid FV-staggered (aka FV-velocity, aka offset) grid (for Lin-Rood representation). Used by CIESIN/SEDAC, IGBP-DIS, NASA CMG, TOMS AAI, WOCE."; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_lat_sng() */ const char * /* O [sng] String describing longitude grid-type */ nco_grd_lon_sng /* [fnc] Convert longitude grid-type enum to string */ (const nco_grd_lon_typ_enm nco_grd_lon_typ) /* I [enm] Longitude grid-type enum */ { /* Purpose: Convert longitude grid-type enum to string */ switch(nco_grd_lon_typ){ case nco_grd_lon_unk: return "Unknown, unclassified, or unrepresentable longitude grid type (e.g., unstructured, curvilinear)"; case nco_grd_lon_180_wst: return "Date line at west edge of first longitude cell"; case nco_grd_lon_180_ctr: return "Date line at center of first longitude cell"; case nco_grd_lon_Grn_wst: return "Greenwich at west edge of first longitude cell"; case nco_grd_lon_Grn_ctr: return "Greenwich at center of first longitude cell"; case nco_grd_lon_bb: return "Longitude grid determined by bounding box (lon_wst/lon_est) and gridcell number (lon_nbr)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_lon_sng() */ const char * /* O [sng] String describing grid extent */ nco_grd_xtn_sng /* [fnc] Convert two-dimensional grid-extent enum to string */ (const nco_grd_xtn_enm nco_grd_xtn) /* I [enm] Grid-extent enum */ { /* Purpose: Convert grid-extent enum to string */ switch(nco_grd_xtn){ case nco_grd_xtn_nil: return "Unknown"; case nco_grd_xtn_glb: return "Global"; case nco_grd_xtn_rgn: return "Regional"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* 
Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_xtn_sng() */ const char * /* O [sng] String describing grid conversion */ nco_rgr_grd_sng /* [fnc] Convert grid conversion enum to string */ (const nco_rgr_typ_enm nco_rgr_typ) /* I [enm] Grid conversion enum */ { /* Purpose: Convert grid conversion enum to string */ switch(nco_rgr_typ){ case nco_rgr_grd_1D_to_1D: return "1D_to_1D"; case nco_rgr_grd_1D_to_2D: return "1D_to_2D"; case nco_rgr_grd_2D_to_1D: return "2D_to_1D"; case nco_rgr_grd_2D_to_2D: return "2D_to_2D"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_grd_sng() */ const char * /* O [sng] String describing regridding method */ nco_rgr_mth_sng /* [fnc] Convert regridding method enum to string */ (const nco_rgr_mth_typ_enm nco_rgr_mth_typ) /* I [enm] Regridding method enum */ { /* Purpose: Convert regridding method enum to string */ switch(nco_rgr_mth_typ){ case nco_rgr_mth_conservative: return "Conservative remapping"; case nco_rgr_mth_bilinear: return "Bilinear remapping"; case nco_rgr_mth_none: return "none"; case nco_rgr_mth_unknown: return "Unknown (TempestRemap or ESMF_weight_only)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_mth_sng() */ const char * /* O [sng] String describing mapfile generator */ nco_rgr_mpf_sng /* [fnc] Convert mapfile generator enum to string */ (const nco_rgr_mpf_typ_enm nco_rgr_mpf_typ) /* I [enm] Mapfile generator enum */ { /* Purpose: Convert mapfile generator enum to string */ switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_ESMF: return "ESMF Offline Regridding Weight Generator (ERWG), either from ESMF_RegridWeightGen directly or via NCL"; case nco_rgr_mpf_SCRIP: return "SCRIP (original LANL package)"; case nco_rgr_mpf_Tempest: return "TempestRemap (GenerateOfflineMap)"; case nco_rgr_mpf_ESMF_weight_only: return "ESMF Offline Regridding Weight Generator (ERWG), either from ESMF_RegridWeightGen directly or via NCL, with --weight_only option from ERWG 7.1+"; case nco_rgr_mpf_NCO: return "netCDF Operators (NCO) Offline Regridding Weight Generator"; case nco_rgr_mpf_MBTR: return "MOAB-TempestRemap Online Regridding Weight Generator"; case nco_rgr_mpf_unknown: return "Unknown Weight Generator"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_mpf_sng() */ const char * /* O [sng] String describing regridding normalization */ nco_rgr_nrm_sng /* [fnc] Convert regridding normalization enum to string */ (const nco_rgr_nrm_typ_enm nco_rgr_nrm_typ) /* I [enm] Regridding normalization enum */ { /* Purpose: Convert regridding normalization enum to string */ switch(nco_rgr_nrm_typ){ case nco_rgr_nrm_fracarea: return "fracarea"; case nco_rgr_nrm_destarea: return "destarea"; case nco_rgr_nrm_none: return "none"; case nco_rgr_nrm_unknown: return "Unknown (possibilities include ESMF_weight_only, NCO, and TempestRemap)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_nrm_sng() */ const char * /* O [sng] String containing regridding command and format */ 
nco_tps_cmd_fmt_sng /* [fnc] Convert TempestRemap command enum to command string */ (const nco_rgr_tps_cmd nco_tps_cmd) /* I [enm] TempestRemap command enum */ { /* Purpose: Convert TempestRemap command enum to command string and format */ switch(nco_tps_cmd){ case nco_rgr_ApplyOfflineMap: return "ApplyOfflineMap"; case nco_rgr_CalculateDiffNorms: return "CalculateDiffNorms"; case nco_rgr_GenerateCSMesh: return "GenerateCSMesh --res %d --file %s"; case nco_rgr_GenerateGLLMetaData: return "GenerateGLLMetaData"; case nco_rgr_GenerateICOMesh: return "GenerateICOMesh"; case nco_rgr_GenerateLambertConfConicMesh: return "GenerateLambertConfConicMesh"; case nco_rgr_GenerateOfflineMap: return "GenerateOfflineMap --in_mesh %s --out_mesh %s --ov_mesh %s --in_data %s --out_data %s"; case nco_rgr_GenerateOverlapMesh: return "GenerateOverlapMesh --a %s --b %s --out %s"; case nco_rgr_GenerateRLLMesh: return "GenerateRLLMesh --lat %d --lon %d --file %s"; case nco_rgr_GenerateTestData: return "GenerateTestData --mesh %s --np %d --test %d --out %s"; case nco_rgr_MeshToTxt: return "MeshToTxt"; case nco_rgr_AAA_nil: case nco_rgr_ZZZ_last: default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_tps_cmd_fmt_sng() */ const char * /* O [sng] String containing regridding command name */ nco_tps_cmd_sng /* [fnc] Convert TempestRemap command enum to command name */ (const nco_rgr_tps_cmd nco_tps_cmd) /* I [enm] TempestRemap command enum */ { /* Purpose: Convert TempestRemap command enum to command string */ switch(nco_tps_cmd){ case nco_rgr_ApplyOfflineMap: return "ApplyOfflineMap"; case nco_rgr_CalculateDiffNorms: return "CalculateDiffNorms"; case nco_rgr_GenerateCSMesh: return "GenerateCSMesh"; case nco_rgr_GenerateGLLMetaData: return "GenerateGLLMetaData"; case nco_rgr_GenerateICOMesh: return "GenerateICOMesh"; case nco_rgr_GenerateLambertConfConicMesh: return "GenerateLambertConfConicMesh"; case nco_rgr_GenerateOfflineMap: return "GenerateOfflineMap"; case nco_rgr_GenerateOverlapMesh: return "GenerateOverlapMesh"; case nco_rgr_GenerateRLLMesh: return "GenerateRLLMesh"; case nco_rgr_GenerateTestData: return "GenerateTestData"; case nco_rgr_MeshToTxt: return "MeshToTxt"; case nco_rgr_AAA_nil: case nco_rgr_ZZZ_last: default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_tps_cmd_sng() */ int /* O [enm] Return code */ nco_grd_mk /* [fnc] Create SCRIP-format grid file */ (rgr_sct * const rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Use grid information to create SCRIP-format grid file Spherical geometry terminology: spherical cap = spherical dome = volume cut-off by plane spherical lune = digon = area bounded by two half-great circles = base of spherical wedge spherical segment = volume defined by cutting sphere with pair parallel planes spherical sector = volume subtended by lat1 spherical wedge = ungula = volume subtended by lon2-lon1 spherical zone = area of spherical segment excluding bases spherical quadrangle = area of intersection of spherical zone and lune (i.e., area of bearing = angle from true north geodesic = shortest path between points on a surface great circle = orthodrome = "straight path" = geodesic of the sphere convergency = difference (in azimuth?) 
between great circle tracks at two different positions conversion angle = angle between geodesic and rhumb line rhumb line = loxodrome = "oblique (or slanted) path" = line of constant azimuth Formulae: http://www.movable-type.co.uk/scripts/latlong.html # On-line Javascript implementation http://williams.best.vwh.net/avform.htm ACME: https://acme-svn2.ornl.gov/acme-repo/acme/mapping/grids https://acme-svn2.ornl.gov/acme-repo/acme/inputdata/cpl/gridmaps NCAR: yellowstone.ucar.edu:/glade/p/cesm/cseg/mapping/grids yellowstone.ucar.edu:/glade/p_old/cesm/cseg/mapping/grids Global RLL grids: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr grid=${DATA}/grids/180x360_SCRIP.20150901.nc --rgr latlon=180,360 --rgr lat_typ=eqa --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Equiangular grid 90x180' --rgr grid=${DATA}/grids/90x180_SCRIP.20150901.nc --rgr latlon=90,180 --rgr lat_typ=eqa --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc Maps for global RLL grids: ESMF_RegridWeightGen -s ${DATA}/grids/180x360_SCRIP.20150901.nc -d ${DATA}/grids/90x180_SCRIP.20150901.nc -w ${DATA}/maps/map_180x360_to_90x180.20150901.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/90x180_SCRIP.20150901.nc -d ${DATA}/grids/180x360_SCRIP.20150901.nc -w ${DATA}/maps/map_90x180_to_180x360.20150901.nc --method conserve ACME grids: ncks -O -D 1 --rgr ttl='FV-scalar grid 129x256' --rgr grid=${DATA}/grids/129x256_SCRIP.20150910.nc --rgr latlon=129,256 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='FV-scalar grid 257x512' --rgr grid=${DATA}/grids/257x512_SCRIP.20150910.nc --rgr latlon=257,512 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='FV-scalar grid 801x1600' --rgr grid=${DATA}/grids/801x1600_SCRIP.20150910.nc --rgr latlon=801,1600 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ACME maps: ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/129x256_SCRIP.20150910.nc -w ${DATA}/maps/map_ne30np4_to_fv129x256_aave.20150910.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/257x512_SCRIP.20150910.nc -w ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.20150910.nc --method bilinear ESMF_RegridWeightGen -s ${DATA}/grids/ne120np4_pentagons.100310.nc -d ${DATA}/grids/257x512_SCRIP.20150910.nc -w ${DATA}/maps/map_ne120np4_to_fv257x512_aave.20150910.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne120np4_pentagons.100310.nc -d ${DATA}/grids/801x1600_SCRIP.20150910.nc -w ${DATA}/maps/map_ne120np4_to_fv801x1600_bilin.20150910.nc --method bilinear AMWG grids: AMWG diagnostics (until ~2016) mis-diagnose FV grids with odd numbers of latitudes as Gaussian Grids ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 96x144 for horizontal resolution 1.9x2.5 degrees' --rgr grid=${DATA}/grids/96x144_SCRIP.20160301.nc --rgr latlon=96,144 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 192x288 for horizontal resolution 0.9x1.25 degrees' --rgr grid=${DATA}/grids/192x288_SCRIP.20160301.nc --rgr latlon=192,288 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 128x256 for horizontal resolution 1.4x1.4 degrees' --rgr grid=${DATA}/grids/128x256_SCRIP.20160301.nc --rgr latlon=128,256 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 256x512 for horizontal 
resolution 0.7x0.7 degrees' --rgr grid=${DATA}/grids/256x512_SCRIP.20160301.nc --rgr latlon=256,512 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 800x1600 for horizontal resolution 0.225x0.225 degrees' --rgr grid=${DATA}/grids/800x1600_SCRIP.20160301.nc --rgr latlon=800,1600 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Equiangular grid 360x720 produced by RTM' --rgr grid=${DATA}/grids/360x720rtm_SCRIP.20160301.nc --rgr latlon=360,720 --rgr lat_typ=eqa --rgr lon_typ=180_wst ~/nco/data/in.nc ~/foo.nc AMWG maps old method (no provenance archived): ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/128x256_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/256x512_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv256x512_bilin.20160301.nc --method bilinear ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/256x512_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv256x512_aave.20160301.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/800x1600_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv800x1600_bilin.20160301.nc --method bilinear AMWG maps with ncremap (preferred method): ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/128x256_SCRIP.20160301.nc -m ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/256x512_SCRIP.20160301.nc -m ${DATA}/maps/map_ne30np4_to_fv256x512_bilin.20160301.nc -w esmf -a bilinear ncremap -s ${DATA}/grids/ne120np4_pentagons.100310.nc -g ${DATA}/grids/256x512_SCRIP.20160301.nc -m ${DATA}/maps/map_ne120np4_to_fv256x512_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/ne120np4_pentagons.100310.nc -g ${DATA}/grids/800x1600_SCRIP.20160301.nc -m ${DATA}/maps/map_ne120np4_to_fv800x1600_bilin.20160301.nc -w esmf -a bilinear MPAS grids: NCO cannot yet generate MPAS grids, but given an MPAS grid it can generate appropriate maps MPAS maps: ncremap -s ${DATA}/grids/oEC60to30.SCRIP.150729.nc -g ${DATA}/grids/t62_SCRIP.20150901.nc -m ${DATA}/maps/map_oEC60to30_to_t62_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/oEC60to30.SCRIP.150729.nc -g ${DATA}/grids/t62_SCRIP.20150901.nc -m ${DATA}/maps/map_oEC60to30_to_t62_bilin.20160301.nc -w esmf -a bilinear Regional RLL grids: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr grid=${DATA}/sld/rgr/grd_dst.nc --rgr latlon=100,100 --rgr snwe=30.0,70.0,-120.0,-90.0 ~/nco/data/in.nc ~/foo.nc Global RLL skeleton: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr skl=${DATA}/sld/rgr/skl_180x360.nc --rgr grid=${DATA}/grids/180x360_SCRIP.20150901.nc --rgr latlon=180,360#lat_typ=eqa#lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc Curvilinear grids: ncks -O -D 1 --rgr ttl='Curvilinear grid 10x20. Degenerate case.' --rgr crv --rgr lon_crv=0.0 --rgr skl=${DATA}/sld/rgr/skl_crv.nc --rgr grid=${DATA}/sld/rgr/grd_crv.nc --rgr latlon=10,20 --rgr snwe=-5.0,5.0,-10.0,10.0 ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Curvilinear grid 10x20. 
Curvilinearity = 1.0 lon' --rgr lon_crv=1.0 --rgr skl=${DATA}/sld/rgr/skl_crv.nc --rgr grid=${DATA}/sld/rgr/grd_crv.nc --rgr latlon=10,20 --rgr snwe=-5.0,5.0,-10.0,10.0 ~/nco/data/in.nc ~/foo.nc 1-D Latitude (no longitude) grids: ncks -O -D 1 --rgr ttl='Latitude-only zonal grid' --rgr skl=${DATA}/sld/rgr/skl_lat_10dgr_uni.nc --rgr grid=${DATA}/sld/rgr/grd_lat_10dgr_uni.nc --rgr latlon=18,1 --rgr snwe=-90,90,0,360 ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Latitude-only zonal grid' --rgr skl=${DATA}/sld/rgr/skl_lat_05dgr_cap.nc --rgr grid=${DATA}/sld/rgr/grd_lat_05dgr_cap.nc --rgr latlon=37,1 --rgr snwe=-90,90,0,360 ~/nco/data/in.nc ~/foo.nc ncremap -i ${DATA}/sld/rgr/skl_lat_10dgr_uni.nc -d ${DATA}/sld/rgr/skl_lat_05dgr_cap.nc -m ${DATA}/maps/map_lat10uni_to_lat05cap_aave.nc -o ~/rgr/lat10to05.nc ESMF_RegridWeightGen -s ${DATA}/sld/rgr/grd_lat_10dgr_uni.nc -d ${DATA}/sld/rgr/grd_lat_05dgr_cap.nc -w ${DATA}/maps/map_lat10uni_to_lat05cap_aave.nc --method conserve */ const char fnc_nm[]="nco_grd_mk()"; /* [sng] Function name */ const double rdn2dgr=180.0/M_PI; const double dgr2rdn=M_PI/180.0; const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ const int itr_nbr_max=20; // [nbr] Maximum number of iterations const nc_type crd_typ=NC_DOUBLE; char *fl_out_tmp=NULL_CEWI; char *fl_out; char grd_area_nm[]="grid_area"; /* 20150830: NB ESMF_RegridWeightGen --user_areas looks for variable named "grid_area" */ char dmn_sz_nm[]="grid_dims"; char grd_crn_lat_nm[]="grid_corner_lat"; char grd_crn_lon_nm[]="grid_corner_lon"; char grd_crn_nm[]="grid_corners"; char grd_ctr_lat_nm[]="grid_center_lat"; char grd_ctr_lon_nm[]="grid_center_lon"; char grd_rnk_nm[]="grid_rank"; char grd_sz_nm[]="grid_size"; char msk_nm[]="grid_imask"; double *grd_ctr_lat; /* [dgr] Latitude centers of grid */ double *grd_ctr_lon; /* [dgr] Longitude centers of grid */ double *grd_crn_lat; /* [dgr] Latitude corners of grid */ double *grd_crn_lon; /* [dgr] Longitude corners of grid */ double *area; /* [sr] Area of grid */ double *lat_bnd=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular grid */ double *lat_crn=NULL; /* [dgr] Latitude corners of rectangular grid */ double *lat_ctr=NULL_CEWI; /* [dgr] Latitude centers of rectangular grid */ double *lat_ntf=NULL; /* [dgr] Latitude interfaces of rectangular grid */ double *lat_wgt=NULL; /* [dgr] Latitude weights of rectangular grid */ double *lon_bnd=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular grid */ double *lon_crn=NULL; /* [dgr] Longitude corners of rectangular grid */ double *lon_ctr=NULL_CEWI; /* [dgr] Longitude centers of rectangular grid */ double *lon_ntf=NULL; /* [dgr] Longitude interfaces of rectangular grid */ double area_ttl=0.0; /* [frc] Exact sum of area */ double lat_crv; /* [dgr] Latitudinal curvilinearity */ double lon_crv; /* [dgr] Longitudinal curvilinearity */ double lat_nrt; /* [dgr] Latitude of northern edge of grid */ double lat_sth; /* [dgr] Latitude of southern edge of grid */ double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */ double lat_wgt_gss; /* [frc] Latitude weight estimated from interface latitudes */ double lon_est; /* [dgr] Longitude of eastern edge of grid */ double lon_wst; /* [dgr] Longitude of western edge of grid */ double lon_ncr; /* [dgr] Longitude increment */ double lat_ncr; /* [dgr] Latitude 
increment */ double lon_spn; /* [dgr] Longitude span */ double lat_spn; /* [dgr] Latitude span */ double *wgt_Gss=NULL; // [frc] Gaussian weights double precision int *msk=NULL; /* [flg] Mask of grid */ int *dmn_sz_int; /* [nbr] Array of dimension sizes of grid */ int dmn_ids[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NC_FORMAT_CLASSIC; /* [enm] Output file format */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int area_id; /* [id] Area variable ID */ int dmn_id_grd_crn; /* [id] Grid corners dimension ID */ int dmn_id_grd_rnk; /* [id] Grid rank dimension ID */ int dmn_id_grd_sz; /* [id] Grid size dimension ID */ int dmn_sz_int_id; /* [id] Grid dimension sizes ID */ int grd_crn_lat_id; /* [id] Grid corner latitudes variable ID */ int grd_crn_lon_id; /* [id] Grid corner longitudes variable ID */ int grd_ctr_lat_id; /* [id] Grid center latitudes variable ID */ int grd_ctr_lon_id; /* [id] Grid center longitudes variable ID */ int itr_cnt; /* Iteration counter */ int msk_id; /* [id] Mask variable ID */ long dmn_srt[dmn_nbr_grd_max]; long dmn_cnt[dmn_nbr_grd_max]; long bnd_nbr; /* [nbr] Number of bounds in gridcell */ long col_nbr; /* [nbr] Number of columns in grid */ long crn_idx; /* [idx] Counting index for corners */ long grd_crn_nbr; /* [nbr] Number of corners in gridcell */ long grd_rnk_nbr; /* [nbr] Number of dimensions in grid */ long grd_sz_nbr; /* [nbr] Number of gridcells in grid */ long idx2; /* [idx] Counting index for unrolled grids */ long idx; /* [idx] Counting index for unrolled grids */ long lat_idx2; /* [idx] Counting index for unrolled latitude */ long lat_idx; long lat_nbr; /* [nbr] Number of latitudes in grid */ long lon_idx2; /* [idx] Counting index for unrolled longitude */ long lon_idx; long lon_nbr; /* [nbr] Number of longitudes in grid */ nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=True; /* Option O */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=rgr->flg_uio; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool WRT_TMP_FL=False; /* [flg] Write output to temporary file */ nco_bool flg_grd_1D=False; nco_bool flg_grd_2D=False; nco_bool flg_grd_crv=False; nco_bool flg_s2n=True; /* I [enm] Latitude grid-direction is South-to-North */ nco_grd_2D_typ_enm grd_typ; /* [enm] Grid-type enum */ nco_grd_lat_drc_enm lat_drc; /* [enm] Latitude grid-direction enum */ nco_grd_lat_typ_enm lat_typ; /* [enm] Latitude grid-type enum */ nco_grd_lon_typ_enm lon_typ; /* [enm] Longitude grid-type enum */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ dfl_lvl=rgr->dfl_lvl; grd_typ=rgr->grd_typ; /* [enm] Grid type */ fl_out=rgr->fl_grd; fl_out_fmt=rgr->fl_out_fmt; lat_drc=rgr->lat_drc; /* [enm] Latitude grid direction */ lat_typ=rgr->lat_typ; /* [enm] Latitude grid type */ lon_typ=rgr->lon_typ; /* [enm] Longitude grid type */ lat_nbr=rgr->lat_nbr; /* [nbr] Number of latitudes in grid */ lon_nbr=rgr->lon_nbr; /* [nbr] Number of longitudes in grid */ lat_crv=rgr->lat_crv; /* [dgr] Latitude curvilinearity */ lon_crv=rgr->lon_crv; /* [dgr] Longitude curvilinearity */ lat_sth=rgr->lat_sth; /* [dgr] Latitude of southern edge of grid */ lon_wst=rgr->lon_wst; /* [dgr] Longitude of western edge of grid */ 
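/* Worked example (values illustrative, not read from rgr): the global-grid recipes in the comment above build a 1x1-degree global grid with the first longitude centered on Greenwich from lat_nbr=180, lon_nbr=360, lat_typ=nco_grd_lat_eqa, lon_typ=nco_grd_lon_Grn_ctr, i.e., ncks --rgr latlon=180,360 --rgr lat_typ=eqa --rgr lon_typ=Grn_ctr */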
lat_nrt=rgr->lat_nrt; /* [dgr] Latitude of northern edge of grid */ lon_est=rgr->lon_est; /* [dgr] Longitude of eastern edge of grid */ /* Use curvilinear coordinates (lat and lon are 2D arrays) if flg_crv already set or if lat_crv or lon_crv set */ if(lat_crv != 0.0 || lon_crv != 0.0 || rgr->flg_crv) flg_grd_crv=True; if(lat_drc == nco_grd_lat_drc_n2s) flg_s2n=False; /* Assume 2D grid */ flg_grd_2D=True; grd_rnk_nbr=dmn_nbr_2D; /* Assume quadrilaterals */ grd_crn_nbr=4; /* Assume rectangles */ bnd_nbr=2; col_nbr=lat_nbr*lon_nbr; grd_sz_nbr=lat_nbr*lon_nbr; /* Allocate space for output data */ area=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); dmn_sz_int=(int *)nco_malloc(grd_rnk_nbr*nco_typ_lng((nc_type)NC_INT)); msk=(int *)nco_malloc(grd_sz_nbr*nco_typ_lng((nc_type)NC_INT)); lat_bnd=(double *)nco_malloc(lat_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lat_crn=(double *)nco_malloc(lat_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lon_bnd=(double *)nco_malloc(lon_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(lon_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(lon_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); wgt_Gss=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); grd_ctr_lat=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_ctr_lon=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lat=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lon=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); /* Define variable values */ int lon_psn=int_CEWI; /* [idx] Ordinal position of longitude in rectangular grid dimension-size array */ int lat_psn=int_CEWI; /* [idx] Ordinal position of latitude in rectangular grid dimension-size array */ if(grd_rnk_nbr == dmn_nbr_2D){ lon_psn=0; /* SCRIP introduced [lon,lat] convention because more natural for Fortran */ lat_psn=1; } /* !flg_grd_in_2D */ dmn_sz_int[lon_psn]=lon_nbr; dmn_sz_int[lat_psn]=lat_nbr; for(idx=0;idx<grd_sz_nbr;idx++) msk[idx]=1; /* Compute rectangular arrays NB: Much is a more-generic rewrite of map/map_grd.F90:map_grd_mk() */ /* 20150827: Old rule: Longitude grid was entirely specified by one of four longitude map tokens: Grn_ctr,Grn_wst,180_ctr,180_wst New rule: User may specify bounds (lon_wst,lon_est,lat_sth,lat_nrt) independently of grid token Such bounds ALWAYS refer to bounding box interface edges, NEVER to centers of first/last gridcells Bounds and number of gridcells completely determine uniform grid so former longitude-type tokens have no effect when bounds specified (so letting grid-type tokens affect grid would over-determine grid and lead to errors) Hence, grid-type tokens may be used as short-hand to specify grids but may not be required to exist later (because regional grids would not have specified them) Grid-type tokens lon_bb/lat_bb imply bounding box was originally used to specify bounds 1x1 degree global grid with first longitude centered at Greenwich: --lon_nbr=360 --lon_typ Grn_ctr --lon_nbr=360 --lon_wst=-0.5 --lon_est=359.5 1x1 degree global grid with Greenwich at west edge of first longitude: --lon_nbr=360 --lon_typ Grn_wst --lon_nbr=360 --lon_wst=0.0 --lon_est=360.0 1x1 degree regional grid, total size 9x9 degrees, Greenwich at center of middle gridcell: --lon_nbr=9 --lon_wst=-4.5 --lon_est=4.5 1x1
degree regional grid, total size 10x10 degrees, Greenwich at east/west edges of middle two gridcells --lon_nbr=10 --lon_wst=-5.0 --lon_est=5.0 */ /* Were east/west longitude bounds set explicitly or implicitly? NB: This is redundant since it was done in nco_rgr_ini(), yet better safe than sorry */ if(lon_wst != NC_MAX_DOUBLE || lon_est != NC_MAX_DOUBLE) lon_typ=rgr->lon_typ=nco_grd_lon_bb; if(lon_wst == NC_MAX_DOUBLE){ /* Precomputed longitude grids begin with longitude 0.0 or -180.0 degrees */ switch(lon_typ){ case nco_grd_lon_bb: case nco_grd_lon_Grn_ctr: case nco_grd_lon_Grn_wst: lon_wst=0.0; break; case nco_grd_lon_180_ctr: case nco_grd_lon_180_wst: lon_wst=-180.0; break; default: nco_dfl_case_generic_err(); break; } /* !lon_typ */ } /* !lon */ if(lon_est == NC_MAX_DOUBLE){ /* Precomputed longitude grids end with longitude 360.0 or 180.0 degrees */ switch(lon_typ){ case nco_grd_lon_bb: case nco_grd_lon_Grn_ctr: case nco_grd_lon_Grn_wst: lon_est=360.0; break; case nco_grd_lon_180_ctr: case nco_grd_lon_180_wst: lon_est=180.0; break; default: nco_dfl_case_generic_err(); break; } /* !lon_typ */ } /* !lon */ /* Determine longitude increment from span of pre-centered bounding box (centering will not change span) */ lon_spn=lon_est-lon_wst; lon_ncr=lon_spn/lon_nbr; /* Centering: If user did not set explicit longitude bounds then... */ if(lon_typ != nco_grd_lon_bb) /* map_lon_ctr_typ determines whether lon_wst refers to cell center or Western edge */ if((lon_typ == nco_grd_lon_Grn_ctr) || (lon_typ == nco_grd_lon_180_ctr)) lon_wst=lon_wst-(lon_ncr/2.0); /* Re-derive lon_est from lon_wst and lon_nbr (more fundamental properties) */ lon_est=lon_wst+lon_ncr*lon_nbr; /* lon_wst and lon_est have been set and will not change */ assert(lon_wst < lon_est); lon_ntf[0L]=lon_wst; lon_ntf[lon_nbr]=lon_est; for(lon_idx=1L;lon_idx<lon_nbr;lon_idx++) lon_ntf[lon_idx]=lon_ntf[0L]+lon_idx*lon_ncr; /* Ensure rounding errors do not produce unphysical grid */ lon_ntf[lon_nbr]=lon_ntf[0L]+lon_spn; /* Finished with longitude, now tackle latitude */ /* Were south/north latitude bounds set explicitly or implicitly? */ // if(lat_sth != NC_MAX_DOUBLE || lat_nrt != NC_MAX_DOUBLE) lon_typ=rgr->lat_typ=nco_grd_lat_bb; if(lat_sth == NC_MAX_DOUBLE) lat_sth=-90.0; if(lat_nrt == NC_MAX_DOUBLE) lat_nrt=90.0; /* Determine latitude increment from span of pre-centered bounding box (centering will not change span) */ lat_spn=lat_nrt-lat_sth; lat_ncr=lat_spn/lat_nbr; const long lat_nbr_hlf=lat_nbr/2L; // [nbr] Half number of latitudes (e.g., lat_nbr_hlf=32 for lat_nbr=64 and 65) double *lat_sin=NULL; // [frc] Sine of Gaussian latitudes double precision /* Create S->N grid. 
If user requested N->S, flip grid at end */ // if(flg_s2n) lat_ntf[0L]=lat_sth; else lat_ntf[0L]=lat_nrt; lat_ntf[0L]=lat_sth; switch(lat_typ){ case nco_grd_lat_fv: lat_ncr=lat_spn/(lat_nbr-1L); lat_ntf[1L]=lat_ntf[0L]+0.5*lat_ncr; for(lat_idx=2L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=lat_ntf[1L]+(lat_idx-1L)*lat_ncr; break; case nco_grd_lat_eqa: lat_ncr=lat_spn/lat_nbr; for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=lat_ntf[0L]+lat_idx*lat_ncr; break; case nco_grd_lat_gss: lat_sin=(double *)nco_malloc(lat_nbr*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr,True,lat_sin,wgt_Gss); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); /* First guess for lat_ntf is midway between Gaussian abscissae */ for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=0.5*(lat_ctr[lat_idx-1L]+lat_ctr[lat_idx]); /* Iterate guess until area between interfaces matches Gaussian weight (compute for one hemisphere, make other symmetric) */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++){ double fofx_at_x0; /* [frc] Function to iterate evaluated at current guess */ double dfdx_at_x0; /* [frc] Derivative of equation evaluated at current guess */ const double eps_rlt_cnv=1.0e-15; // Convergence criterion (1.0e-16 pushes double precision to the brink) itr_cnt=0; lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; while(fabs(fofx_at_x0) > eps_rlt_cnv){ /* Newton-Raphson iteration: Let x=lat_ntf[lat_idx], y0=lat_ntf[lat_idx-1L], gw = Gaussian weight (exact solution) f(x)=sin(dgr2rdn*x)-sin(dgr2rdn*y0)-gw=0 # s2n grid f(x)=sin(dgr2rdn*y0)-sin(dgr2rdn*x)-gw=0 # n2s grid dfdx(x)= dgr2rdn*cos(dgr2rdn*x) # s2n grid dfdx(x)=-dgr2rdn*cos(dgr2rdn*x) # n2s grid x_better=x0-f(x0)/f'(x0) */ dfdx_at_x0=dgr2rdn*cos(dgr2rdn*lat_ntf[lat_idx]); /* 20190613: n2s latitudes are constructed s2n and flipped to n2s later Hence next line is commented-out in construction mode but used in infer mode */ // if(!flg_s2n) dfdx_at_x0=-dfdx_at_x0; lat_ntf[lat_idx]+=fofx_at_x0/dfdx_at_x0; /* NB: plus, not the canonical Newton-Raphson minus, because fofx_at_x0=wgt_Gss-lat_wgt_gss is the negative of the f(x) defined above, so x0-f(x0)/f'(x0)=x0+fofx_at_x0/dfdx_at_x0 */ lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; if(++itr_cnt > itr_nbr_max){ (void)fprintf(stdout,"%s: ERROR %s reports convergence of only %g after %d iterations for lat_idx = %ld\n",nco_prg_nm_get(),fnc_nm,fabs(fofx_at_x0),itr_nbr_max,lat_idx); nco_exit(EXIT_FAILURE); } /* endif */ } /* !while */ } /* !lat_idx */ /* Use Gaussian grid symmetry to obtain same interfaces in both hemispheres (avoids cumulative rounding errors) */ if(lat_nbr%2){ /* lat_nbr is odd */ for(lat_idx=1L;lat_idx<=lat_nbr_hlf+1L;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx+1L]; }else{ /* lat_nbr is even */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx]; } /* !flg_lat_evn */ break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Ensure rounding errors do not produce unphysical grid */ lat_ntf[lat_nbr]=lat_nrt; if(nco_dbg_lvl_get() > nco_dbg_old){ (void)fprintf(stderr,"%s: DEBUG %s Gaussian abscissae/interfaces for lat_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,lat_nbr); (void)fprintf(stderr,"idx\tlat_ctr\tlat_ntf\tntf_p1\n"); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ (void)fprintf(stderr,"%ld\t%20.15f\t%20.15f\t%20.15f\n",lat_idx,lat_ctr[lat_idx],lat_ntf[lat_idx],lat_ntf[lat_idx+1L]); } /* !lat_idx */ } /* !dbg */
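/* Worked example of the Gaussian iteration above (illustrative; standard 4-point Gauss-Legendre constants, rounded): a global grid with lat_nbr=4 has lat_sin={-0.861136,-0.339981,+0.339981,+0.861136} and wgt_Gss={0.347855,0.652145,0.652145,0.347855} (weights sum to 2.0), so lat_ctr=rdn2dgr*asin(lat_sin)={-59.44,-19.88,+19.88,+59.44} dgr; the first interior interface then satisfies sin(dgr2rdn*lat_ntf[1L])=sin(dgr2rdn*lat_ntf[0L])+wgt_Gss[0L]=-1.0+0.347855=-0.652145, i.e., lat_ntf[1L]~=-40.68 dgr, which is the root the Newton-Raphson loop converges to */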
/* Always define longitude centers midway between interfaces */ for(lon_idx=0L;lon_idx<=lon_nbr-1L;lon_idx++) lon_ctr[lon_idx]=0.5*(lon_ntf[lon_idx]+lon_ntf[lon_idx+1L]); /* Many grids have center latitude equally spaced between interfaces */ if(lat_typ != nco_grd_lat_fv && lat_typ != nco_grd_lat_gss){ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=0.5*(lat_ntf[lat_idx]+lat_ntf[lat_idx+1L]); } /* !lat_typ */ /* Cap grids excepted---they place centers of first/last gridcells at poles */ if(lat_typ == nco_grd_lat_fv){ lat_ctr[0L]=lat_ntf[0L]; for(lat_idx=1L;lat_idx<lat_nbr-1L;lat_idx++) lat_ctr[lat_idx]=0.5*(lat_ntf[lat_idx]+lat_ntf[lat_idx+1L]); lat_ctr[lat_nbr-1L]=lat_ntf[lat_nbr]; } /* !cap */ /* Gaussian grid centerpoints are defined by solutions to Legendre polynomials */ if(lat_typ == nco_grd_lat_gss){ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); } /* !Gaussian */ for(idx=0L;idx<lon_nbr;idx++){ lon_bnd[2*idx]=lon_ntf[idx]; lon_bnd[2*idx+1L]=lon_ntf[idx+1L]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ lat_bnd[2*idx]=lat_ntf[idx]; lat_bnd[2*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0L;idx<lat_nbr;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr[idx]); for(int bnd_idx=0L;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? "]\n" : ", "); } /* end loop over lat */ } /* endif dbg */ /* Use centers and boundaries to diagnose latitude weights */ switch(lat_typ){ case nco_grd_lat_eqa: case nco_grd_lat_fv: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); break; case nco_grd_lat_gss: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=wgt_Gss[lat_idx]; break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Fuzzy test of latitude weight normalization 20180903 Tolerance threshold of eps_rlt_max=1.0e-14 is too strict for Gaussian grids where lat_nbr >~ 150 20180904 Tolerance threshold of eps_rlt_max=1.0e-12 allows Gaussian grids like ECMWF O1280 Newton-Raphson method of interface determination may need improvement to fix that Tolerance threshold of 1.0e-14 works for all relevant E3SM Uniform and Cap grids */ //const double eps_rlt_max=1.0e-14; /* [frc] Round-off error tolerance: Used 1.0e-14 until 20180904 */ const double eps_rlt_max=1.0e-12; /* [frc] Round-off error tolerance: Used 1.0e-12 since 20180904 */ lat_wgt_ttl=0.0; for(idx=0L;idx<lat_nbr;idx++) lat_wgt_ttl+=lat_wgt[idx]; if(grd_typ == nco_grd_2D_fv || grd_typ == nco_grd_2D_eqa){ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd[2*(lat_nbr-1)+1L])-sin(dgr2rdn*lat_bnd[0L])); if(fabs(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc) > eps_rlt_max){ (void)fprintf(stdout,"%s: ERROR %s reports grid normalization does not meet precision tolerance eps_rlt_max = %20.15f\nlat_wgt_ttl = %20.15f, lat_wgt_ttl_xpc = %20.15f, lat_wgt_frc = %20.15f, eps_rlt = %20.15f\n",nco_prg_nm_get(),fnc_nm,eps_rlt_max,lat_wgt_ttl,lat_wgt_ttl_xpc,lat_wgt_ttl/lat_wgt_ttl_xpc,1.0-lat_wgt_ttl/lat_wgt_ttl_xpc); nco_exit(EXIT_FAILURE); } /* !imprecise */ } /* !nco_grd_lat_eqa, !nco_grd_lat_fv */ /* 20180831 Code above assumes grids run S->N User can request N->S grids with --rgr lat_drc=n2s If so, flip grid before unrolling into output arrays */ if(!flg_s2n){ double *lat_ctr_tmp=NULL_CEWI; /* [dgr] Temporary Latitude centers of rectangular grid */ double
*lat_wgt_tmp=NULL; /* [dgr] Temporary Latitude weights of rectangular grid */ double *lat_ntf_tmp=NULL; /* [dgr] Temporary Latitude interfaces of rectangular grid */ lat_ctr_tmp=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lat_ntf_tmp=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt_tmp=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); long tmp_idx; /* [idx] Temporary index for swapping values */ for(idx=0L;idx<lat_nbr;idx++){ lat_ctr_tmp[idx]=lat_ctr[idx]; lat_wgt_tmp[idx]=lat_wgt[idx]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ tmp_idx=lat_nbr-idx-1L; lat_ctr[idx]=lat_ctr_tmp[tmp_idx]; lat_wgt[idx]=lat_wgt_tmp[tmp_idx]; } /* !idx */ for(idx=0L;idx<lat_nbr+1L;idx++){ lat_ntf_tmp[idx]=lat_ntf[idx]; } /* !idx */ for(idx=0L;idx<lat_nbr+1L;idx++){ tmp_idx=lat_nbr+1L-idx-1L; /* NB: Subtle index difference */ lat_ntf[idx]=lat_ntf_tmp[tmp_idx]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ lat_bnd[2*idx]=lat_ntf[idx]; lat_bnd[2*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ if(lat_ctr_tmp) lat_ctr_tmp=(double *)nco_free(lat_ctr_tmp); if(lat_ntf_tmp) lat_ntf_tmp=(double *)nco_free(lat_ntf_tmp); if(lat_wgt_tmp) lat_wgt_tmp=(double *)nco_free(lat_wgt_tmp); } /* !flg_s2n */ assert(grd_crn_nbr == 4); for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_ntf[lon_idx]; lon_crn[idx+1L]=lon_ntf[lon_idx+1L]; lon_crn[idx+2L]=lon_ntf[lon_idx+1L]; lon_crn[idx+3L]=lon_ntf[lon_idx]; } /* !lon_idx */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_ntf[lat_idx]; lat_crn[idx+1L]=lat_ntf[lat_idx]; lat_crn[idx+2L]=lat_ntf[lat_idx+1L]; lat_crn[idx+3L]=lat_ntf[lat_idx+1L]; } /* !lat_idx */ /* Stuff rectangular arrays into unrolled arrays */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]=lat_ctr[lat_idx]; grd_ctr_lon[idx]=lon_ctr[lon_idx]; for(crn_idx=0L;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; grd_crn_lon[idx2]=lon_crn[lon_idx2]; } /* !crn */ } /* !lon */ } /* !lat */ if(flg_grd_crv){ /* Impose curvilinearity by adding lon_crv offset to each row relative to previous row, and lat_crv offset to each column relative to previous column */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]+=lon_idx*lat_crv; grd_ctr_lon[idx]+=lat_idx*lon_crv; for(crn_idx=0L;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; grd_crn_lon[idx2]=lon_crn[lon_idx2]; if(crn_idx == 0L || crn_idx == 1L){ grd_crn_lat[idx2]+=lat_idx*lat_crv; /* LL, LR */ grd_crn_lon[idx2]+=lat_idx*lon_crv; /* LL, LR */ }else if(crn_idx == 2L || crn_idx == 3L){ grd_crn_lat[idx2]+=(lat_idx+1L)*lat_crv; /* UR, UL */ grd_crn_lon[idx2]+=(lat_idx+1L)*lon_crv; /* UR, UL */ } /* !crn */ } /* !crn */ } /* !lon */ } /* !lat */ } /* !flg_grd_crv */ /* 20190613: Convert CW quadrilaterals to CCW quadrilaterals so TempestRemap accepts grids Default construction/inferral method orders corners CCW and CW for s2n and n2s grids, respectively */ if(!flg_s2n){ nco_bool flg_ccw; /* [flg] Gridcell is CCW */ const int rcr_lvl=1; /* [nbr] Recursion level (1 is top level, 2 and greater are recursed) */ const int idx_ccw=0; /* [idx] Index of starting vertex for CCW check (Point A = tail side AB)
*/ for(idx=0L;idx<grd_sz_nbr;idx++){ idx2=grd_crn_nbr*idx; flg_ccw=nco_ccw_chk(grd_crn_lat+idx2,grd_crn_lon+idx2,grd_crn_nbr,idx_ccw,rcr_lvl); if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_vec) (void)fprintf(stderr,"%s: DEBUG %s reports nco_ccw_chk() tried to change idx = %ld from CW to CCW\n",nco_prg_nm_get(),fnc_nm,idx); } /* !idx */ } /* !flg_s2n */ if(nco_dbg_lvl_get() >= nco_dbg_std){ long int idx_crn_ll; long int idx_crn_lr; long int idx_crn_ur; long int idx_crn_ul; long idx_dbg; idx_dbg=rgr->idx_dbg; idx_crn_ll=grd_crn_nbr*idx_dbg+0L; idx_crn_lr=grd_crn_nbr*idx_dbg+1L; idx_crn_ur=grd_crn_nbr*idx_dbg+2L; idx_crn_ul=grd_crn_nbr*idx_dbg+3L; (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,grd_ctr_lat[idx_dbg],grd_ctr_lon[idx_dbg],grd_crn_lat[idx_crn_ll],grd_crn_lon[idx_crn_ll],grd_crn_lat[idx_crn_lr],grd_crn_lon[idx_crn_lr],grd_crn_lat[idx_crn_ur],grd_crn_lon[idx_crn_ur],grd_crn_lat[idx_crn_ul],grd_crn_lon[idx_crn_ul]); } /* !dbg */ if(flg_grd_crv){ /* Area of arbitrary curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,grd_crn_lat,grd_crn_lon,grd_sz_nbr,grd_crn_nbr,area); }else{ /* Area of rectangular spherical zones from elementary calculus results 20150906: Half-angle formulae for better conditioning improve area normalization for 801x1600 by 2.0e-15 area[lat_idx*lon_nbr+lon_idx]=dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*2.0*(sin(0.5*dgr2rdn*lat_bnd[2*lat_idx+1L])*cos(0.5*dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(0.5*dgr2rdn*lat_bnd[2*lat_idx])*cos(0.5*dgr2rdn*lat_bnd[2*lat_idx])); Gain not worth the extra complexity */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++) /* fabs() ensures positive area in n2s grids */ area[lat_idx*lon_nbr+lon_idx]=fabs(dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*(sin(dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(dgr2rdn*lat_bnd[2*lat_idx]))); } /* !flg_grd_crv */ if(nco_dbg_lvl_get() >= nco_dbg_sbr){ lat_wgt_ttl=0.0; area_ttl=0.0; if(flg_grd_2D){ (void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt_ttl+=lat_wgt[lat_idx]; } /* !flg_grd_2D */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++) area_ttl+=area[lat_idx*lon_nbr+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_ttl,area_ttl/(4.0*M_PI)); assert(area_ttl > 0.0); assert(area_ttl <= 4.0*M_PI); } /* endif dbg */ /* Open grid file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Define dimensions */ rcd=nco_def_dim(out_id,grd_crn_nm,grd_crn_nbr,&dmn_id_grd_crn); rcd=nco_def_dim(out_id,grd_sz_nm,grd_sz_nbr,&dmn_id_grd_sz); rcd=nco_def_dim(out_id,grd_rnk_nm,grd_rnk_nbr,&dmn_id_grd_rnk); int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; /* Define variables */ (void)nco_def_var(out_id,dmn_sz_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_rnk,&dmn_sz_int_id); /* NB: Too small to deflate */ (void)nco_def_var(out_id,grd_area_nm,(nc_type)crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl);
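/* Worked example of the zonal-area formula above (illustrative): a 1x1 dgr gridcell abutting the Equator has area=dgr2rdn*1.0*(sin(dgr2rdn*0.5)-sin(dgr2rdn*(-0.5)))=0.0174533*0.0174531~=3.0462e-04 sr, and the areas of all gridcells of any global grid sum to 4*pi~=12.566 sr, consistent with the area_ttl assertions above */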
(void)nco_def_var(out_id,msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_sz,&msk_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lat_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lon_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lon_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_grd_sz; dmn_ids[1]=dmn_id_grd_crn; (void)nco_def_var(out_id,grd_crn_lat_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lat_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_grd_sz; dmn_ids[1]=dmn_id_grd_crn; (void)nco_def_var(out_id,grd_crn_lon_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lon_id,shuffle,deflate,dfl_lvl); /* Define global and "units" attributes */ char *att_val; rcd=nco_char_att_put(out_id,NULL,"title",rgr->grd_ttl); rcd=nco_char_att_put(out_id,NULL,"Conventions","SCRIP"); const char usr_cpp[]=TKN2SNG(USER); /* [sng] Hostname from C pre-processor */ rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO"); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ)); rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ)); rcd=nco_char_att_put(out_id,dmn_sz_nm,"long_name","Size(s) of horizontal dimensions (in Fortran storage order for historical reasons)"); rcd=nco_char_att_put(out_id,grd_area_nm,"long_name","Solid Angle Subtended on Source Grid"); rcd=nco_char_att_put(out_id,grd_area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,grd_area_nm,"units","steradian"); rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"standard_name","latitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"bounds",grd_crn_lat_nm); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"standard_name","longitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"bounds",grd_crn_lon_nm); rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"long_name","Latitude of Grid Cell Vertices"); rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"standard_name","latitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"long_name","Longitude of Grid Cell Vertices"); rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"standard_name","longitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees_east"); else 
rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,msk_nm,"long_name","Binary Integer Mask for Grid"); rcd=nco_char_att_put(out_id,msk_nm,"units","none"); /* Begin data mode */ (void)nco_enddef(out_id); /* Write variables */ dmn_srt[0]=0L; dmn_cnt[0]=grd_rnk_nbr; rcd=nco_put_vara(out_id,dmn_sz_int_id,dmn_srt,dmn_cnt,dmn_sz_int,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,msk_id,dmn_srt,dmn_cnt,msk,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ); dmn_srt[0]=0L; dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lat_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ); dmn_srt[0]=0L; dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lon_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ); /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); fl_out=rgr->fl_skl; if(fl_out){ /* Write skeleton data file on requested grid Skeleton file can then be populated with data for testing */ char *area_nm; char *bnd_nm; // char *bnd_tm_nm; char *col_nm_out; char *lat_nm_out; /* [sng] Name of output dimension for latitude */ char *lat_wgt_nm; char *lon_nm_out; /* [sng] Name of variable to recognize as longitude */ char *lat_bnd_nm; /* [sng] Name of latitude boundary variable */ char *lon_bnd_nm; /* [sng] Name of longitude boundary variable */ // int area_id; /* [id] Variable ID for area */ int dmn_id_bnd; /* [id] Dimension ID */ //int dmn_id_bnd_tm; /* [id] Dimension ID */ int dmn_id_col; /* [id] Dimension ID */ int dmn_id_lat; /* [id] Dimension ID */ int dmn_id_lon; /* [id] Dimension ID */ int lat_bnd_id; /* [id] Variable ID for lat_bnds/lat_vertices */ int lat_id; /* [id] Variable ID for latitude */ int lat_wgt_id; /* [id] Variable ID for latitude weight */ int lon_bnd_id; /* [id] Variable ID for lon_bnds/lon_vertices */ int lon_id; /* [id] Variable ID for longitude */ /* Use explicitly specified output names, if any, otherwise use input names (either explicitly specified or discovered by fuzzing) */ if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=(char *)strdup("lat"); if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=(char *)strdup("lon"); if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=(char *)strdup("ncol"); /* Name output dimensions */ area_nm=rgr->area_nm; bnd_nm=rgr->bnd_nm; //bnd_tm_nm=rgr->bnd_tm_nm; lat_bnd_nm=rgr->lat_bnd_nm; lat_wgt_nm=rgr->lat_wgt_nm; lon_bnd_nm=rgr->lon_bnd_nm; /* Use names discovered by fuzzing */ if(flg_grd_1D){ bnd_nm=rgr->vrt_nm; lat_bnd_nm=rgr->lat_vrt_nm; lon_bnd_nm=rgr->lon_vrt_nm; } /* !flg_grd_1D */ if(flg_grd_2D){ bnd_nm=rgr->bnd_nm; lat_bnd_nm=rgr->lat_bnd_nm; lon_bnd_nm=rgr->lon_bnd_nm; } /* !flg_grd_2D */ /* Open grid file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Define dimensions */ if(flg_grd_crv){ rcd=nco_def_dim(out_id,bnd_nm,grd_crn_nbr,&dmn_id_bnd); }else{ rcd=nco_def_dim(out_id,bnd_nm,bnd_nbr,&dmn_id_bnd); } /* !flg_grd_crv */ 
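/* NB: unlike the SCRIP grid-file written above, the skeleton file is a CF-style template meant to be populated with data; for the 2D rectangular case the definitions below amount to roughly this CDL (illustrative sketch only, since dimension and variable names here come from rgr defaults or user overrides): dimensions: lat=lat_nbr, lon=lon_nbr, bnd=2; variables: lat(lat), lon(lon), lat_bnds(lat,bnd), lon_bnds(lon,bnd), lat_wgt(lat), area(lat,lon) */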
if(flg_grd_1D){ rcd=nco_def_dim(out_id,col_nm_out,col_nbr,&dmn_id_col); } /* !flg_grd_1D */ if(flg_grd_2D){ rcd=nco_def_dim(out_id,lat_nm_out,lat_nbr,&dmn_id_lat); rcd=nco_def_dim(out_id,lon_nm_out,lon_nbr,&dmn_id_lon); } /* !flg_grd_2D */ /* Define new coordinates and variables in regridded file */ if(flg_grd_1D){ (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_col,&lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_col,&lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_col; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_col; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_1D,&dmn_id_col,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); } /* !flg_grd_1D */ if(flg_grd_crv){ dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_lon; (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_2D,dmn_ids,&lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_2D,dmn_ids,&lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_2D,dmn_ids,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_lon; dmn_ids[2]=dmn_id_bnd; (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_3D,dmn_ids,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_3D,dmn_ids,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); }else if(flg_grd_2D){ (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_lat,&lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_lon,&lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lon; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lat_wgt_nm,crd_typ,dmn_nbr_1D,&dmn_id_lat,&lat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_wgt_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_lon; (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_2D,dmn_ids,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); } /* !flg_grd_2D */ /* Define attributes */ rcd=nco_char_att_put(out_id,NULL,"title",rgr->grd_ttl); rcd=nco_char_att_put(out_id,NULL,"Conventions","CF-1.6"); rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); 
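/* NB: the grid-file above is stamped Conventions="SCRIP" because weight generators (e.g., ERWG, TempestRemap) expect SCRIP metadata, whereas the skeleton file is stamped Conventions="CF-1.6" since it mimics a regriddable data file */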
(void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ)); rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ)); rcd=nco_char_att_put(out_id,area_nm,"long_name","Solid angle subtended by gridcell"); rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm,"units","steradian"); char *crd_val_sng; /* CF-standard coordinates values string */ size_t crd_val_sng_lng=strlen(lat_nm_out)+strlen(lon_nm_out)+1L; crd_val_sng=(char *)nco_malloc(crd_val_sng_lng*sizeof(char)+1L); (void)sprintf(crd_val_sng,"%s %s",lat_nm_out,lon_nm_out); rcd=nco_char_att_put(out_id,area_nm,"coordinates",crd_val_sng); if(crd_val_sng) crd_val_sng=(char *)nco_free(crd_val_sng); rcd=nco_char_att_put(out_id,lat_nm_out,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lat_nm_out,"standard_name","latitude"); rcd=nco_char_att_put(out_id,lat_nm_out,"units","degrees_north"); rcd=nco_char_att_put(out_id,lat_nm_out,"axis","Y"); rcd=nco_char_att_put(out_id,lat_nm_out,"bounds",lat_bnd_nm); if(flg_grd_2D) att_val=strdup("Gridcell latitude interfaces"); else att_val=strdup("Gridcell latitude vertices"); rcd=nco_char_att_put(out_id,lat_bnd_nm,"long_name",att_val); if(flg_grd_2D) rcd=nco_char_att_put(out_id,lat_wgt_nm,"long_name","Latitude quadrature weights (normalized to sum to 2.0 on global grids)"); rcd=nco_char_att_put(out_id,lon_nm_out,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lon_nm_out,"standard_name","longitude"); rcd=nco_char_att_put(out_id,lon_nm_out,"units","degrees_east"); rcd=nco_char_att_put(out_id,lon_nm_out,"axis","X"); rcd=nco_char_att_put(out_id,lon_nm_out,"bounds",lon_bnd_nm); if(flg_grd_2D) att_val=strdup("Gridcell longitude interfaces"); else att_val=strdup("Gridcell longitude vertices"); rcd=nco_char_att_put(out_id,lon_bnd_nm,"long_name",att_val); /* Begin data mode */ (void)nco_enddef(out_id); /* Write new coordinates and variables to regridded file */ if(flg_grd_1D){ dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); } /* !flg_grd_1D */ if(flg_grd_crv){ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ); (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ); (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); dmn_srt[0]=dmn_srt[1]=0L;dmn_srt[2]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; dmn_cnt[2]=grd_crn_nbr; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ); (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ); }else if(flg_grd_2D){ dmn_srt[0]=0L; dmn_cnt[0]=lat_nbr; (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=lon_nbr; (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=lat_nbr; (void)nco_put_vara(out_id,lat_wgt_id,dmn_srt,dmn_cnt,lat_wgt,crd_typ); 
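/* NB: dmn_srt/dmn_cnt are the start/count vectors of the netCDF vara API; each nco_put_vara() call in this block writes its variable in full because dmn_srt is zeroed and dmn_cnt matches the dimension sizes */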
dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lon_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); } /* !flg_grd_2D */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); } /* !fl_out */ /* Free memory associated with input file */ if(dmn_sz_int) dmn_sz_int=(int *)nco_free(dmn_sz_int); if(msk) msk=(int *)nco_free(msk); if(area) area=(double *)nco_free(area); if(grd_ctr_lat) grd_ctr_lat=(double *)nco_free(grd_ctr_lat); if(grd_ctr_lon) grd_ctr_lon=(double *)nco_free(grd_ctr_lon); if(grd_crn_lat) grd_crn_lat=(double *)nco_free(grd_crn_lat); if(grd_crn_lon) grd_crn_lon=(double *)nco_free(grd_crn_lon); if(lat_bnd) lat_bnd=(double *)nco_free(lat_bnd); if(lat_crn) lat_crn=(double *)nco_free(lat_crn); if(lat_ctr) lat_ctr=(double *)nco_free(lat_ctr); if(lat_ntf) lat_ntf=(double *)nco_free(lat_ntf); if(lat_sin) lat_sin=(double *)nco_free(lat_sin); if(lat_wgt) lat_wgt=(double *)nco_free(lat_wgt); if(lon_bnd) lon_bnd=(double *)nco_free(lon_bnd); if(lon_crn) lon_crn=(double *)nco_free(lon_crn); if(lon_ctr) lon_ctr=(double *)nco_free(lon_ctr); if(lon_ntf) lon_ntf=(double *)nco_free(lon_ntf); if(wgt_Gss) wgt_Gss=(double *)nco_free(wgt_Gss); return rcd; } /* !nco_grd_mk() */ int /* O [enm] Return code */ nco_grd_nfr /* [fnc] Infer SCRIP-format grid file from input data file */ (rgr_sct * const rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Use grid information and guesswork to create SCRIP-format grid file from input data file Test curvilinear grids: ncks -O -D 1 --rgr infer --rgr grid=${DATA}/sld/rgr/grd_airs.nc ${DATA}/sld/raw/AIRS.2014.10.01.202.L2.TSurfStd.Regrid010.1DLatLon.nc ~/foo.nc ncks -O -D 1 --rgr infer --rgr grid=${DATA}/sld/rgr/grd_airs.nc ${DATA}/sld/raw/AIRS.2014.10.01.202.L2.TSurfStd.Regrid010.1DLatLon.hole.nc ~/foo.nc */ const char fnc_nm[]="nco_grd_nfr()"; /* [sng] Function name */ const double rdn2dgr=180.0/M_PI; const double dgr2rdn=M_PI/180.0; const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables */ const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ const int dmn_nbr_grd_max=4; /* [nbr] Maximum rank of grid variables (msk_[src/dst] could be rank 4) */ const int itr_nbr_max=20; // [nbr] Maximum number of iterations const int idx_ccw=0; /* [idx] Index of starting vertex for CCW check (Point A = tail side AB) */ const int rcr_lvl=1; /* [nbr] Recursion level (1 is top level, 2 and greater are recursed) */ const nc_type crd_typ=NC_DOUBLE; char *area_nm_in=NULL; char *fl_in; char *fl_out; char *fl_out_tmp=NULL_CEWI; char *fl_pth_lcl=NULL; char *msk_nm_in=NULL; char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */ /* SCRIP-format grid names are non-negotiable and thus fixed not dynamic */ char area_nm[]="grid_area"; /* 20150830: NB ESMF_RegridWeightGen --user_areas looks for variable named "grid_area" */ char dmn_sz_nm[]="grid_dims"; char grd_crn_lat_nm[]="grid_corner_lat"; char grd_crn_lon_nm[]="grid_corner_lon"; char grd_crn_nm[]="grid_corners"; char grd_ctr_lat_nm[]="grid_center_lat"; char grd_ctr_lon_nm[]="grid_center_lon"; char grd_rnk_nm[]="grid_rank"; char grd_sz_nm[]="grid_size"; char msk_nm[]="grid_imask"; char
unt_sng[]="units"; /* netCDF-standard units attribute name */ double *grd_ctr_lat; /* [dgr] Latitude centers of grid */ double *grd_ctr_lon; /* [dgr] Longitude centers of grid */ double *grd_crn_lat; /* [dgr] Latitude corners of grid */ double *grd_crn_lon; /* [dgr] Longitude corners of grid */ double *area; /* [sr] Area of grid */ double *lat_bnd=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular grid */ double *lat_crn=NULL; /* [dgr] Latitude corners of rectangular grid */ double *lat_ctr=NULL_CEWI; /* [dgr] Latitude centers of rectangular grid */ double *lat_ntf=NULL; /* [dgr] Latitude interfaces of rectangular grid */ double *lat_wgt=NULL; /* [dgr] Latitude weights of rectangular grid */ double *lon_bnd=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular grid */ double *lon_crn=NULL; /* [dgr] Longitude corners of rectangular grid */ double *lon_ctr=NULL_CEWI; /* [dgr] Longitude centers of rectangular grid */ double *lon_ntf=NULL; /* [dgr] Longitude interfaces of rectangular grid */ double *vrt_lat=NULL; /* [rdn] MPAS latitude boundary variable latVertex */ double *vrt_lon=NULL; /* [rdn] MPAS longitude boundary variable lonVertex */ double area_ttl=0.0; /* [frc] Exact sum of area */ //double lat_nrt; /* [dgr] Latitude of northern edge of grid */ double lat_sth; /* [dgr] Latitude of southern edge of grid */ double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */ double lat_wgt_gss; /* [frc] Latitude weight estimated from interface latitudes */ // double lon_est; /* [dgr] Longitude of eastern edge of grid */ double lon_wst; /* [dgr] Longitude of western edge of grid */ double lon_ncr; /* [dgr] Longitude increment */ double lat_ncr; /* [dgr] Latitude increment */ double lon_spn; /* [dgr] Longitude span */ double lat_spn; /* [dgr] Latitude span */ double mss_val_area_dbl; double mss_val_ctr_dbl; double mss_val_msk_dbl; int *msk=NULL; /* [flg] Mask of grid */ int *vrt_cll=NULL; /* [enm] MPAS variable verticesOnCell */ int *dmn_sz_int; /* [nbr] Array of dimension sizes of grid */ int dmn_ids[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int dmn_idx; /* [idx] Dimension index */ int fl_out_fmt=NC_FORMAT_CLASSIC; /* [enm] Output file format */ int in_id; /* I [id] Input netCDF file ID */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int area_id=NC_MIN_INT; /* [id] Area variable ID */ int dmn_id_grd_crn; /* [id] Grid corners dimension ID */ int dmn_id_grd_rnk; /* [id] Grid rank dimension ID */ int dmn_id_grd_sz; /* [id] Grid size dimension ID */ int dmn_sz_int_id; /* [id] Grid dimension sizes ID */ int grd_crn_lat_id; /* [id] Grid corner latitudes variable ID */ int grd_crn_lon_id; /* [id] Grid corner longitudes variable ID */ int grd_ctr_lat_id; /* [id] Grid center latitudes variable ID */ int grd_ctr_lon_id; /* [id] Grid center longitudes variable ID */ int itr_cnt; /* Iteration counter */ int lat_rnk; /* [nbr] Rank of latitude coordinate */ int lon_rnk; /* [nbr] Rank of longitude coordinate */ int lat_ctr_id=NC_MIN_INT; /* [id] Latitude centers of rectangular grid variable ID */ int lon_ctr_id=NC_MIN_INT; /* [id] Longitude centers of rectangular grid variable ID */ int lat_bnd_id=NC_MIN_INT; /* [id] Latitude centers of rectangular grid variable ID */ int lon_bnd_id=NC_MIN_INT; /* [id] Longitude centers of rectangular grid variable ID */ int msk_id=NC_MIN_INT; /* [id] Mask variable ID */ int msk_rnk_nbr; /* [id] Mask 
rank */ int mss_val_int_out=NC_MIN_INT; /* [nbr] Value that can be non-erroneously pointed to */ int val_two=2; /* [nbr] Value that can be non-erroneously pointed to */ int val_zero=0; /* [nbr] Value that can be non-erroneously pointed to */ int var_id; /* [id] Current variable ID */ int vrt_cll_id=NC_MIN_INT; /* [id] MPAS variable verticesOnCell ID */ int vrt_lat_id=NC_MIN_INT; /* [id] MPAS latitude boundary variable latVertex ID */ int vrt_lon_id=NC_MIN_INT; /* [id] MPAS longitude boundary variable lonVertex ID */ long dmn_srt[dmn_nbr_grd_max]; long dmn_cnt[dmn_nbr_grd_max]; long bnd_idx; long bnd_nbr=NC_MIN_INT; /* [nbr] Number of bounds in gridcell */ long col_idx; long col_nbr; /* [nbr] Number of columns in grid */ long crn_idx; /* [idx] Counting index for corners */ long ttl_idx; /* [idx] Total (unrolled) counting index for grid+corners */ long dmn_sz; /* [nbr] Size of current dimension */ long grd_crn_nbr; /* [nbr] Number of corners in gridcell */ long grd_rnk_nbr=int_CEWI; /* [nbr] Number of dimensions in grid */ long grd_sz_nbr; /* [nbr] Number of gridcells in grid */ long idx2; /* [idx] Counting index for unrolled grids */ long idx; /* [idx] Counting index for unrolled grids */ long idx_crn; long idx_ctr; long idx_fst; /* [idx] Index offset */ long idx_tmp; /* [idx] Temporary index */ long lat_idx2; /* [idx] Counting index for unrolled latitude */ long lat_idx; long lat_nbr; /* [nbr] Number of latitudes in grid */ long lon_idx2; /* [idx] Counting index for unrolled longitude */ long lon_idx; long lon_nbr; /* [nbr] Number of longitudes in grid */ long vrt_idx; /* [idx] Counting index for vertices */ long vrt_nbr; /* [nbr] Number of vertices in MPAS grid */ long int idx_crn_ll; long int idx_crn_lr; long int idx_crn_ur; long int idx_crn_ul; nco_bool FL_RTR_RMT_LCN; nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=True; /* Option O */ nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=rgr->flg_uio; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool WRT_TMP_FL=False; /* [flg] Write output to temporary file */ nco_bool flg_1D_mpas_bnd=False; /* [flg] Unstructured input grid with MPAS bounds */ nco_bool flg_1D_psd_rct_bnd=False; /* [flg] Unstructured input grid with pseudo-rectangular bounds */ nco_bool flg_ccw; /* [flg] Gridcell is CCW */ nco_bool flg_grd_1D=False; nco_bool flg_grd_2D=False; nco_bool flg_grd_crv=False; nco_bool flg_s2n=True; /* [enm] Latitude grid-direction is South-to-North */ nco_bool flg_wrt_crn=True; nco_bool flg_crn_grd_lat_lon=False; /* [flg] Curvilinear corner array ordered non-canonically as grd_nbr,lat_nbr,lon_nbr */ nco_bool use_mss_val_area=False; nco_bool has_mss_val_area=False; nco_bool has_mss_val_bnd=False; nco_bool has_mss_val_ctr=False; nco_bool has_mss_val_msk=False; nco_grd_2D_typ_enm grd_typ; /* [enm] Grid-type enum */ nco_grd_lat_typ_enm lat_typ; /* [enm] Latitude grid-type enum */ nco_grd_lon_typ_enm lon_typ; /* [enm] Longitude grid-type enum */ nco_grd_xtn_enm nco_grd_xtn=nco_grd_xtn_nil; /* [enm] Grid-extent enum */ nc_type msk_typ; ptr_unn msk_unn; size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ /* Algorithm: Read grid information from input data file (aka *_in) Close input file Once 
grid dimensions known, allocate output grid arrays (aka *_out) Open output file (aka grid-file) Use guesswork and standard algorithms to fill-in output arrays */ /* Duplicate (because nco_fl_mk_lcl() free()'s fl_in) */ fl_in=(char *)strdup(rgr->fl_in); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); char *bnd_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as bounds */ char *col_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as column */ char *lat_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as latitude */ char *lon_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as longitude */ char *lat_nm_in=NULL_CEWI; /* [sng] Name of variable to recognize as latitude */ char *lon_nm_in=NULL_CEWI; /* [sng] Name of variable to recognize as longitude */ char *lat_bnd_nm=NULL_CEWI; /* [sng] Name of latitude boundary variable */ char *lon_bnd_nm=NULL_CEWI; /* [sng] Name of longitude boundary variable */ char *vrt_dmn_nm=NULL_CEWI; /* [sng] Name of MPAS vertices dimension nVertices */ char *vrt_cll_nm=NULL_CEWI; /* [sng] Name of MPAS variable verticesOnCell */ char *vrt_lat_nm=NULL_CEWI; /* [sng] Name of MPAS latitude boundary variable latVertex */ char *vrt_lon_nm=NULL_CEWI; /* [sng] Name of MPAS longitude boundary variable lonVertex */ int dmn_id_bnd=NC_MIN_INT; /* [id] Dimension ID for spatial bounds */ int dmn_id_col=NC_MIN_INT; /* [id] Dimension ID for unstructured grids */ int dmn_id_lat=NC_MIN_INT; /* [id] Dimension ID for latitude */ int dmn_id_lon=NC_MIN_INT; /* [id] Dimension ID for longitude */ int dmn_id_vrt=NC_MIN_INT; /* [id] Dimension ID for MPAS vertices */ /* Begin CF-coordinates block */ cf_crd_sct *cf=NULL; char *rgr_var; /* [sng] Variable for special regridding treatment */ nco_bool flg_cf=False; /* [flg] Follow CF Coordinates convention to find and infer grid */ rgr_var=rgr->var_nm; if(rgr_var){ /* Infer grid from special variable Intended to be variable that has both horizontal dimensions and "coordinates" attribute, e.g., ncks --cdl -m ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc | grep coordinates 4LFTX_221_SPDY_S113:coordinates = "gridlat_221 gridlon_221" ; Usage: ncks -O -D 3 --rgr infer --rgr_var=4LFTX_221_SPDY_S113 --rgr grid=~/grd_narr.nc ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc ~/foo.nc */ char crd_sng[]="coordinates"; /* CF-standard coordinates attribute name */ cf=(cf_crd_sct *)nco_malloc(sizeof(cf_crd_sct)); cf->crd=False; /* [flg] CF coordinates information is complete */ cf->crd_id[0]=NC_MIN_INT; /* [id] Coordinate ID, first */ cf->crd_id[1]=NC_MIN_INT; /* [id] Coordinate ID, second */ cf->crd_nm[0]=NULL; /* [sng] Coordinate name, first */ cf->crd_nm[1]=NULL; /* [sng] Coordinate name, second */ cf->crd_sng=NULL; /* [sng] Coordinates attribute value */ cf->dmn_id[0]=NC_MIN_INT; /* [id] Dimension ID, first */ cf->dmn_id[1]=NC_MIN_INT; /* [id] Dimension ID, second */ cf->dmn_nm[0]=NULL; /* [sng] Dimension name, first */ cf->dmn_nm[1]=NULL; /* [sng] Dimension name, second */ cf->unt_sng[0]=NULL; /* [sng] Units string, first coordinate */ cf->unt_sng[1]=NULL; /* [sng] Units string, second coordinate */ cf->var_id=NC_MIN_INT; /* [id] Coordinate variable ID */ cf->var_nm=NULL; /* [sng] Coordinates variable name */ 
cf->var_type=NC_NAT; /* [enm] Coordinates variable type */ if((rcd=nco_inq_varid_flg(in_id,rgr_var,&cf->var_id)) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports special \"coordinates\" variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd */ cf->crd_sng=nco_char_att_get(in_id,cf->var_id,crd_sng); if(cf->crd_sng){ cf->crd=True; }else{ /* !rcd && att_typ */ (void)fprintf(stderr,"%s: WARNING %s reports coordinates variable %s does not have character-valued \"coordinates\" attribute. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd && att_typ */ /* Valid coordinates attribute requires two coordinate names separated by space character */ char *crd_nm[NCO_MAX_CRD_PER_VAR]; /* [sng] Coordinate name start position */ char *crd_dpl; /* [sng] Modifiable duplicate of coordinates string */ char *spc_ptr; /* [sng] Pointer to space character (' ') */ int crd_nbr=0; /* [nbr] Number of names in coordinates attribute */ int crd_spt=0; /* [nbr] Number of "spatial-like" (that include "degree" in units) coordinates */ int crd_idx=0; /* [idx] Counter for coordinate names */ for(crd_idx=0;crd_idx<NCO_MAX_CRD_PER_VAR;crd_idx++) crd_nm[crd_idx]=NULL; crd_dpl=(char *)strdup(cf->crd_sng); /* Search for spaces starting from end of string */ while((spc_ptr=strrchr(crd_dpl,' '))){ crd_nm[crd_nbr]=spc_ptr+1L; crd_nbr++; /* NUL-terminate so next search ends here */ *spc_ptr='\0'; } /* !sbs_ptr */ /* Final coordinate name begins where coordinate string starts */ crd_nm[crd_nbr]=crd_dpl; /* Change crd_nbr from 0-based index to actual coordinate number */ crd_nbr++; if(crd_nbr < 2){ (void)fprintf(stderr,"%s: WARNING %s found only %d coordinate(s) in \"coordinates\" attribute \"%s\", at least two are required. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,crd_nbr,cf->crd_sng); goto skp_cf; } /* !crd_nbr */ /* If more than two coordinate names are present, choose first two (searching backwards from end) with "degree" in units attributes, otherwise just choose first two */ crd_idx=crd_spt=0; while(crd_spt < 2 && crd_idx < crd_nbr){ cf->crd_nm[crd_spt]=crd_nm[crd_idx]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[crd_spt],&cf->crd_id[crd_spt])) == NC_NOERR){ cf->unt_sng[crd_spt]=nco_char_att_get(in_id,cf->crd_id[crd_spt],unt_sng); if(cf->unt_sng[crd_spt]){ if(strcasestr(cf->unt_sng[crd_spt],"degree")){ /* Increment count of spatial-like coordinates... */ crd_spt++; }else{ /* ...or free() memory allocated during search */ cf->unt_sng[crd_spt]=(char *)nco_free(cf->unt_sng[crd_spt]); } /* !strcasestr() */ } /* !unt_sng */ } /* !rcd */ /* Advance crd_idx unconditionally so a coordinate whose variable or units attribute is missing cannot stall this loop */ crd_idx++; } /* !crd_spt */ /* If while()-loop above was successful, our search is over Otherwise, use first two coordinate names regardless of units, and print more diagnostics */ if(crd_spt < 2){ cf->crd_nm[0]=crd_nm[0]; cf->crd_nm[1]=crd_nm[1]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[0],&cf->crd_id[0])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0]); goto skp_cf; } /* !rcd */ if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[1],&cf->crd_id[1])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s not found.
Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1]); goto skp_cf; } /* !rcd */ cf->unt_sng[0]=nco_char_att_get(in_id,cf->crd_id[0],unt_sng); if(cf->unt_sng[0]){ if(!strcasestr(cf->unt_sng[0],"degree")) (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->unt_sng[0]); } /* !rcd && att_typ */ cf->unt_sng[1]=nco_char_att_get(in_id,cf->crd_id[1],unt_sng); if(cf->unt_sng[1]){ if(!strcasestr(cf->unt_sng[1],"degree")) (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1],cf->unt_sng[1]); } /* !rcd && att_typ */ } /* !crd_spt */ int crd_rnk; /* [nbr] Coordinate rank */ rcd=nco_inq_varndims(in_id,cf->crd_id[0],&crd_rnk); if(crd_rnk != 2){ (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s has %i dimension(s). Skipping CF coordinates method.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],crd_rnk); goto skp_cf; } /* !crd_rnk */ rcd=nco_inq_vardimid(in_id,cf->crd_id[0],cf->dmn_id); cf->dmn_nm[0]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); cf->dmn_nm[1]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); rcd=nco_inq_dimname(in_id,cf->dmn_id[0],cf->dmn_nm[0]); rcd=nco_inq_dimname(in_id,cf->dmn_id[1],cf->dmn_nm[1]); /* "coordinates" convention does not guarantee lat, lon are specified in that order Use "units" values, if any, to determine order In absence of "units", assume order is lat, lon */ nco_bool crd0_is_lat=False; /* [flg] First coordinate is latitude */ nco_bool crd0_is_lon=False; /* [flg] First coordinate is longitude */ nco_bool crd1_is_lat=False; /* [flg] Second coordinate is latitude */ nco_bool crd1_is_lon=False; /* [flg] Second coordinate is longitude */ if(cf->unt_sng[0]){ if(!strcasecmp(cf->unt_sng[0],"degrees_north") || !strcasecmp(cf->unt_sng[0],"degree_north") || !strcasecmp(cf->unt_sng[0],"degree_N") || !strcasecmp(cf->unt_sng[0],"degrees_N") || !strcasecmp(cf->unt_sng[0],"degreeN") || !strcasecmp(cf->unt_sng[0],"degreesN")) crd0_is_lat=True; if(!strcasecmp(cf->unt_sng[0],"degrees_east") || !strcasecmp(cf->unt_sng[0],"degree_east") || !strcasecmp(cf->unt_sng[0],"degree_E") || !strcasecmp(cf->unt_sng[0],"degrees_E") || !strcasecmp(cf->unt_sng[0],"degreeE") || !strcasecmp(cf->unt_sng[0],"degreesE")) crd0_is_lon=True; } /* endif */ if(cf->unt_sng[1]){ if(!strcasecmp(cf->unt_sng[1],"degrees_north") || !strcasecmp(cf->unt_sng[1],"degree_north") || !strcasecmp(cf->unt_sng[1],"degree_N") || !strcasecmp(cf->unt_sng[1],"degrees_N") || !strcasecmp(cf->unt_sng[1],"degreeN") || !strcasecmp(cf->unt_sng[1],"degreesN")) crd1_is_lat=True; if(!strcasecmp(cf->unt_sng[1],"degrees_east") || !strcasecmp(cf->unt_sng[1],"degree_east") || !strcasecmp(cf->unt_sng[1],"degree_E") || !strcasecmp(cf->unt_sng[1],"degrees_E") || !strcasecmp(cf->unt_sng[1],"degreeE") || !strcasecmp(cf->unt_sng[1],"degreesE")) crd1_is_lon=True; } /* endif */ assert((crd0_is_lat && crd1_is_lon) || (crd0_is_lon && crd1_is_lat)); int idx_lat; int idx_lon; if(crd0_is_lat && crd1_is_lon){ idx_lat=0; idx_lon=1; }else{ idx_lat=1; idx_lon=0; } /* endif */ /* Dimensions and coordinates have been vetted. Store as primary lookup names. 
Dimensions are always returned in order [LRV,MRV]=[0,1] LRV is along-track direction, and MRV is across-track (at least in NASA data) Internally we label LRV as "lat" and MRV as "lon" so that code looks similar for curvilinear and rectangular grids */ dmn_id_lat=cf->dmn_id[0]; dmn_id_lon=cf->dmn_id[1]; /* Subtlety: lat_nm_in is coordinate (variable+dimension) name when specified from command-line (as in nco_grd_nfr()), dimension name when found through CF-method (as in nco_rgr_wgt()). This confusing distinction could be avoided by passing command-line dimension names through to nco_rgr_wgt(). However, that route would require complex priorities for what to do when passing command-line coordinate names rather than dimension names, and vice versa. */ //lat_nm_in=strdup(cf->dmn_nm[0]); //lon_nm_in=strdup(cf->dmn_nm[1]); lat_nm_in=strdup(cf->crd_nm[idx_lat]); lon_nm_in=strdup(cf->crd_nm[idx_lon]); /* Next four lines unnecessary in nco_rgr_wgt() which only needs dimension names (it reads input coordinates from map- not data-file) */ lat_ctr_id=cf->crd_id[idx_lat]; lon_ctr_id=cf->crd_id[idx_lon]; lat_dmn_nm=strdup(cf->dmn_nm[0]); lon_dmn_nm=strdup(cf->dmn_nm[1]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s \"coordinates\" attribute \"%s\" points to coordinates %s and %s. Latitude coordinate \"%s\" has LRV (along-track) and MRV (across-track) dimensions \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,rgr_var,cf->crd_sng,cf->crd_nm[0],cf->crd_nm[1],cf->crd_nm[idx_lat],cf->dmn_nm[0],cf->dmn_nm[1]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s Coordinates %s and %s \"units\" values are \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->crd_nm[1],cf->unt_sng[0] ? cf->unt_sng[0] : "(non-existent)",cf->unt_sng[1] ?
cf->unt_sng[1] : "(non-existent)"); /* Clean-up CF coordinates memory */ if(crd_dpl) crd_dpl=(char *)nco_free(crd_dpl); if(cf->crd_sng) cf->crd_sng=(char *)nco_free(cf->crd_sng); if(cf->dmn_nm[0]) cf->dmn_nm[0]=(char *)nco_free(cf->dmn_nm[0]); if(cf->dmn_nm[1]) cf->dmn_nm[1]=(char *)nco_free(cf->dmn_nm[1]); if(cf->unt_sng[0]) cf->unt_sng[0]=(char *)nco_free(cf->unt_sng[0]); if(cf->unt_sng[1]) cf->unt_sng[1]=(char *)nco_free(cf->unt_sng[1]); } /* !rgr_var */ /* goto skp_cf */ skp_cf: /* free() any abandoned cf structure now */ if(!flg_cf) if(cf) cf=(cf_crd_sct *)nco_free(cf); rcd=NC_NOERR; /* End CF-coordinates block */ /* Locate fields that must be present in input file Required variables are usually latitude and longitude Currently these variables must be in root group This fails for, e.g., OMI L2 which has coordinates /GEOLOCATION_DATA/[Latitude,Longitude] fxm: Generalize with traversal table so usual suspect coordinates may be in any group */ if(lat_ctr_id == NC_MIN_INT){ if(rgr->lat_nm_in && (rcd=nco_inq_varid_flg(in_id,rgr->lat_nm_in,&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup(rgr->lat_nm_in); else if((rcd=nco_inq_varid_flg(in_id,"latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latitude"); else if((rcd=nco_inq_varid_flg(in_id,"Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("Latitude"); /* AMSR, HIRDLS, TRMM */ else if((rcd=nco_inq_varid_flg(in_id,"lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("lat"); /* CAM */ else if((rcd=nco_inq_varid_flg(in_id,"lat_d",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("lat_d"); /* EAM dynamics grid */ else if((rcd=nco_inq_varid_flg(in_id,"Lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("Lat"); else if((rcd=nco_inq_varid_flg(in_id,"XLAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("XLAT"); /* WRF */ else if((rcd=nco_inq_varid_flg(in_id,"XLAT_M",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("XLAT_M"); /* Unknown */ else if((rcd=nco_inq_varid_flg(in_id,"LAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("LAT"); /* MAR/RACMO */ else if((rcd=nco_inq_varid_flg(in_id,"LATIXY",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("LATIXY"); /* CISM/CLM/ELM */ else if((rcd=nco_inq_varid_flg(in_id,"TLAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("TLAT"); /* CICE, POP */ else if((rcd=nco_inq_varid_flg(in_id,"ULAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("ULAT"); /* CICE, POP */ else if((rcd=nco_inq_varid_flg(in_id,"latCell",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latCell"); /* MPAS-O/I */ else if((rcd=nco_inq_varid_flg(in_id,"nav_lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("nav_lat"); /* NEMO */ else if((rcd=nco_inq_varid_flg(in_id,"rlat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("rlat"); /* RACMO */ else if((rcd=nco_inq_varid_flg(in_id,"global_latitude0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("global_latitude0"); /* Oxford */ else if((rcd=nco_inq_varid_flg(in_id,"latitude0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latitude0"); /* Oxford NB: Must search for global_* first */ else if((rcd=nco_inq_varid_flg(in_id,"CO_Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("CO_Latitude"); /* MLS */ else if((rcd=nco_inq_varid_flg(in_id,"S1_Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("S1_Latitude"); /* GPM */ else if((rcd=nco_inq_varid_flg(in_id,"yc",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("yc"); /* RTM */ else if((rcd=nco_inq_varid_flg(in_id,"south_north",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("south_north"); /* StackOverflow question https://stackoverflow.com/questions/68896581 */ else 
if((rcd=nco_inq_varid_flg(in_id,"gridlat_0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("gridlat_0"); /* NWS HRRR */ } /* !lat_ctr_id */ if(lon_ctr_id == NC_MIN_INT){ if(rgr->lon_nm_in && (rcd=nco_inq_varid_flg(in_id,rgr->lon_nm_in,&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup(rgr->lon_nm_in); else if((rcd=nco_inq_varid_flg(in_id,"longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("longitude"); else if((rcd=nco_inq_varid_flg(in_id,"Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("Longitude"); /* AMSR, TRMM */ else if((rcd=nco_inq_varid_flg(in_id,"lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lon"); /* CAM */ else if((rcd=nco_inq_varid_flg(in_id,"lon_d",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lon"); /* EAM dynamics grid */ else if((rcd=nco_inq_varid_flg(in_id,"Lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("Lon"); else if((rcd=nco_inq_varid_flg(in_id,"XLONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("XLONG"); /* WRF */ else if((rcd=nco_inq_varid_flg(in_id,"XLONG_M",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("XLONG_M"); /* Unknown */ else if((rcd=nco_inq_varid_flg(in_id,"LON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("LON"); /* MAR/RACMO */ else if((rcd=nco_inq_varid_flg(in_id,"LONGXY",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("LONGXY"); /* CISM/CLM/ELM */ else if((rcd=nco_inq_varid_flg(in_id,"TLON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("TLON"); /* CICE */ else if((rcd=nco_inq_varid_flg(in_id,"TLONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("TLONG"); /* POP */ else if((rcd=nco_inq_varid_flg(in_id,"ULON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("ULON"); /* CICE */ else if((rcd=nco_inq_varid_flg(in_id,"ULONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("ULONG"); /* POP */ else if((rcd=nco_inq_varid_flg(in_id,"lonCell",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lonCell"); /* MPAS-O/I */ else if((rcd=nco_inq_varid_flg(in_id,"nav_lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("nav_lon"); /* NEMO */ else if((rcd=nco_inq_varid_flg(in_id,"rlon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("rlon"); /* RACMO */ else if((rcd=nco_inq_varid_flg(in_id,"global_longitude0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("global_longitude0"); /* Oxford NB: Must search for global_* first */ else if((rcd=nco_inq_varid_flg(in_id,"longitude0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("longitude0"); /* Oxford */ else if((rcd=nco_inq_varid_flg(in_id,"CO_Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("CO_Longitude"); /* MLS */ else if((rcd=nco_inq_varid_flg(in_id,"S1_Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("S1_Longitude"); /* GPM */ else if((rcd=nco_inq_varid_flg(in_id,"xc",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("xc"); /* RTM */ else if((rcd=nco_inq_varid_flg(in_id,"west_east",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("west_east"); /* StackOverflow question https://stackoverflow.com/questions/68896581 */ else if((rcd=nco_inq_varid_flg(in_id,"gridlon_0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("gridlon_0"); /* NWS HRRR */ } /* !lon_ctr_id */ if(!lat_nm_in || !lon_nm_in){ (void)fprintf(stdout,"%s: ERROR %s unable to identify latitude and/or longitude variable.\nHINT: Potential causes and workarounds for this include: 1. Coordinate variables must be in the root directory (not in a group). If this might be the problem, try to \"flatten\" the input file before regridding it (see http://nco.sf.net/nco.html#flatten). 2. Horizontal dimensions with \"unusual\" names are hard to identify unless the user designates them somehow. 
ncremap will search for horizontal dimensions named in the \"coordinates\" attribute in a template variable specified with the \"-V rgr_var\" option. 3. NCO will also search its own internal database for likely names of horizontal coordinate variables (lat, latitude, LAT, XLAT, etc.). Contact the NCO project to have your idiosyncratic coordinate names added to the internal database.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lat_nm_in */ /* Rank of coordinates determines whether grid is curvilinear */ rcd+=nco_inq_varndims(in_id,lat_ctr_id,&lat_rnk); rcd+=nco_inq_varndims(in_id,lon_ctr_id,&lon_rnk); /* If lat_ctr and lon_ctr share same and only dimension then grid is unstructured */ if(lat_rnk*lon_rnk == 1){ rcd+=nco_inq_vardimid(in_id,lat_ctr_id,&dmn_id_lat); rcd+=nco_inq_vardimid(in_id,lon_ctr_id,&dmn_id_lon); if(dmn_id_lat == dmn_id_lon){ dmn_id_col=dmn_id_lat; dmn_id_lat=NC_MIN_INT; dmn_id_lon=NC_MIN_INT; rcd+=nco_inq_dimname(in_id,dmn_id_col,dmn_nm); col_dmn_nm=(char *)strdup(dmn_nm); flg_grd_1D=True; } /* !unstructured */ } /* lat_rnk == lon_rnk == 1 */ if(lat_rnk*lon_rnk == 1 && dmn_id_lat != NC_MIN_INT && dmn_id_lon != NC_MIN_INT){ flg_grd_crv=False; flg_grd_2D=True; } /* !lat_rnk */ if(lat_rnk == dmn_nbr_2D || lon_rnk == dmn_nbr_2D){ flg_grd_crv=True; flg_grd_2D=False; } /* !lat_rnk */ if(lat_rnk > dmn_nbr_2D || lon_rnk > dmn_nbr_2D){ (void)fprintf(stdout,"%s: ERROR %s reports an identified grid variable (%s with rank %d and/or %s with rank %d) has rank greater than two---grid variables currently must have rank 1 or 2.\nHINT: If grid variables do not vary in time, then temporally average them (with, e.g., ncwa -a time in.nc out.nc) prior to inferring grid\n",nco_prg_nm_get(),fnc_nm,lat_nm_in,lat_rnk,lon_nm_in,lon_rnk); nco_exit(EXIT_FAILURE); } /* !3D */ if(lat_rnk*lon_rnk != 1 && lat_rnk*lon_rnk != 4) assert(False); /* Scrutinize coordinates for their dimensions NB: Unstructured already known */ if(flg_grd_2D){ rcd+=nco_inq_dimname(in_id,dmn_id_lat,dmn_nm); lat_dmn_nm=(char *)strdup(dmn_nm); rcd+=nco_inq_dimname(in_id,dmn_id_lon,dmn_nm); lon_dmn_nm=(char *)strdup(dmn_nm); } /* !flg_grd_2D */ if(flg_grd_crv){ rcd+=nco_inq_vardimid(in_id,lat_ctr_id,dmn_ids); /* fxm: use cf struct and match with units name, if any? 
normally curvilinear grid dimensions are just pixel dimensions that are not aligned north-south or east-west */ dmn_id_lat=dmn_ids[0]; dmn_id_lon=dmn_ids[1]; rcd+=nco_inq_dimname(in_id,dmn_id_lat,dmn_nm); lat_dmn_nm=(char *)strdup(dmn_nm); rcd+=nco_inq_dimname(in_id,dmn_id_lon,dmn_nm); lon_dmn_nm=(char *)strdup(dmn_nm); } /* !flg_grd_crv */ if(!(lat_dmn_nm && lon_dmn_nm) && !col_dmn_nm){ (void)fprintf(stdout,"%s: ERROR %s unable to identify latitude and/or longitude dimension and/or column dimension.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !col_dmn_nm !lat_dmn_nm !lon_dmn_nm */ /* Locate spatial dimensions that may be present NB: bounds dimensions may present a special problem CAM-FV and CAM-SE use nbnd for temporal bounds and have no spatial bounds dimension CAM3 uses tbnd for temporal bounds and has no spatial bounds dimension CICE and POP use d2 for temporal bounds, and CICE uses nvertices for spatial bounds while POP uses nothing Hence search for nvertices before nbnd to ensure spatial bound is found first */ if((rcd=nco_inq_dimid_flg(in_id,"nv",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("nv"); /* fxm */ else if((rcd=nco_inq_dimid_flg(in_id,"nvertices",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("nvertices"); /* CICE */ else if((rcd=nco_inq_dimid_flg(in_id,"maxEdges",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("maxEdges"); /* MPAS */ if((rcd=nco_inq_dimid_flg(in_id,"nVertices",&dmn_id_vrt)) == NC_NOERR) vrt_dmn_nm=strdup("nVertices"); /* MPAS */ /* Use dimension IDs to get dimension sizes and grid size */ if(flg_grd_1D){ rcd+=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr); lat_nbr=lon_nbr=col_nbr; }else{ rcd+=nco_inq_dimlen(in_id,dmn_id_lat,&lat_nbr); rcd+=nco_inq_dimlen(in_id,dmn_id_lon,&lon_nbr); col_nbr=NC_MIN_INT; } /* !flg_grd_1D */ if(dmn_id_bnd != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_bnd,&grd_crn_nbr); if(dmn_id_bnd != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_bnd,&bnd_nbr); if(dmn_id_vrt != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_vrt,&vrt_nbr); if(flg_grd_1D){ /* Unstructured grid (e.g., CAM-SE) */ grd_rnk_nbr=dmn_nbr_1D; grd_typ=nco_grd_2D_unk; lat_typ=nco_grd_lat_unk; lon_typ=nco_grd_lon_unk; /* 1D grids without their own boundaries are at the mercy of the weight generator */ if(dmn_id_bnd == NC_MIN_INT){ (void)fprintf(stdout,"%s: WARNING %s reports an unstructured grid without spatial boundary information. NCO can copy but not infer spatial boundaries from unstructured grids. Thus NCO will not write spatial bounds to the gridfile inferred from this input file. Instead, the weight generator that ingests this gridfile must generate weights for gridcells with unknown spatial extent. This is feasible for grids and mappings where weights masquerade as areas and are determined by underlying grid and interpolation type (e.g., bilinear remapping of spectral element grid). Unfortunately, the ESMF_RegridWeightGen (ERWG) program requires cell interfaces in both grid files, so ERWG will break on this gridfile. Other weight generators such as TempestRemap may be more successful with this SCRIP file.\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT Re-run the regridder, this time adding the \"-s src_grd\" option to specify the source grid file in SCRIP format. 
That SCRIP file will have the spatial bounds information required by the ESMF_RegridWeightGen (ERWG) program, so that the regridder will circumvent inferring the underlying grid through its fragile black magic.\n",nco_prg_nm_get()); flg_wrt_crn=False; /* Input could actually be from grid with no polygonal definition, e.g., CAM-SE Corner number is non-deterministic since, e.g., CAM-SE dual grid can be fit to quadrilaterals, pentagons, chevrons, etc. Bounds will not be diagnosed so safe to set grd_crn_nbr to harmless (though weird) value like 4 However, ERWG requires presence of valid corner dimension "grid_corners" and arrays in input SCRIP file So ERWG will break when reading this SCRIP file regardless of whether it contains arrays (with bogus values) By default do not write grid corner values */ grd_crn_nbr=4; } /* !dmn_id_bnd */ if(bnd_nbr == 2){ /* Unstructured grids with bounds information (e.g., OCO2) may use a pseudo-rectangular convention of archiving latitude and longitude bounds as 2xN (rather than 4xN) arrays even though cells have four corners. The "convention" is that two latitudes and two longitudes suffice to specify a rectangular boundary cell In this case, bnd_nbr=grd_crn_nbr=2=sizeof(nv)=sizeof(nvertices) currently Set number of corners to rectangular and leave bnd_nbr as is */ grd_crn_nbr=4; flg_1D_psd_rct_bnd=True; } /* !bnd_nbr */ if(bnd_dmn_nm && !strcmp(bnd_dmn_nm,"maxEdges")){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Unstructured grid has dimension \"%s\" which indicates an MPAS grid. Will attempt to locate other MPAS information (dimension nVertices and variables verticesOnCell, lonVertex, and latVertex) to construct SCRIP-compliant bounds variables...\n",nco_prg_nm_get(),bnd_dmn_nm); if((rcd=nco_inq_varid_flg(in_id,"verticesOnCell",&vrt_cll_id)) == NC_NOERR) vrt_cll_nm=strdup("verticesOnCell"); if((rcd=nco_inq_varid_flg(in_id,"lonVertex",&vrt_lon_id)) == NC_NOERR) vrt_lon_nm=strdup("lonVertex"); if((rcd=nco_inq_varid_flg(in_id,"latVertex",&vrt_lat_id)) == NC_NOERR) vrt_lat_nm=strdup("latVertex"); if(dmn_id_vrt != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_vrt,&vrt_nbr); if(vrt_dmn_nm && vrt_cll_nm && vrt_lon_nm && vrt_lat_nm){ flg_1D_mpas_bnd=True; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Found all MPAS information needed to construct SCRIP-compliant bounds variables.\n",nco_prg_nm_get()); }else{ (void)fprintf(stdout,"%s: INFO Unable to find all MPAS information needed to construct SCRIP-compliant bounds variables. Will not write bounds coordinates. This will degrade usefulness of SCRIP file for regridding schemes (e.g., conservative) that require cell boundaries.\n",nco_prg_nm_get()); (void)fprintf(stdout,"%s: HINT Often MPAS restart files contain the required bounds variables (verticesOnCell, lonVertex, latVertex) that normal MPAS data files lack.
Try inferring the SCRIP grid from a restart file not a normal time-varying output dataset.\n",nco_prg_nm_get()); flg_wrt_crn=False; } /* !vrt_cll_nm */ } /* !bnd_dmn_nm */ }else if(flg_grd_2D){ /* !flg_grd_1D */ /* Assume 2D grid of uninitialized type */ grd_rnk_nbr=dmn_nbr_2D; grd_typ=nco_grd_2D_nil; lat_typ=nco_grd_lat_nil; lon_typ=nco_grd_lon_nil; /* Assume rectangular grids that do not specify otherwise use quadrilaterals */ if(dmn_id_bnd == NC_MIN_INT) grd_crn_nbr=4; /* Sometimes we infer from a 2D grid, like those produced by nco_grd_mk(), that has bounds with nv=2 This signals rectangular gridcell bounds are interfaces not vertices (to save half the space) These rectangles really have four corners so we change grd_crn_nbr (not bnd_nbr) accordingly */ if(grd_crn_nbr == 2) grd_crn_nbr=4; /* Convention is to archive only two bounds for rectangular grids (since sides are identical) Non-quadrilateral rectangular grids are untested */ if(grd_crn_nbr == 4) bnd_nbr=2; }else if(flg_grd_crv){ /* !flg_grd_2D */ /* Assume curvilinear grid (e.g., WRF) */ flg_grd_2D=False; grd_rnk_nbr=dmn_nbr_2D; grd_typ=nco_grd_2D_unk; lat_typ=nco_grd_lat_unk; lon_typ=nco_grd_lon_unk; /* Assume curvilinear grids that do not specify otherwise use quadrilaterals */ if(dmn_id_bnd == NC_MIN_INT) grd_crn_nbr=4; /* Assume quadrilaterals are, well, quadrilaterals (e.g., rhomboids) not necessarily rectangles Non-quadrilateral curvilinear grids are untested */ if(grd_crn_nbr == 4) bnd_nbr=4; else assert(False); } /* !flg_grd_crv */ /* Allocate space for output data */ if(flg_grd_1D) grd_sz_nbr=col_nbr; else grd_sz_nbr=lat_nbr*lon_nbr; dmn_sz_int=(int *)nco_malloc(grd_rnk_nbr*nco_typ_lng((nc_type)NC_INT)); area=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); msk=(int *)nco_malloc(grd_sz_nbr*nco_typ_lng((nc_type)NC_INT)); if(flg_grd_1D){ if(bnd_nbr != NC_MIN_INT) lat_bnd=(double *)nco_malloc(grd_sz_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lat_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); if(bnd_nbr != NC_MIN_INT) lon_bnd=(double *)nco_malloc(grd_sz_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); }else if(flg_grd_2D){ /* !flg_grd_1D */ lat_bnd=(double *)nco_malloc(lat_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lat_crn=(double *)nco_malloc(lat_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lon_bnd=(double *)nco_malloc(lon_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(lon_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(lon_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); }else if(flg_grd_crv){ /* !flg_grd_2D */ lat_bnd=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lon_bnd=(double 
*)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); } /* !flg_grd_crv */ grd_ctr_lat=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_ctr_lon=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lat=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lon=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); /* Locate fields that may be present in input file */ if((rcd=nco_inq_varid_flg(in_id,"lat_bnds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_bnds"); else if((rcd=nco_inq_varid_flg(in_id,"latt_bounds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latt_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"latu_bounds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latu_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"lat_ntf",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_ntf"); else if((rcd=nco_inq_varid_flg(in_id,"lat_vertices",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_vertices"); else if((rcd=nco_inq_varid_flg(in_id,"latitude_bnds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latitude_bnds"); /* OCO2 */ else if((rcd=nco_inq_varid_flg(in_id,"LatitudeCornerpoints",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("LatitudeCornerpoints"); /* OMI */ if((rcd=nco_inq_varid_flg(in_id,"lon_bnds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_bnds"); else if((rcd=nco_inq_varid_flg(in_id,"lont_bounds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lont_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"lonu_bounds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lonu_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"lon_ntf",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_ntf"); else if((rcd=nco_inq_varid_flg(in_id,"lon_vertices",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_vertices"); else if((rcd=nco_inq_varid_flg(in_id,"longitude_bnds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("longitude_bnds"); /* OCO2 */ else if((rcd=nco_inq_varid_flg(in_id,"LongitudeCornerpoints",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("LongitudeCornerpoints"); /* OMI */ if((rcd=nco_inq_varid_flg(in_id,"area",&area_id)) == NC_NOERR) area_nm_in=strdup("area"); else if((rcd=nco_inq_varid_flg(in_id,"Area",&area_id)) == NC_NOERR) area_nm_in=strdup("Area"); else if((rcd=nco_inq_varid_flg(in_id,"areaCell",&area_id)) == NC_NOERR) area_nm_in=strdup("areaCell"); /* MPAS-O/I */ else if((rcd=nco_inq_varid_flg(in_id,"grid_area",&area_id)) == NC_NOERR) area_nm_in=strdup("grid_area"); else if((rcd=nco_inq_varid_flg(in_id,"area_d",&area_id)) == NC_NOERR) area_nm_in=strdup("area_d"); /* EAM dynamics grid */ else if((rcd=nco_inq_varid_flg(in_id,"area_p",&area_id)) == NC_NOERR) area_nm_in=strdup("area_p"); /* EAM physics grid */ // else if((rcd=nco_inq_varid_flg(in_id,"aice",&area_id)) == NC_NOERR) area_nm_in=strdup("aice"); /* CICE time-dependent ice area (3D), not total gridcell area */ else if((rcd=nco_inq_varid_flg(in_id,"tarea",&area_id)) == NC_NOERR) area_nm_in=strdup("tarea"); /* CICE time-invariant state-variable gridcell area (2D) */ else if((rcd=nco_inq_varid_flg(in_id,"uarea",&area_id)) == NC_NOERR) area_nm_in=strdup("uarea"); /* CICE time-invariant dynamics variables (2D) */ msk_nm_in=rgr->msk_var; if(msk_nm_in){ if(!strcasecmp(msk_nm_in,"none")){ /* 20170814: Some variables named "*mask*" are, e.g., quality control masks not regridding masks per se */ 
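/* Illustrative sketch (disabled), not part of the original routine: the long if-else ladders that probe the internal name database in this routine (lat/lon centers, bounds, area, mask) can be expressed as one table-driven search. The helper name and example name list below are hypothetical; the only API assumed is the raw netCDF nc_inq_varid(). */
#if 0
#include <netcdf.h>
static int /* O [id] Variable ID, NC_MIN_INT if no candidate found */
nco_var_db_srch_sketch /* [fnc] Hypothetical table-driven name-database search */
(const int nc_id, /* I [id] netCDF file ID */
 const char * const *nm_lst, /* I [sng] NULL-terminated list of candidate names */
 const char **nm_mch) /* O [sng] Name that matched, NULL if none */
{
  int var_id;
  for(size_t nm_idx=0;nm_lst[nm_idx];nm_idx++)
    if(nc_inq_varid(nc_id,nm_lst[nm_idx],&var_id) == NC_NOERR){*nm_mch=nm_lst[nm_idx];return var_id;}
  *nm_mch=NULL;
  return NC_MIN_INT;
} /* !nco_var_db_srch_sketch */
/* Usage sketch: const char *msk_db[]={"mask","Mask","mask_b","grid_imask","landmask","tmask",NULL};
   msk_id=nco_var_db_srch_sketch(in_id,msk_db,&msk_nm_fnd); */
#endif /* !0 */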
msk_nm_in=(char *)nco_free(msk_nm_in); }else{ /* User-supplied name overrides database */ rcd=nco_inq_varid(in_id,msk_nm_in,&msk_id); } /* !msk_nm_in */ }else{ /* Otherwise search database */ if((rcd=nco_inq_varid_flg(in_id,"mask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("mask"); else if((rcd=nco_inq_varid_flg(in_id,"Mask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("Mask"); else if((rcd=nco_inq_varid_flg(in_id,"mask_b",&msk_id)) == NC_NOERR) msk_nm_in=strdup("mask_b"); else if((rcd=nco_inq_varid_flg(in_id,"grid_imask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("grid_imask"); else if((rcd=nco_inq_varid_flg(in_id,"landmask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("landmask"); /* ALM/CLM */ else if((rcd=nco_inq_varid_flg(in_id,"tmask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("tmask"); /* CICE */ } /* !msk_nm_in */ /* Mask field requires special handling for non-conformant models */ if(msk_id != NC_MIN_INT){ /* 20151201: All models tested define mask as NC_INT except CICE which uses NC_FLOAT 20160111: Few observations tested define mask. Exceptions include AMSR and GHRSST. AMSR uses NC_SHORT to store bitmasks. Bitmask is 1 for missing data, and up to 128 for various quality levels of valid data. Hence, almost better to ignore AMSR mask variable. GHRSST uses NC_BYTE for its 3D "mask" bit-mask of surface-type values 1,2,4,8,16. */ rcd=nco_inq_varndims(in_id,msk_id,&msk_rnk_nbr); if(msk_rnk_nbr != grd_rnk_nbr && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports input mask variable \"%s\" is rank %d while grid is rank %ld so will use first timestep/layer to determine output mask\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,msk_rnk_nbr,grd_rnk_nbr); rcd=nco_inq_vartype(in_id,msk_id,&msk_typ); msk_unn.vp=(void *)nco_malloc(grd_sz_nbr*nco_typ_lng(msk_typ)); } /* !msk */ /* All grids: Some real-world datasets violate convention that coordinates ought never have missing values CICE lists missing value for lat/lon_ctr arrays (TLAT, TLONG) and re-uses that for bounds arrays (latt_bounds, lont_bounds) that do not bother to have their own missing value attributes Without counter-example, assume has_mss_val_bnd=has_mss_val_ctr and mss_val_bnd_dbl=mss_val_ctr_dbl */ has_mss_val_bnd=has_mss_val_ctr=nco_mss_val_get_dbl(in_id,lat_ctr_id,&mss_val_ctr_dbl); char *att_val; char *area_unt=NULL; /* [sng] Dimensional units used in area */ char *ngl_unt=NULL; /* [sng] Angular units used in coordinates */ long att_sz; nc_type att_typ; nco_bool flg_area_sr=True; /* [flg] Input area is in steradians not something weird like km2 */ nco_bool flg_crd_rdn=False; /* [flg] Input coordinates are in radians not degrees */ if(flg_grd_1D){ /* Obtain fields that must be present in unstructured input file */ dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); /* Obtain fields that may be present in unstructured input file */ if(area_id != NC_MIN_INT) rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); if(msk_id != NC_MIN_INT){ if(msk_rnk_nbr > grd_rnk_nbr){ /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer...
*/ for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=1L; } /* !dmn_idx */ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=col_nbr; } /* !msk_rnk_nbr */ rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ); } /* !msk_id */ dmn_srt[0]=dmn_srt[1]=0L; if(flg_1D_psd_rct_bnd){ dmn_cnt[0]=col_nbr; dmn_cnt[1]=bnd_nbr; if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); }else if(flg_1D_mpas_bnd){ const long grd_crn_nbrm1=grd_crn_nbr-1L; /* [nbr] Number of corners in gridcell minus one */ vrt_cll=(int *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng((nc_type)NC_INT)); vrt_lat=(double *)nco_malloc(vrt_nbr*nco_typ_lng(crd_typ)); vrt_lon=(double *)nco_malloc(vrt_nbr*nco_typ_lng(crd_typ)); dmn_cnt[0]=col_nbr; dmn_cnt[1]=grd_crn_nbr; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports dimension sizes bnd_nbr=%ld, col_nbr=%ld, grd_crn_nbr=%ld, vrt_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,bnd_nbr,col_nbr,grd_crn_nbr,vrt_nbr); if(vrt_cll_id != NC_MIN_INT) rcd=nco_get_vara(in_id,vrt_cll_id,dmn_srt,dmn_cnt,vrt_cll,(nc_type)NC_INT); dmn_cnt[0]=vrt_nbr; if(vrt_lat_id != NC_MIN_INT) rcd=nco_get_vara(in_id,vrt_lat_id,dmn_srt,dmn_cnt,vrt_lat,crd_typ); if(vrt_lon_id != NC_MIN_INT) rcd=nco_get_vara(in_id,vrt_lon_id,dmn_srt,dmn_cnt,vrt_lon,crd_typ); rcd=nco_inq_att_flg(in_id,vrt_lat_id,unt_sng,&att_typ,&att_sz); if(rcd == NC_NOERR && att_typ == NC_CHAR){ att_val=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ)); rcd+=nco_get_att(in_id,vrt_lat_id,unt_sng,att_val,att_typ); /* NUL-terminate attribute before using strstr() */ att_val[att_sz]='\0'; /* Match "radian" and "radians" */ if(strstr(att_val,"radian")) flg_crd_rdn=True; if(att_val) ngl_unt=(char *)strdup(att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ for(col_idx=0;col_idx<col_nbr;col_idx++){ idx=col_idx*grd_crn_nbr; for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ ttl_idx=idx+crn_idx; vrt_idx=vrt_cll[ttl_idx]; assert(vrt_idx >= 0); //if(vrt_idx >= vrt_nbr) (void)fprintf(stdout,"%s: WARNING %s input gridcell %ld corner %ld has extreme MPAS input verticesOnCell value %ld (maximum valid vertex = vrt_nbr-1 = %ld-1 = %ld)\n",nco_prg_nm_get(),fnc_nm,col_idx,crn_idx,vrt_idx,vrt_nbr,vrt_nbr-1); if(vrt_idx == 0){ /* 20201220: Convert values of zero to neighboring valid vertex index */ for(idx_fst=1;idx_fst<grd_crn_nbr;idx_fst++){ idx_tmp=crn_idx+idx_fst; /* Wrap to initial corner of this cell when candidate corner would be in next cell */ if(idx_tmp > grd_crn_nbrm1) idx_tmp-=grd_crn_nbr; ttl_idx=idx+idx_tmp; vrt_idx=vrt_cll[ttl_idx]; if(vrt_idx != 0) break; } /* !idx_fst */ assert(idx_fst < grd_crn_nbr); } /* !vrt_idx */ /* 20201220: Stored vertex indices use Fortran-based convention---subtract one for C */ vrt_idx--; lat_crn[ttl_idx]=vrt_lat[vrt_idx]; lon_crn[ttl_idx]=vrt_lon[vrt_idx]; //(void)fprintf(stdout,"%s: DEBUG %s reports col_idx = %ld, crn_idx = %ld, ttl_idx = %ld, vrt_idx = %ld, vrt_lat = %g, vrt_lon = %g\n",nco_prg_nm_get(),fnc_nm,col_idx,crn_idx,ttl_idx,vrt_idx,vrt_lat[vrt_idx],vrt_lon[vrt_idx]); } /* !crn_idx */ } /* !col_idx */ }else{ /* !flg_1D_mpas_bnd */ dmn_cnt[0]=col_nbr; dmn_cnt[1]=grd_crn_nbr; if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_crn,crd_typ); if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_crn,crd_typ); } /* 
!flg_1D_psd_rct_bnd */ } /* !flg_grd_1D */ if(flg_grd_crv){ /* Obtain fields that must be present in curvilinear input file */ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); /* 20150923: Also input, if present in curvilinear file, corners, area, and mask area and mask are same size as lat and lon */ if(area_id != NC_MIN_INT) rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); if(msk_id != NC_MIN_INT){ if(msk_rnk_nbr > grd_rnk_nbr){ /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... */ for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=1L; } /* !dmn_idx */ dmn_srt[dmn_idx]=dmn_srt[dmn_idx+1]=0L; dmn_cnt[dmn_idx]=lat_nbr; dmn_cnt[dmn_idx+1]=lon_nbr; } /* !msk_rnk_nbr */ rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ); } /* !msk_id */ /* Corners are on curvilinear corner grid Rectangular boundaries (i.e., lat_bnd=[lat_nbr,2]) DNE for curvilinear grids Read-in *_crn arrays in curvilinear grids, and *_bnd arrays for rectilinear grids Rank-ordering of corner arrays is usually lat_nbr,lon_nbr,grd_crn_nbr as produced/expected by SCRIP However some datasets, e.g., OMI DOMINO use grd_crn_nbr,lat_nbr,lon_nbr Sigh... */ dmn_srt[0]=dmn_srt[1]=dmn_srt[2]=0L; if(lat_bnd_id != NC_MIN_INT && lon_bnd_id != NC_MIN_INT){ rcd=nco_inq_vardimid(in_id,lat_bnd_id,dmn_ids); if((dmn_ids[0] == dmn_id_lat && dmn_ids[1] == dmn_id_lon) || (dmn_ids[0] == dmn_id_lon && dmn_ids[1] == dmn_id_lat)){ dmn_id_bnd=dmn_ids[2]; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; dmn_cnt[2]=grd_crn_nbr; }else if((dmn_ids[1] == dmn_id_lat && dmn_ids[2] == dmn_id_lon) || (dmn_ids[1] == dmn_id_lon && dmn_ids[2] == dmn_id_lat)){ dmn_id_bnd=dmn_ids[0]; dmn_cnt[0]=grd_crn_nbr; dmn_cnt[1]=lat_nbr; dmn_cnt[2]=lon_nbr; flg_crn_grd_lat_lon=True; }else{ (void)fprintf(stdout,"%s: WARNING %s confused by dimension-ordering of latitude bounds variable \"%s\". Will ignore this bounds variable and attempt to extrapolate vertices from centers internally...\n",nco_prg_nm_get(),fnc_nm,lat_nm_in); lat_bnd_id=NC_MIN_INT; lon_bnd_id=NC_MIN_INT; } /* !dmn_ids */ rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_crn,crd_typ); rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_crn,crd_typ); if(flg_crn_grd_lat_lon){ /* Permute corner arrays from non-canonical (grd_nbr,lat_nbr,lon_nbr) to canonical (lat_nbr,lon_nbr,grd_nbr) order */ double *lat_crn_tmp=NULL; double *lon_crn_tmp=NULL; lat_crn_tmp=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_crn_tmp=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); memcpy(lat_crn_tmp,lat_crn,grd_sz_nbr*grd_crn_nbr*sizeof(double)); memcpy(lon_crn_tmp,lon_crn,grd_sz_nbr*grd_crn_nbr*sizeof(double)); for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ for(idx=0;idx<grd_sz_nbr;idx++){ lat_idx=idx/lon_nbr; lon_idx=idx%lon_nbr; /* NB: Variables differ (lat vs. 
lon) but indexes are identical in next two lines */ lat_crn[lat_idx*lon_nbr*grd_crn_nbr+lon_idx*grd_crn_nbr+crn_idx]=lat_crn_tmp[crn_idx*grd_sz_nbr+idx]; lon_crn[lat_idx*lon_nbr*grd_crn_nbr+lon_idx*grd_crn_nbr+crn_idx]=lon_crn_tmp[crn_idx*grd_sz_nbr+idx]; } /* !idx */ } /* !crn_idx */ if(lat_crn_tmp) lat_crn_tmp=(double *)nco_free(lat_crn_tmp); if(lon_crn_tmp) lon_crn_tmp=(double *)nco_free(lon_crn_tmp); /* In this code branch, thought to be executed only for OMI DOMINO grids, re-compute grid center arrays (known to contain missing values) as centroids of supplied grid corners */ for(idx=0;idx<grd_sz_nbr;idx++){ lat_idx=idx/lon_nbr; lon_idx=idx%lon_nbr; lat_ctr[idx]=0.25*(lat_crn[idx*grd_crn_nbr+0L]+lat_crn[idx*grd_crn_nbr+1L]+lat_crn[idx*grd_crn_nbr+2L]+lat_crn[idx*grd_crn_nbr+3L]); lon_ctr[idx]=nco_lon_crn_avg_brnch(lon_crn[idx*grd_crn_nbr+0L],lon_crn[idx*grd_crn_nbr+1L],lon_crn[idx*grd_crn_nbr+2L],lon_crn[idx*grd_crn_nbr+3L]); } /* !idx */ } /* !flg_crd_grd_lat_lon */ } /* !lat_bnd_id */ } /* !flg_grd_crv */ if(flg_grd_2D){ int lon_psn_in=1L; /* [idx] Ordinal position of longitude dimension in rectangular grid variables like area */ int lat_psn_in=0L; /* [idx] Ordinal position of latitude dimension in rectangular grid variables like area */ int tpl_id=NC_MIN_INT; /* [id] ID of template field */ /* Obtain fields that must be present in input file */ dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr; rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); dmn_srt[0L]=0L; dmn_cnt[0L]=lon_nbr; rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); if(lat_ctr[1L] < lat_ctr[0L]) flg_s2n=False; /* Use fields that may be present in input file to override, if necessary, default lon/lat order area and mask are both suitable templates for determining input lat/lon ordering NB: Algorithm assumes area is same rank as grid, and falls-back to mask if that has same rank as grid */ if(area_id != NC_MIN_INT) tpl_id=area_id; else if(msk_id != NC_MIN_INT && msk_rnk_nbr == grd_rnk_nbr) tpl_id=msk_id; if(tpl_id != NC_MIN_INT){ int tpl_rnk_nbr; var_id=tpl_id; /* NB: Template variable rank may exceed two with --msk_[src/dst] (e.g., SST(time,lat,lon)) */ rcd=nco_inq_varndims(in_id,var_id,&tpl_rnk_nbr); rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); /* fxm: Optimize discovery of lat/lon ordering */ for(dmn_idx=0;dmn_idx<grd_rnk_nbr;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_ids[dmn_idx],dmn_nm); rcd+=nco_inq_dimlen(in_id,dmn_ids[dmn_idx],&dmn_sz); if(!strcmp(dmn_nm,lat_dmn_nm)){ assert(dmn_sz == lat_nbr); assert(dmn_idx == 0); lat_psn_in=dmn_idx; } /* !lat */ if(!strcmp(dmn_nm,lon_dmn_nm)){ assert(dmn_sz == lon_nbr); assert(dmn_idx == 1); lon_psn_in=dmn_idx; } /* !lon */ } /* !dmn_idx */ } /* !tpl */ /* Obtain fields that may be present in input file */ if(area_id != NC_MIN_INT){ var_id=area_id; rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); dmn_srt[lat_psn_in]=0L; dmn_cnt[lat_psn_in]=lat_nbr; dmn_srt[lon_psn_in]=0L; dmn_cnt[lon_psn_in]=lon_nbr; rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); } /* !area */ if(msk_id != NC_MIN_INT){ var_id=msk_id; rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); dmn_srt[lat_psn_in]=0L; dmn_cnt[lat_psn_in]=lat_nbr; dmn_srt[lon_psn_in]=0L; dmn_cnt[lon_psn_in]=lon_nbr; if(msk_rnk_nbr != grd_rnk_nbr){ /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... 
*/ for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=1L; } /* !dmn_idx */ dmn_srt[dmn_idx]=dmn_srt[dmn_idx+1]=0L; dmn_cnt[dmn_idx+lat_psn_in]=lat_nbr; dmn_cnt[dmn_idx+lon_psn_in]=lon_nbr; } /* !msk_rnk_nbr */ rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ); } /* !msk */ /* Rectangular boundaries are often on "abbreviated" bounds grid (two bounds per center) Read-in *_crn arrays for 1D and curvilinear grids, and *_bnd arrays for rectilinear grids */ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=bnd_nbr; if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lon_nbr; dmn_cnt[1]=bnd_nbr; if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); } /* !flg_grd_2D */ /* Obtain units, if any, of input area */ if(area_id != NC_MIN_INT){ rcd=nco_inq_att_flg(in_id,area_id,unt_sng,&att_typ,&att_sz); if(rcd == NC_NOERR && att_typ == NC_CHAR){ att_val=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ)); rcd+=nco_get_att(in_id,area_id,unt_sng,att_val,att_typ); /* NUL-terminate attribute before using strstr() */ att_val[att_sz]='\0'; if(!strcasestr(att_val,"radian")) flg_area_sr=False; if(att_val) area_unt=(char *)strdup(att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ } /* !area_id */ /* Additional information that may be required for any input grid */ if(area_id != NC_MIN_INT) has_mss_val_area=nco_mss_val_get_dbl(in_id,area_id,&mss_val_area_dbl); if(msk_id != NC_MIN_INT) has_mss_val_msk=nco_mss_val_get_dbl(in_id,msk_id,&mss_val_msk_dbl); /* 20160115: AMSR coordinates are packed as NC_SHORT with scale_value=0.01f. What to do? Is it worth unpacking everything? */ int flg_pck; /* [flg] Variable is packed on disk */ rcd=nco_inq_var_packing(in_id,lat_ctr_id,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports lat_ctr variable \"%s\" is packed so results unpredictable. HINT: If grid-generation causes problems, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,lat_nm_in); rcd=nco_inq_var_packing(in_id,lon_ctr_id,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports lon_ctr variable \"%s\" is packed so results unpredictable. HINT: If grid-generation causes problems, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,lon_nm_in); /* Close input netCDF file */ nco_close(in_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); /* Above this line, fl_in and in_id refer to input file to be regridded Below this line, fl_out and out_id refer to grid-file to be output */ dfl_lvl=rgr->dfl_lvl; fl_out=rgr->fl_grd; fl_out_fmt=rgr->fl_out_fmt; if(!fl_out){ (void)fprintf(stdout,"%s: ERROR %s filename for inferred SCRIP grid-file is uninitialized, supply it with \"ncks --rgr grid=filename.nc\" or \"ncremap -R '--rgr grid=filename.nc'\"\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT ncremap supplies an automatically generated default name for any output SCRIP grid-file. 
Users of the standalone regridder (ncks) must explicitly specify a name for the inferred SCRIP grid-file.\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* !fl_out */ /* Define output variable values */ int lon_psn; /* [idx] Ordinal position of longitude dimension in rectangular grid dimension-size array */ int lat_psn; /* [idx] Ordinal position of latitude dimension in rectangular grid dimension-size array */ if(grd_rnk_nbr == dmn_nbr_1D){ dmn_sz_int[0]=col_nbr; }else if(grd_rnk_nbr == dmn_nbr_2D){ /* !dmn_nbr_1D */ /* SCRIP introduced [lon,lat] convention because more natural for Fortran NB: This [lon,lat] convention applies ONLY to grid_dims variable Write all other SCRIP variables as [lat,lon] Nonsensical? Yes, but backwards compatibility is priceless */ lon_psn=0; lat_psn=1; dmn_sz_int[lon_psn]=lon_nbr; dmn_sz_int[lat_psn]=lat_nbr; } /* !dmn_nbr_2D */ if(flg_grd_crv){ /* For curvilinear grids first, if necessary, infer corner boundaries Then perform sanity check using same code on inferred and copied grids */ if(False && has_mss_val_bnd && grd_crn_nbr == 4 && !strcmp(lat_bnd_nm,"latt_bounds") && !strcmp(lon_bnd_nm,"lont_bounds") && lat_bnd_id != NC_MIN_INT && lon_bnd_id != NC_MIN_INT){ /* Only CESM CICE is known to fit these constraints Cell center locations are (misleadingly) reported in a regular, rectangular, regional grid Cell corners/boundaries are regular only in SH, curvilinear in NH, i.e., displaced or tripole grid Grid is from southernmost Antarctic Ocean latitude and longitude near 79S,320E to North Pole Nominal centers do not agree with true centers computed from corners CICE may run in decomposed/unstructured mode, each column writes separately to output buffer? This could explain missing coordinates in non-ocean gridcells However, land points are completely masked (grid centers and corners are missing) Oversight? Why not write coordinates for land-masked cells? Regridder needs corners so we fill-in missing boundaries with derived grid Gave up on inferring 20170521 once tri-pole grid complexity became apparent */ const long idx_dbg=rgr->idx_dbg; double lat_ctr_drv; /* [dgr] Latitude center, derived */ double lon_ctr_drv; /* [dgr] Longitude center, derived */ double lat_crn_drv; /* [dgr] Latitude corner, derived */ double lon_crn_drv; /* [dgr] Longitude corner, derived */ long idx_ctr_sth; /* [idx] Index of southern neighbor */ long idx_ctr_nrt; /* [idx] Index of northern neighbor */ long idx_crn_sth; /* [idx] Index of southern neighbor */ long idx_crn_nrt; /* [idx] Index of northern neighbor */ long lon_idx_crr; /* [idx] Current longitude index */ long lon_vld_frs; /* [idx] First valid longitude in latitude row */ long *lon_vld_prv=NULL; /* [idx] Previous valid longitude in latitude row */ long *lon_vld_nxt=NULL; /* [idx] Next valid longitude in latitude row */ lon_vld_prv=(long *)nco_malloc(lon_nbr*sizeof(long)); lon_vld_nxt=(long *)nco_malloc(lon_nbr*sizeof(long)); /* First valid gridcell sets west and south bounds of entire grid */ for(idx_ctr=0;idx_ctr<grd_sz_nbr;idx_ctr++){ if(lat_ctr[idx_ctr] != mss_val_ctr_dbl) break; } /* !grd_sz_nbr */ assert(idx_ctr != grd_sz_nbr); idx_crn=idx_ctr*grd_crn_nbr; lat_sth=lat_crn[idx_crn]; lat_ncr=lat_crn[idx_crn+3]-lat_crn[idx_crn]; /* ul-ll */ lon_wst=lon_crn[idx_crn]; lon_ncr=lon_crn[idx_crn+1]-lon_crn[idx_crn]; /* lr-ll */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s will assume grid is regional CICE in curvilinear format with masked land. 
Will diagnose missing cell boundaries and centers from present boundaries and centers in grid of size lat_nbr=%ld, lon_nbr=%ld.\n",nco_prg_nm_get(),fnc_nm,lat_nbr,lon_nbr); for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx_ctr=lat_idx*lon_nbr; /* Find first valid longitude at this latitude */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break; lon_vld_frs=lon_idx; /* 20170519: Verified all tri-pole grid latitudes have at least one valid point */ if(lon_vld_frs == lon_nbr) abort(); for(lon_idx_crr=0;lon_idx_crr<lon_nbr;lon_idx_crr++){ /* Find previous and next valid longitude for all longitudes at this latitude Cells can be their own previous/next valid longitude */ lon_vld_prv[lon_idx_crr]=-1L; lon_vld_nxt[lon_idx_crr]=-1L; /* Start from current longitude and move left (west)... */ for(lon_idx=lon_idx_crr;lon_idx>=0;lon_idx--) if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break; if(lon_idx >= 0) lon_vld_prv[lon_idx_crr]=lon_idx; /* Start from current longitude and move right (east)... */ for(lon_idx=lon_idx_crr;lon_idx<lon_nbr;lon_idx++) if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break; if(lon_idx < lon_nbr) lon_vld_nxt[lon_idx_crr]=lon_idx; /* Wrap west if previous valid cell not found */ if(lon_vld_prv[lon_idx_crr] == -1L) lon_vld_prv[lon_idx_crr]=lon_vld_prv[lon_nbr-1L]; /* Wrap east if next valid cell not found */ if(lon_vld_nxt[lon_idx_crr] == -1L) lon_vld_nxt[lon_idx_crr]=lon_vld_nxt[0]; } /* !lon_idx_crr */ /* Derive centers and corners for each missing point */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx_ctr=lat_idx*lon_nbr+lon_idx; idx_crn=idx_ctr*grd_crn_nbr; if(lat_ctr[idx_ctr] != mss_val_ctr_dbl){ lat_sth=lat_crn[idx_crn]; lat_ncr=lat_crn[idx_crn+3]-lat_crn[idx_crn]; /* ul-ll */ lat_ctr_drv=lat_sth+0.5*lat_ncr; lat_crn_drv=lat_sth; lon_wst=lon_crn[idx_crn]; lon_ncr=lon_crn[idx_crn+1]-lon_crn[idx_crn]; /* lr-ll */ lon_ctr_drv=lon_wst+lon_ncr*(lon_idx+0.5); if(nco_dbg_lvl_get() >= nco_dbg_std && idx_ctr == idx_dbg) (void)fprintf(stdout,"%s: DEBUG %s idx=%ld lat_idx=%ld, lon_idx=%ld, lat_sth=%g, lat_ncr=%g, lon_wst=%g, lon_ncr=%g\n",nco_prg_nm_get(),fnc_nm,idx_ctr,lat_idx,lon_idx,lat_sth,lat_ncr,lon_wst,lon_ncr); } /* !idx_ctr */ if(lat_ctr[idx_ctr] == mss_val_ctr_dbl){ if(lat_idx != 0L){ /* Not bottom row */ idx_ctr_sth=idx_ctr-lon_nbr; if(lat_ctr[idx_ctr_sth] != mss_val_ctr_dbl){ /* Copy southern corners from northern corners of southern neighbor */ idx_crn_sth=idx_ctr_sth*grd_crn_nbr; lat_crn[idx_crn+0L]=lat_crn[idx_crn_sth+3L]; lat_crn[idx_crn+1L]=lat_crn[idx_crn_sth+2L]; lon_crn[idx_crn+0L]=lon_crn[idx_crn_sth+3L]; lon_crn[idx_crn+1L]=lon_crn[idx_crn_sth+2L]; } /* !mss_val */ } /* !lat_idx */ if(lat_idx != lat_nbr-1L){ /* Not top row */ idx_ctr_nrt=idx_ctr+lon_nbr; if(lat_ctr[idx_ctr_nrt] != mss_val_ctr_dbl){ /* Copy northern corners from southern corners of northern neighbor */ idx_crn_nrt=idx_ctr_nrt*grd_crn_nbr; lat_crn[idx_crn+2L]=lat_crn[idx_crn_nrt+1L]; lat_crn[idx_crn+3L]=lat_crn[idx_crn_nrt+0L]; lon_crn[idx_crn+2L]=lon_crn[idx_crn_nrt+1L]; lon_crn[idx_crn+3L]=lon_crn[idx_crn_nrt+0L]; } /* !mss_val */ } /* !lat_idx */ /* Got to here before giving up Idea was to interpolate missing cell corners between previous and next valid cell */ /* Algorithm assumes lon_wst never changes (too simple for displaced/tri_pole) */ lon_ctr_drv=lon_wst+lon_ncr*(lon_idx+0.5); lon_crn_drv=lon_wst+lon_ncr*lon_idx; if(lon_ctr_drv >= 360.0) lon_ctr_drv-=360.0; lat_ctr[idx_ctr]=lat_ctr_drv; lon_ctr[idx_ctr]=lon_ctr_drv; lat_crn[idx_crn+0L]=lat_crn[idx_crn+1L]=lat_crn_drv;
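/* Illustrative sketch (disabled), not part of the original routine: the derivation above reconstructs a masked cell from the row origin lon_wst and the (assumed uniform) increment lon_ncr, then applies the same 360-degree branch rule. Helper name is hypothetical; arithmetic mirrors the statements above. */
#if 0
static void
nco_drv_msk_cll_sketch /* [fnc] Hypothetical: derive center/west-corner of masked cell */
(const double lon_wst, /* I [dgr] Western edge of first gridcell in row */
 const double lon_ncr, /* I [dgr] Longitude increment, assumed constant across row */
 const long lon_idx, /* I [idx] Longitude index of masked cell */
 double *lon_ctr_drv, /* O [dgr] Derived cell center */
 double *lon_crn_drv) /* O [dgr] Derived western corner */
{
  *lon_crn_drv=lon_wst+lon_ncr*lon_idx;
  *lon_ctr_drv=lon_wst+lon_ncr*(lon_idx+0.5);
  if(*lon_ctr_drv >= 360.0) *lon_ctr_drv-=360.0; /* Same branch rule as code above */
} /* !nco_drv_msk_cll_sketch */
/* e.g., lon_wst=320.0, lon_ncr=1.0, lon_idx=45 gives center 365.5 -> 5.5 after wrap */
#endif /* !0 */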
lat_crn[idx_crn+2L]=lat_crn[idx_crn+3L]=lat_crn_drv+lat_ncr; lon_crn[idx_crn+0L]=lon_crn[idx_crn+3L]=lon_crn_drv; lon_crn[idx_crn+1L]=lon_crn[idx_crn+2L]=lon_crn_drv+lon_ncr; /* Branch-cut rule */ if(lon_crn_drv+lon_ncr >= 360.0){ lon_crn[idx_crn+0L]=lon_crn[idx_crn+3L]=lon_crn_drv-360.0; lon_crn[idx_crn+1L]=lon_crn[idx_crn+2L]=lon_crn_drv+lon_ncr-360.0; } /* !brnch */ } /* !mss_val */ } /* !lon_idx */ } /* !lat_idx */ if(lon_vld_nxt) lon_vld_nxt=(long *)nco_free(lon_vld_nxt); if(lon_vld_prv) lon_vld_prv=(long *)nco_free(lon_vld_prv); } /* !False || !CICE */ if(lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT){ /* Interfaces (ntf) and boundaries (bnd) for curvilinear grids are ill-defined since sides need not follow latitudes nor meridians Simplest representation that contains equivalent information to interfaces/boundaries is grid corners array Diagnose grid corners from midpoints Most curvilinear data (e.g., WRF) is dimensioned lat x lon unlike SCRIP which uses lon x lat Hence we keep lat_ctr, lon_ctr, lat_crn, lon_crn with same order (likely lat x lon) as data file from which we infer grid Always use input order to write skeleton file Change that order, if necessary, to write SCRIP grid file In the interior of a curvilinear grid, nine points contribute to the four corners of a quadrilateral surrounding each center point These are the three points above the point, the three points at the same latitude, and the three points beneath the point In other words, a nine-point stencil is required to define the four corners inferred around each gridcell center It is cleanest to use this stencil only once for all cells in the "real"-grid, including those on the edges, not the interior For this to work cleanly we define an enlarged "fake"-grid where we pre-copy the values that lead to the desired extrapolation on "real"-grid edges Inspired by array-based solutions to integration of PDEs on meshes in Juri Toomre's class NB: implementation is not robust to missing value points in interior of grid. 
Hopefully grids have no missing values in coordinate variables, although they may have missing values in non-grid fields (e.g., mask, temperature) */ double *lat_ctr_fk; /* [dgr] Latitude grid with extrapolated boundaries necessary for 9-point template to find four grid corners for each real center */ double *lon_ctr_fk; /* [dgr] Longitude grid with extrapolated boundaries necessary for 9-point template to find four grid corners for each real center */ lat_ctr_fk=(double *)nco_malloc((lat_nbr+2)*(lon_nbr+2)*sizeof(double)); lon_ctr_fk=(double *)nco_malloc((lat_nbr+2)*(lon_nbr+2)*sizeof(double)); long int idx_rl; /* [idx] Index into real unrolled array */ long int idx_fk; /* [idx] Index into fake unrolled array */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ /* lat idx on real grid */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ /* lon idx on real grid */ idx_rl=lat_idx*lon_nbr+lon_idx; idx_fk=(lat_idx+1)*(lon_nbr+2)+lon_idx+1; /* Copy real grid to interior of fake grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]; lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]; } /* !lon */ } /* !lat */ /* Formulae to extrapolate sides and corners of fake grid are written as a starting lat/lon plus or minus adjustment Adjustment is positive-definite if grid monotonically increases in latitude and longitude from LL to UR 20160111: Use macros/functions to determine longitude adjustments that are always less than 180 This ensures all longitudes contributing to extrapolated longitude are from same branch cut */ /* Bottom row */ lat_idx=0; /* lat idx of extrapolated point on fake grid */ for(lon_idx=1;lon_idx<lon_nbr+1;lon_idx++){ /* lon idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on bottom row of fake grid */ idx_rl=lat_idx*lon_nbr+lon_idx-1; /* 1D-offset of neighboring point on bottom row of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]-(lat_ctr[idx_rl+lon_nbr]-lat_ctr[idx_rl]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]-nco_lon_dff_brnch_dgr(lon_ctr[idx_rl+lon_nbr],lon_ctr[idx_rl]); } /* !lon */ /* Top row */ lat_idx=lat_nbr+1; /* lat idx of extrapolated point on fake grid */ for(lon_idx=1;lon_idx<lon_nbr+1;lon_idx++){ /* lon idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on top row of fake grid */ idx_rl=(lat_nbr-1)*lon_nbr+lon_idx-1; /* 1D-offset of neighboring point on top row of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]+(lat_ctr[idx_rl]-lat_ctr[idx_rl-lon_nbr]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]+nco_lon_dff_brnch_dgr(lon_ctr[idx_rl],lon_ctr[idx_rl-lon_nbr]); } /* !lon */ /* Left side */ lon_idx=0; /* lon idx of extrapolated point on fake grid */ for(lat_idx=1;lat_idx<lat_nbr+1;lat_idx++){ /* lat idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on left side of fake grid */ idx_rl=(lat_idx-1)*lon_nbr+lon_idx; /* 1D-offset of neighboring point on left side of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]-(lat_ctr[idx_rl+1]-lat_ctr[idx_rl]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]-nco_lon_dff_brnch_dgr(lon_ctr[idx_rl+1],lon_ctr[idx_rl]); } /* !lat */ /* Right side */ lon_idx=lon_nbr+1; /* lon idx of extrapolated point on fake grid */ for(lat_idx=1;lat_idx<lat_nbr+1;lat_idx++){ /* lat idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on right side of fake grid */ idx_rl=(lat_idx-1)*lon_nbr+lon_idx-2; /* 1D-offset of neighboring point on right side of real grid */ 
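/* Illustrative sketch (disabled), not part of the original routine: one-dimensional analogue of the "fake"-grid padding used here. Interior points are copied; each end is linearly extrapolated as 2*edge-neighbor, which is what the row/column formulae above and below compute (modulo the branch-cut handling needed for longitude). Helper name is hypothetical. */
#if 0
static void
nco_pad_xtr_sketch /* [fnc] Hypothetical: pad 1-D array with linearly extrapolated ends */
(const double * const ctr, /* I [dgr] Real centers [nbr] */
 double * const ctr_fk, /* O [dgr] Fake centers [nbr+2] */
 const long nbr) /* I [nbr] Number of real centers, nbr >= 2 */
{
  for(long idx=0;idx<nbr;idx++) ctr_fk[idx+1L]=ctr[idx]; /* Copy real grid to interior */
  ctr_fk[0L]=2.0*ctr[0L]-ctr[1L]; /* Extrapolate one increment past first point */
  ctr_fk[nbr+1L]=2.0*ctr[nbr-1L]-ctr[nbr-2L]; /* Extrapolate one increment past last point */
} /* !nco_pad_xtr_sketch */
/* e.g., ctr={10,20,30} -> ctr_fk={0,10,20,30,40} */
#endif /* !0 */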
lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]+(lat_ctr[idx_rl]-lat_ctr[idx_rl-1]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]+nco_lon_dff_brnch_dgr(lon_ctr[idx_rl],lon_ctr[idx_rl-1]); } /* !lat */ /* LL */ lat_ctr_fk[0]=lat_ctr_fk[lon_nbr+2]-(lat_ctr_fk[2*(lon_nbr+2)]-lat_ctr_fk[lon_nbr+2]); lon_ctr_fk[0]=lon_ctr_fk[1]-nco_lon_dff_brnch_dgr(lon_ctr_fk[2],lon_ctr_fk[1]); /* LR */ lat_ctr_fk[lon_nbr+1]=lat_ctr_fk[2*(lon_nbr+2)-1]-(lat_ctr_fk[3*(lon_nbr+2)-1]-lat_ctr_fk[2*(lon_nbr+2)-1]); lon_ctr_fk[lon_nbr+1]=lon_ctr_fk[lon_nbr]+nco_lon_dff_brnch_dgr(lon_ctr_fk[lon_nbr],lon_ctr_fk[lon_nbr-1]); /* UR */ lat_ctr_fk[(lat_nbr+2)*(lon_nbr+2)-1]=lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-1]+(lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-1]-lat_ctr_fk[lat_nbr*(lon_nbr+2)-1]); lon_ctr_fk[(lat_nbr+2)*(lon_nbr+2)-1]=lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-2]+nco_lon_dff_brnch_dgr(lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-2],lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-3]); /* UL */ lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)]=lat_ctr_fk[lat_nbr*(lon_nbr+2)]+(lat_ctr_fk[lat_nbr*(lon_nbr+2)]-lat_ctr_fk[(lat_nbr-1)*(lon_nbr+2)]); lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)]=lon_ctr_fk[lat_nbr*(lon_nbr+2)+1]-nco_lon_dff_brnch_dgr(lon_ctr_fk[lat_nbr*(lon_nbr+2)+2],lon_ctr_fk[lat_nbr*(lon_nbr+2)+1]); if(nco_dbg_lvl_get() >= nco_dbg_std){ long idx_dbg; idx_dbg=rgr->idx_dbg; (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Fake Center [lat,lon]=[%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,lat_ctr_fk[idx_dbg],lon_ctr_fk[idx_dbg]); } /* !dbg */ long int lat_idx_fk; /* [idx] Index into fake (extrapolated) latitude array */ long int lon_idx_fk; /* [idx] Index into fake (extrapolated) longitude array */ long int idx_fk_crn_ll_ctr_ll; long int idx_fk_crn_ll_ctr_lr; long int idx_fk_crn_ll_ctr_ur; long int idx_fk_crn_ll_ctr_ul; long int idx_fk_crn_lr_ctr_ll; long int idx_fk_crn_lr_ctr_lr; long int idx_fk_crn_lr_ctr_ur; long int idx_fk_crn_lr_ctr_ul; long int idx_fk_crn_ur_ctr_ll; long int idx_fk_crn_ur_ctr_lr; long int idx_fk_crn_ur_ctr_ur; long int idx_fk_crn_ur_ctr_ul; long int idx_fk_crn_ul_ctr_ll; long int idx_fk_crn_ul_ctr_lr; long int idx_fk_crn_ul_ctr_ur; long int idx_fk_crn_ul_ctr_ul; double *crn_lat; double *crn_lon; crn_lat=(double *)nco_malloc(grd_crn_nbr*sizeof(double)); crn_lon=(double *)nco_malloc(grd_crn_nbr*sizeof(double)); size_t wrn_nbr_max=20; size_t wrn_nbr=0; for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ /* 9-point template valid at all interior (non-edge) points in real grid, and at all points (including edges) in fake grid Read variables idx_crn_ll_ctr_ul as "index of upper left gridcell center that contributes to lower-left gridcell corner" Algorithms execute in counter-clockwise (CCW) direction: lower-left, lower-right, upper-right, upper-left lat_idx and lon_idx are true indices and are used to write into grd_crn_lat/lon arrays lat_idx_fk and lon_idx_fk are indices into fake arrays with extrapolated boundaries and are used to read data from fake arrays */ lon_idx_fk=lon_idx+1; lat_idx_fk=lat_idx+1; idx_rl=lat_idx*lon_nbr+lon_idx; idx_fk=lat_idx_fk*(lon_nbr+2)+lon_idx_fk; /* Determine index into fake array (valid everywhere it is applied) Comments after each equation are formula for real index (valid only at interior gridcells) */ idx_fk_crn_ll_ctr_ll=idx_fk-(lon_nbr+2)-1; // (lat_idx-1)*lon_nbr+lon_idx-1 idx_fk_crn_ll_ctr_lr=idx_fk-(lon_nbr+2); // (lat_idx-1)*lon_nbr+lon_idx idx_fk_crn_ll_ctr_ur=idx_fk; // lat_idx*lon_nbr+lon_idx idx_fk_crn_ll_ctr_ul=idx_fk-1; // lat_idx*lon_nbr+lon_idx-1; idx_fk_crn_lr_ctr_ll=idx_fk-(lon_nbr+2); // 
(lat_idx-1)*lon_nbr+lon_idx idx_fk_crn_lr_ctr_lr=idx_fk-(lon_nbr+2)+1; // (lat_idx-1)*lon_nbr+lon_idx+1 idx_fk_crn_lr_ctr_ur=idx_fk+1; // lat_idx*lon_nbr+lon_idx+1 idx_fk_crn_lr_ctr_ul=idx_fk; // lat_idx*lon_nbr+lon_idx; idx_fk_crn_ur_ctr_ll=idx_fk; // lat_idx*lon_nbr+lon_idx idx_fk_crn_ur_ctr_lr=idx_fk+1; // lat_idx*lon_nbr+lon_idx+1 idx_fk_crn_ur_ctr_ur=idx_fk+(lon_nbr+2)+1; // (lat_idx+1)*lon_nbr+lon_idx+1 idx_fk_crn_ur_ctr_ul=idx_fk+(lon_nbr+2); // (lat_idx+1)*lon_nbr+lon_idx; idx_fk_crn_ul_ctr_ll=idx_fk-1; // lat_idx*lon_nbr+lon_idx-1 idx_fk_crn_ul_ctr_lr=idx_fk; // lat_idx*lon_nbr+lon_idx idx_fk_crn_ul_ctr_ur=idx_fk+(lon_nbr+2); // (lat_idx+1)*lon_nbr+lon_idx idx_fk_crn_ul_ctr_ul=idx_fk+(lon_nbr+2)-1; // (lat_idx+1)*lon_nbr+lon_idx-1; /* 20160111: Algorithm requires that all longitudes in template be on same "branch cut" If, say, LL longitude is 179.0 and LR longitude is -179.0 then their sum and average are zero, not 180.0 or -180.0 as desired Routines labeled "*_brnch" in the following ensure that branch-cut rules are followed */ idx_crn_ll=grd_crn_nbr*idx_rl+0; lat_crn[idx_crn_ll]=0.25*(lat_ctr_fk[idx_fk_crn_ll_ctr_ll]+lat_ctr_fk[idx_fk_crn_ll_ctr_lr]+lat_ctr_fk[idx_fk_crn_ll_ctr_ur]+lat_ctr_fk[idx_fk_crn_ll_ctr_ul]); lon_crn[idx_crn_ll]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ll_ctr_ll],lon_ctr_fk[idx_fk_crn_ll_ctr_lr],lon_ctr_fk[idx_fk_crn_ll_ctr_ur],lon_ctr_fk[idx_fk_crn_ll_ctr_ul]); idx_crn_lr=grd_crn_nbr*idx_rl+1; lat_crn[idx_crn_lr]=0.25*(lat_ctr_fk[idx_fk_crn_lr_ctr_ll]+lat_ctr_fk[idx_fk_crn_lr_ctr_lr]+lat_ctr_fk[idx_fk_crn_lr_ctr_ur]+lat_ctr_fk[idx_fk_crn_lr_ctr_ul]); lon_crn[idx_crn_lr]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_lr_ctr_ll],lon_ctr_fk[idx_fk_crn_lr_ctr_lr],lon_ctr_fk[idx_fk_crn_lr_ctr_ur],lon_ctr_fk[idx_fk_crn_lr_ctr_ul]); idx_crn_ur=grd_crn_nbr*idx_rl+2; lat_crn[idx_crn_ur]=0.25*(lat_ctr_fk[idx_fk_crn_ur_ctr_ll]+lat_ctr_fk[idx_fk_crn_ur_ctr_lr]+lat_ctr_fk[idx_fk_crn_ur_ctr_ur]+lat_ctr_fk[idx_fk_crn_ur_ctr_ul]); lon_crn[idx_crn_ur]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ur_ctr_ll],lon_ctr_fk[idx_fk_crn_ur_ctr_lr],lon_ctr_fk[idx_fk_crn_ur_ctr_ur],lon_ctr_fk[idx_fk_crn_ur_ctr_ul]); idx_crn_ul=grd_crn_nbr*idx_rl+3; lat_crn[idx_crn_ul]=0.25*(lat_ctr_fk[idx_fk_crn_ul_ctr_ll]+lat_ctr_fk[idx_fk_crn_ul_ctr_lr]+lat_ctr_fk[idx_fk_crn_ul_ctr_ur]+lat_ctr_fk[idx_fk_crn_ul_ctr_ul]); lon_crn[idx_crn_ul]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ul_ctr_ll],lon_ctr_fk[idx_fk_crn_ul_ctr_lr],lon_ctr_fk[idx_fk_crn_ul_ctr_ur],lon_ctr_fk[idx_fk_crn_ul_ctr_ul]); crn_lat[0]=lat_crn[idx_crn_ll]; crn_lat[1]=lat_crn[idx_crn_lr]; crn_lat[2]=lat_crn[idx_crn_ur]; crn_lat[3]=lat_crn[idx_crn_ul]; crn_lon[0]=lon_crn[idx_crn_ll]; crn_lon[1]=lon_crn[idx_crn_lr]; crn_lon[2]=lon_crn[idx_crn_ur]; crn_lon[3]=lon_crn[idx_crn_ul]; /* 20210411: From 2016 until today, nco_ccw_chk() overwrote fourth (UL) with first (LL) corner */ flg_ccw=nco_ccw_chk(crn_lat,crn_lon,grd_crn_nbr,idx_ccw,rcr_lvl); if(!flg_ccw && wrn_nbr < wrn_nbr_max){ (void)fprintf(stdout,"%s: %s WARNING reports non-CCW gridcell at idx=%li, (lat,lon)_idx=(%li,%li), (lat,lon) = (%g, %g)\n",nco_prg_nm_get(),fnc_nm,idx_rl,lat_idx,lon_idx,lat_ctr[lat_idx],lon_ctr[lon_idx]); wrn_nbr++; if(wrn_nbr == wrn_nbr_max) (void)fprintf(stdout,"%s: %s INFO Number of non-CCW errors reached maximum = %li, not printing anymore\n",nco_prg_nm_get(),fnc_nm,wrn_nbr_max); } /* endif */ lat_crn[idx_crn_ll]=crn_lat[0]; lat_crn[idx_crn_lr]=crn_lat[1]; lat_crn[idx_crn_ur]=crn_lat[2]; lat_crn[idx_crn_ul]=crn_lat[3]; lon_crn[idx_crn_ll]=crn_lon[0]; 
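/* Illustrative sketch (disabled), not part of the original routine: for small planar cells the orientation test that nco_ccw_chk() performs (its actual implementation is not reproduced here) reduces to the sign of the cross-product of the first two edges: a positive z-component means corners A->B->C turn counter-clockwise in the (lon,lat) plane. Helper name is hypothetical; corners must share one branch cut. */
#if 0
#include <stdbool.h>
static bool /* O [flg] First three corners are in CCW order */
nco_ccw_sketch /* [fnc] Hypothetical planar CCW orientation test */
(const double *crn_lat, /* I [dgr] Corner latitudes, at least three */
 const double *crn_lon) /* I [dgr] Corner longitudes, same branch cut */
{
  /* z-component of (B-A) x (C-A) with x=lon, y=lat */
  const double crs_prd_z=
    (crn_lon[1]-crn_lon[0])*(crn_lat[2]-crn_lat[0])-
    (crn_lat[1]-crn_lat[0])*(crn_lon[2]-crn_lon[0]);
  return crs_prd_z > 0.0;
} /* !nco_ccw_sketch */
/* e.g., (lat,lon) corners LL=(0,0), LR=(0,1), UR=(1,1): crs_prd_z=1.0 > 0 so CCW */
#endif /* !0 */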
lon_crn[idx_crn_lr]=crn_lon[1]; lon_crn[idx_crn_ur]=crn_lon[2]; lon_crn[idx_crn_ul]=crn_lon[3]; } /* !lon */ } /* !lat */ if(lat_ctr_fk) lat_ctr_fk=(double *)nco_free(lat_ctr_fk); if(lon_ctr_fk) lon_ctr_fk=(double *)nco_free(lon_ctr_fk); if(crn_lon) crn_lon=(double *)nco_free(crn_lon); if(crn_lat) crn_lat=(double *)nco_free(crn_lat); } /* !(lat_bnd_id && lon_bnd_id) */ } /* !flg_grd_crv */ if(flg_1D_psd_rct_bnd){ double lon_brnch_min; double lon_brnch_max; double lon_dff; assert(grd_crn_nbr == 4); /* Make boundaries that were provided as pseudo-rectangular branch-cut-compliant */ for(col_idx=0;col_idx<col_nbr;col_idx++){ lon_brnch_min=(lon_bnd[2*col_idx] <= lon_bnd[2*col_idx+1]) ? lon_bnd[2*col_idx] : lon_bnd[2*col_idx+1]; lon_brnch_max=(lon_bnd[2*col_idx] >= lon_bnd[2*col_idx+1]) ? lon_bnd[2*col_idx] : lon_bnd[2*col_idx+1]; lon_dff=lon_brnch_max-lon_brnch_min; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports 1D pseudo-rectangular bounds branch-cut straddle at col_idx=%ld lon_brnch_max, lon_brnch_min, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,col_idx,lon_brnch_max,lon_brnch_min,lon_dff); lon_brnch_max-=360.0; }else if(lon_dff <= -180.0){ lon_brnch_max+=360.0; } /* !lon_dff */ /* Extra condition to convert CW bounds to CCW bounds (necessary for OCO2) */ if(lon_brnch_min <= lon_brnch_max){ lon_bnd[2*col_idx]=lon_brnch_min; lon_bnd[2*col_idx+1]=lon_brnch_max; }else{ lon_bnd[2*col_idx]=lon_brnch_max; lon_bnd[2*col_idx+1]=lon_brnch_min; } /* end else */ } /* !col_idx */ /* Convert boundaries that were provided as pseudo-rectangular to corners */ for(col_idx=0;col_idx<col_nbr;col_idx++){ idx=grd_crn_nbr*col_idx; /* fxm: OCO2 provides boundaries in CW not CCW orientation */ lon_crn[idx]=lon_bnd[2*col_idx]; /* LL */ lon_crn[idx+1]=lon_bnd[2*col_idx+1]; /* LR */ lon_crn[idx+2]=lon_bnd[2*col_idx+1]; /* UR */ lon_crn[idx+3]=lon_bnd[2*col_idx]; /* UL */ lat_crn[idx]=lat_bnd[2*col_idx]; /* LL */ lat_crn[idx+1]=lat_bnd[2*col_idx]; /* LR */ lat_crn[idx+2]=lat_bnd[2*col_idx+1]; /* UR */ lat_crn[idx+3]=lat_bnd[2*col_idx+1]; /* UL */ } /* !col_idx */ } /* !flg_1D_psd_rct_bnd */ if(flg_grd_crv || flg_1D_psd_rct_bnd){ /* As of 20160308, use same sanity check for 1D pseudo-rectangular grids as for curvilinear grids Pseudo-rectangular grids rely on user-produced boundaries that may be psychotic (CW, non-branch-cut) Starting 20151205, use same sanity check for both inferred and copied curvilinear grids 20151129: Curvilinear extrapolation technique above yields corners outside [-90.0,90.0], [-180.0,360.0] Also, it may assume input is ascending swath and fail for descending swaths Complications not fully addressed: Swaths may (verify this) turn from ascending to descending, or vice versa, when satellite crosses latitude extrema Swaths may cross the date-line (and back!) 
*/ /* Determine numeric bounds of input coordinate system */ double lon_min_min; double lon_max_max; nco_bool NCO_LON_0_TO_360=True; if(has_mss_val_ctr){ for(idx=0;idx<grd_sz_nbr;idx++) if(lon_ctr[idx] != mss_val_ctr_dbl && lon_ctr[idx] < 0.0) break; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(lon_ctr[idx] < 0.0) break; } /* !has_mss_val_ctr */ if(idx != grd_sz_nbr) NCO_LON_0_TO_360=False; if(NCO_LON_0_TO_360){ lon_min_min=0.0; lon_max_max=360.0; }else{ lon_min_min=-180.0; lon_max_max=180.0; } /* !NCO_LON_0_TO_360 */ /* Correct for extrapolation outside boundaries */ for(idx=0;idx<grd_sz_nbr*grd_crn_nbr;idx++){ idx_ctr=idx/grd_crn_nbr; if(has_mss_val_ctr) if(lat_ctr[idx_ctr] == mss_val_ctr_dbl) continue; if(lat_crn[idx] < -90.0 || lat_crn[idx] > 90.0 || lon_crn[idx] < lon_min_min || lon_crn[idx] > lon_max_max){ idx_crn_ll=grd_crn_nbr*idx_ctr+0; idx_crn_lr=grd_crn_nbr*idx_ctr+1; idx_crn_ur=grd_crn_nbr*idx_ctr+2; idx_crn_ul=grd_crn_nbr*idx_ctr+3; if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s reports %s corner outside canonical bounds at idx = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,(lat_bnd_id == NC_MIN_INT) ? "inferred" : "copied",idx_ctr,lat_ctr[idx_ctr],lon_ctr[idx_ctr],lat_crn[idx_crn_ll],lon_crn[idx_crn_ll],lat_crn[idx_crn_lr],lon_crn[idx_crn_lr],lat_crn[idx_crn_ur],lon_crn[idx_crn_ur],lat_crn[idx_crn_ul],lon_crn[idx_crn_ul]); /* Restrict grid to real latitudes and to the 360-degree range detected from input cell-centers */ if(lat_crn[idx] < -90.0) lat_crn[idx]=-90.0; if(lat_crn[idx] > 90.0) lat_crn[idx]=90.0; if(lon_crn[idx] < lon_min_min) lon_crn[idx]+=360.0; if(lon_crn[idx] > lon_max_max) lon_crn[idx]-=360.0; } /* !sanity */ } /* !idx */ /* Vertices (for valid points) are now within 360 degrees (either [0,360] or [-180,180]) implied by input coordinate system Curvilinear inferred grid are, by construction, branch-cut compliant fxm: Curvilinear and 1D pseudo-rectangular grids prescribed by (i.e., read-in from) input may not be branch-cut compliant */ if(nco_dbg_lvl_get() >= nco_dbg_std){ long idx_dbg; idx_dbg=rgr->idx_dbg; idx_crn_ll=grd_crn_nbr*idx_dbg+0; idx_crn_lr=grd_crn_nbr*idx_dbg+1; idx_crn_ur=grd_crn_nbr*idx_dbg+2; idx_crn_ul=grd_crn_nbr*idx_dbg+3; (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,lat_ctr[idx_dbg],lon_ctr[idx_dbg],lat_crn[idx_crn_ll],lon_crn[idx_crn_ll],lat_crn[idx_crn_lr],lon_crn[idx_crn_lr],lat_crn[idx_crn_ur],lon_crn[idx_crn_ur],lat_crn[idx_crn_ul],lon_crn[idx_crn_ul]); } /* !dbg */ } /* !flg_grd_crv || flg_1D_psd_rct_bnd */ if(flg_grd_crv){ /* Copy centers into empty output array */ for(idx=0;idx<grd_sz_nbr;idx++){ grd_ctr_lat[idx]=lat_ctr[idx]; grd_ctr_lon[idx]=lon_ctr[idx]; } /* !idx */ /* Copy inferred or copied (from input) sanity-checked corners into empty output array */ for(idx=0;idx<grd_sz_nbr*grd_crn_nbr;idx++){ grd_crn_lat[idx]=lat_crn[idx]; grd_crn_lon[idx]=lon_crn[idx]; } /* !idx */ } /* !flg_grd_crv */ /* 20150512 Many 2D datasets have bad bounds Primary example is Gaussian grids archived by CESM models that use midpoint rule rather than iterate to compute interfaces from quadrature points Such files have correct gw arrays and incorrect cell bounds flg_dgn_bnd allows nco_grd_nfr() to override faulty boundaries in file with correct bounds */ const nco_bool flg_dgn_bnd=rgr->flg_dgn_bnd; /* [flg] Diagnose rather than copy inferred bounds */ 
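/* Illustrative sketch, not executed (editor-added, hedged): for a south-to-north grid the midpoint rule applied below derives interfaces from centers as lat_ntf[0]=lat_ctr[0]-0.5*(lat_ctr[1]-lat_ctr[0]), lat_ntf[idx]=0.5*(lat_ctr[idx-1]+lat_ctr[idx]) for interior idx, and lat_ntf[lat_nbr]=lat_ctr[lat_nbr-1]+0.5*(lat_ctr[lat_nbr-1]-lat_ctr[lat_nbr-2]). Worked example with hypothetical equiangular centers [-60,0,60]: interfaces are [-90,-30,30,90]. The rule is exact for equiangular grids and only approximate for Gaussian grids, hence the iterative refinement in the Gaussian branch below */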
const long lat_nbr_hlf=lat_nbr/2L; // [nbr] Half number of latitudes (e.g., lat_nbr_hlf=32 for lat_nbr=64 and 65) if(flg_grd_2D){ if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){ if(nco_dbg_lvl_get() >= nco_dbg_std && flg_dgn_bnd) (void)fprintf(stdout,"%s: INFO %s will diagnose cell boundaries from cell centers...\n",nco_prg_nm_get(),fnc_nm); /* Derive interfaces (ntf) and bounds (bnd) from midpoints approximation applied to center data NB: Simplistically derived interfaces (ntf) only valid on some rectangular grids (not on Gaussian grids) These inferred-from-midpoint interfaces/bounds are overwritten in next block once lat grid is known */ if(flg_s2n) lat_ntf[0L]=lat_ctr[0L]-0.5*(lat_ctr[1L]-lat_ctr[0L]); else lat_ntf[0L]=lat_ctr[0L]+0.5*(lat_ctr[0L]-lat_ctr[1L]); if(lat_ntf[0L] < -90.0) lat_ntf[0L]=-90.0; /* NB: lat_ntf[0] can be same as lat_ctr[0] for cap grid */ if(lat_ntf[0L] > 90.0) lat_ntf[0L]=90.0; for(lat_idx=0L;lat_idx<lat_nbr-1L;lat_idx++) lat_ntf[lat_idx+1L]=0.5*(lat_ctr[lat_idx]+lat_ctr[lat_idx+1L]); if(flg_s2n) lat_ntf[lat_nbr]=lat_ctr[lat_nbr-1L]+0.5*(lat_ctr[lat_nbr-1L]-lat_ctr[lat_nbr-2L]); else lat_ntf[lat_nbr]=lat_ctr[lat_nbr-1L]-0.5*(lat_ctr[lat_nbr-2L]-lat_ctr[lat_nbr-1L]); if(lat_ntf[lat_nbr] > 90.0) lat_ntf[lat_nbr]=90.0; /* NB: lat_ntf[lat_nbr] can be same as lat_ctr[lat_nbr-1] for cap grid */ if(lat_ntf[lat_nbr] < -90.0) lat_ntf[lat_nbr]=-90.0; /* NB: lat_ntf[lat_nbr] can be same as lat_ctr[lat_nbr-1] for cap grid */ if(flg_s2n) lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); /* fabs() ensures positive-definite span for N->S grids */ lon_ntf[0L]=lon_ctr[0L]-0.5*(lon_ctr[1L]-lon_ctr[0L]); for(lon_idx=0;lon_idx<lon_nbr-1L;lon_idx++) lon_ntf[lon_idx+1L]=0.5*(lon_ctr[lon_idx]+lon_ctr[lon_idx+1L]); lon_ntf[lon_nbr]=lon_ctr[lon_nbr-1L]+0.5*(lon_ctr[lon_nbr-1L]-lon_ctr[lon_nbr-2L]); lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L]; for(idx=0;idx<lon_nbr;idx++){ lon_bnd[2L*idx]=lon_ntf[idx]; lon_bnd[2L*idx+1L]=lon_ntf[idx+1L]; } /* !idx */ for(idx=0;idx<lat_nbr;idx++){ lat_bnd[2L*idx]=lat_ntf[idx]; lat_bnd[2L*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ }else{ /* !(lat_bnd_id && lon_bnd_id) */ /* Derive interfaces (ntf) from bounds (bnd) data on disk */ for(idx=0;idx<lon_nbr;idx++) lon_ntf[idx]=lon_bnd[2L*idx]; lon_ntf[lon_nbr]=lon_bnd[2L*lon_nbr-1L]; for(idx=0;idx<lat_nbr;idx++) lat_ntf[idx]=lat_bnd[2L*idx]; lat_ntf[lat_nbr]=lat_bnd[2L*lat_nbr-1L]; lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); /* fabs() ensures positive-definite span for N->S grids */ lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L]; } /* !(lat_bnd_id && lon_bnd_id) */ } /* !flg_grd_2D */ if(flg_grd_2D){ /* Diagnose type of two-dimensional input grid by testing second latitude center against formulae */ double lat_ctr_tst_eqa; double lat_ctr_tst_fv; if(flg_s2n) lat_ctr_tst_eqa=lat_ntf[0L]+lat_spn*1.5/lat_nbr; else lat_ctr_tst_eqa=lat_ntf[0L]-lat_spn*1.5/lat_nbr; if(flg_s2n) lat_ctr_tst_fv=lat_ntf[0L]+lat_spn/(lat_nbr-1L); else lat_ctr_tst_fv=lat_ntf[0L]-lat_spn/(lat_nbr-1L); double lat_ctr_tst_gss; /* In diagnosing grids, agreement with input to single-precision is "good enough for government work" Hence some comparisons cast from double to float before comparison 20150526: T42 grid from SCRIP and related maps are only accurate to ~eight digits 20150611: map_ne120np4_to_fv801x1600_bilin.150418.nc has yc_b[1600]=-89.775000006 not expected exact value lat_ctr[1]=-89.775000000000006 20170521: T62 grid from NCEP-NCAR Reanalysis 1 worse than single precision, has yc_[192]=-86.6531 not expected exact value 
lat_ctr[1]=-86.6531671712612, relative difference is 7.86021e-07 20191008: T62 grid from NCEP-NCAR Reanalysis 2 worse than single precision, has yc_[92]=-86.6531 not expected exact value lat_ctr[1]=-86.6531671712612, relative difference is 7.86021e-07 */ if(nco_dbg_lvl_get() >= nco_dbg_scl && !flg_s2n) (void)fprintf(stderr,"%s: INFO %s reports that grid inferral has detected a 2D grid that runs from north-to-south, not south-to-north. Support for creating/inferring 2D N-to-S grids was added in NCO 4.7.7 (September, 2018) and should work fine.\nHINT: If present command fails, report problem to developers and then re-try inferring grid after reversing input dataset's latitude coordinate (with, e.g., ncpdq -a time,-lat,lon in.nc out.nc)\n",nco_prg_nm_get(),fnc_nm); if((float)lat_ctr[1L] == (float)lat_ctr_tst_eqa) lat_typ=nco_grd_lat_eqa; if((float)lat_ctr[1L] == (float)lat_ctr_tst_fv) lat_typ=nco_grd_lat_fv; double *lat_sin=NULL_CEWI; // [frc] Sine of Gaussian latitudes double precision double *wgt_Gss=NULL; // [frc] Gaussian weights double precision if(lat_typ == nco_grd_lat_nil){ /* Check for Gaussian grid */ lat_sin=(double *)nco_malloc(lat_nbr*sizeof(double)); wgt_Gss=(double *)nco_malloc(lat_nbr*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr,flg_s2n,lat_sin,wgt_Gss); lat_ctr_tst_gss=rdn2dgr*asin(lat_sin[1L]); /* Gaussian weights on output grid will be double-precision accurate Grid itself is kept as user-specified so area diagnosed by ESMF_RegridWeightGen may be slightly inconsistent with weights */ const double eps_rlt_cnv_gss=1.0e-6; // Convergence criterion (1.0e-7 fails for NCEP NCAR Reanalysis 1!) if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG %s reports lat_ctr[1]=%g, lat_ctr_tst_gss=%g, fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss))=%g\n",nco_prg_nm_get(),fnc_nm,lat_ctr[1],lat_ctr_tst_gss,fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss))); if(fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss)) < eps_rlt_cnv_gss) lat_typ=nco_grd_lat_gss; } /* !Gaussian */ if(lat_typ == nco_grd_lat_nil){ /* If still of unknown type, this 2D grid may be weird This occurs, e.g., with POP3 destination grid Change gridtype from nil (which means not-yet-set) to unknown (which means none of the others matched) */ lat_typ=nco_grd_lat_unk; } /* !nil */ /* Currently grd_lat_typ and grd_2D_typ are equivalent, though that may be relaxed in future */ if(lat_typ == nco_grd_lat_unk) grd_typ=nco_grd_2D_unk; else if(lat_typ == nco_grd_lat_gss) grd_typ=nco_grd_2D_gss; else if(lat_typ == nco_grd_lat_fv) grd_typ=nco_grd_2D_fv; else if(lat_typ == nco_grd_lat_eqa) grd_typ=nco_grd_2D_eqa; else assert(False); /* Diagnose latitude interfaces from gridcell centers (if boundaries not provided) */ if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){ //if(flg_s2n) lat_nrt=lat_ntf[lat_nbr]; else lat_nrt=lat_ntf[0L]; lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); switch(lat_typ){ case nco_grd_lat_fv: lat_ncr=lat_spn/(lat_nbr-1L); if(flg_s2n) lat_ntf[1L]=lat_ntf[0L]+0.5*lat_ncr; else lat_ntf[1L]=lat_ntf[0L]-0.5*lat_ncr; for(lat_idx=2;lat_idx<lat_nbr;lat_idx++) if(flg_s2n) lat_ntf[lat_idx]=lat_ntf[1L]+(lat_idx-1L)*lat_ncr; else lat_ntf[lat_idx]=lat_ntf[1L]-(lat_idx-1L)*lat_ncr; break; case nco_grd_lat_eqa: lat_ncr=lat_spn/lat_nbr; for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) if(flg_s2n) lat_ntf[lat_idx]=lat_ntf[0L]+lat_idx*lat_ncr; else lat_ntf[lat_idx]=lat_ntf[0L]-lat_idx*lat_ncr; break; case nco_grd_lat_gss: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); /* First 
guess for lat_ntf is midway between Gaussian abscissae */ for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=0.5*(lat_ctr[lat_idx-1L]+lat_ctr[lat_idx]); /* Iterate guess until area between interfaces matches Gaussian weight */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++){ double fofx_at_x0; /* [frc] Function to iterate evaluated at current guess */ double dfdx_at_x0; /* [frc] Derivative of equation evaluated at current guess */ // 20190531: Wuyin Lin reports this convergence criterion fails on ECMWF F640 grid // Probably because latitude iterations assume s2n grid and ECMWF is n2s // Possibly also because latitude coordinates are stored in single precision // Implement precision-dependent convergence criterion, e.g., 1.0e-15 and 1.0e-7 for double- and single-precision, respectively? const double eps_rlt_cnv=1.0e-15; // Convergence criterion (1.0e-16 pushes double precision to the brink) itr_cnt=0; lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; while(fabs(fofx_at_x0) > eps_rlt_cnv){ /* Newton-Raphson iteration: Let x=lat_ntf[lat_idx], y0=lat_ntf[lat_idx-1], gw = Gaussian weight (exact solution) f(x)=sin(dgr2rdn*x)-sin(dgr2rdn*y0)-gw=0 # s2n grid f(x)=sin(dgr2rdn*y0)-sin(dgr2rdn*x)-gw=0 # n2s grid dfdx(x)= dgr2rdn*cos(dgr2rdn*x) # s2n grid dfdx(x)=-dgr2rdn*cos(dgr2rdn*x) # n2s grid x_better=x0-f(x0)/f'(x0) */ dfdx_at_x0=dgr2rdn*cos(dgr2rdn*lat_ntf[lat_idx]); if(!flg_s2n) dfdx_at_x0=-dfdx_at_x0; lat_ntf[lat_idx]+=fofx_at_x0/dfdx_at_x0; /* NB: not sure why this is minus not plus but it works :) */ lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; if(++itr_cnt > itr_nbr_max){ (void)fprintf(stdout,"%s: ERROR %s reports convergence only %g after %d iterations for lat_idx = %ld\n",nco_prg_nm_get(),fnc_nm,fabs(fofx_at_x0),itr_nbr_max,lat_idx); nco_exit(EXIT_FAILURE); } /* endif */ } /* !while */ } /* !lat_idx */ /* Use Gaussian grid symmetry to obtain same interfaces in both hemispheres (avoids cumulative rounding errors) */ if(lat_nbr%2){ /* lat_nbr is odd */ for(lat_idx=1L;lat_idx<=lat_nbr_hlf+1L;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx+1L]; }else{ /* lat_nbr is even */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx]; } /* !flg_lat_evn */ if(lat_sin) lat_sin=(double *)nco_free(lat_sin); break; case nco_grd_lat_unk: /* No generic formula exists so use interfaces already read or diagnosed as midpoints between centers */ break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ if(lat_typ == nco_grd_lat_gss){ /* 20170510: First approximation above to exterior interfaces for Gaussian grid are ~ +/-89 degrees Loops below recompute interior interfaces only Southern- and northern-most interfaces must be explicitly assigned Inferral test for Gaussian grid _assumes_ global grid Hence WLOG can assign [-90.0, 90.0] to Gaussian grid exterior boundaries */ if(flg_s2n) lat_ntf[0L]=-90.0; else lat_ntf[0L]=90.0; if(flg_s2n) lat_ntf[lat_nbr]=90.0; else lat_ntf[lat_nbr]=-90.0; } /* !nco_grd_lat_gss */ /* Now that final latitude interfaces are known for all grid-types, assign to boundaries, overwriting provisional values stored there earlier */ for(idx=0;idx<lat_nbr;idx++){ lat_bnd[2L*idx]=lat_ntf[idx]; lat_bnd[2L*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ } /* !(lat_bnd_id && lon_bnd_id) */ /* Use centers and boundaries to diagnose latitude weights */ 
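/* Worked example for the weight formula in the switch below (editor-added comment, not executed): a hypothetical interface pair [0,30] dgr yields lat_wgt=fabs(sin(dgr2rdn*30.0)-sin(dgr2rdn*0.0))=0.5, and for a global grid the weights sum to sin(90 dgr)-sin(-90 dgr)=2.0, the expected total against which the fuzzy normalization test further below compares lat_wgt_ttl */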
switch(lat_typ){ case nco_grd_lat_eqa: case nco_grd_lat_fv: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); break; case nco_grd_lat_gss: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=wgt_Gss[lat_idx]; break; case nco_grd_lat_unk: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING %s reports unknown input latitude grid-type. Guessing that weights for a grid of rectangles are OK.\n",nco_prg_nm_get(),fnc_nm); break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Diagnose type of longitude grid by testing second longitude center against formulae */ lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L]; lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; if(lon_typ == nco_grd_lon_nil){ if( (float)lon_ctr[0L] == 0.0f && (float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_Grn_ctr; else if((float)lon_ctr[0L] == -180.0f && (float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_180_ctr; else if((float)lon_ntf[0L] == 0.0f && (float)lon_ntf[1L] == (float)(lon_ntf[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_Grn_wst; else if((float)lon_ntf[0L] == -180.0f && (float)lon_ntf[1L] == (float)(lon_ntf[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_180_wst; else if((float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_bb; else lon_typ=nco_grd_lon_unk; } /* !lon_typ */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input 2D grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_2D_sng(grd_typ)); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input latitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lat_sng(lat_typ)); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input longitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lon_sng(lon_typ)); } /* !flg_grd_2D */ if(flg_grd_2D){ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0;idx<lat_nbr;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? "]\n" : ", "); } /* end loop over lat */ for(idx=0;idx<lon_nbr;idx++){ (void)fprintf(stdout,"lon[%li] = %g, vertices = ",idx,lon_ctr[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lon_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? 
"]\n" : ", "); } /* end loop over lon */ } /* endif dbg */ /* Fuzzy test of latitude weight normalization */ //const double eps_rlt_max=1.0e-14; /* [frc] Round-off error tolerance: Used 1.0e-14 until 20180904 */ const double eps_rlt_max=1.0e-12; /* [frc] Round-off error tolerance: Used 1.0e-12 since 20180904 */ lat_wgt_ttl=0.0; for(idx=0;idx<lat_nbr;idx++) lat_wgt_ttl+=lat_wgt[idx]; if(grd_typ == nco_grd_2D_fv || grd_typ == nco_grd_2D_eqa){ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd[2*(lat_nbr-1)+1L])-sin(dgr2rdn*lat_bnd[0L])); if(grd_typ != nco_grd_2D_unk && fabs(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc) > eps_rlt_max){ (void)fprintf(stdout,"%s: ERROR %s reports grid normalization does not meet precision tolerance eps_rlt_max = %20.15f\nlat_wgt_ttl = %20.15f, lat_wgt_ttl_xpc = %20.15f, lat_wgt_frc = %20.15f, eps_rlt = %20.15f\n",nco_prg_nm_get(),fnc_nm,eps_rlt_max,lat_wgt_ttl,lat_wgt_ttl_xpc,lat_wgt_ttl/lat_wgt_ttl_xpc,1.0-lat_wgt_ttl/lat_wgt_ttl_xpc); nco_exit(EXIT_FAILURE); } /* !imprecise */ } /* !nco_grd_lat_eqa, !nco_grd_lat_fv */ } /* !flg_grd_2D */ if(flg_grd_2D){ assert(grd_crn_nbr == 4); if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){ /* If interfaces were diagnosed from centers, copy corners from interfaces */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_ntf[lon_idx]; /* LL */ lon_crn[idx+1L]=lon_ntf[lon_idx+1L]; /* LR */ lon_crn[idx+2L]=lon_ntf[lon_idx+1L]; /* UR */ lon_crn[idx+3L]=lon_ntf[lon_idx]; /* UL */ } /* !lon_idx */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_ntf[lat_idx]; /* LL */ lat_crn[idx+1L]=lat_ntf[lat_idx]; /* LR */ lat_crn[idx+2L]=lat_ntf[lat_idx+1L]; /* UR */ lat_crn[idx+3L]=lat_ntf[lat_idx+1L]; /* UL */ } /* !lat_idx */ }else{ /* !lat_bnd_id */ /* If boundaries were provided in input dataset, copy corners from boundaries */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_bnd[2*lon_idx]; /* LL */ lon_crn[idx+1L]=lon_bnd[2*lon_idx+1L]; /* LR */ lon_crn[idx+2L]=lon_bnd[2*lon_idx+1L]; /* UR */ lon_crn[idx+3L]=lon_bnd[2*lon_idx]; /* UL */ } /* !lon_idx */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_bnd[2*lat_idx]; /* LL */ lat_crn[idx+1L]=lat_bnd[2*lat_idx]; /* LR */ lat_crn[idx+2L]=lat_bnd[2*lat_idx+1L]; /* UR */ lat_crn[idx+3L]=lat_bnd[2*lat_idx+1L]; /* UL */ } /* !lat_idx */ } /* !lat_bnd_id */ } /* !flg_grd_2D */ /* lat/lon_crn will not change anymore so stuff rectangular arrays into unrolled arrays */ if(flg_grd_1D){ for(idx=0;idx<grd_sz_nbr;idx++){ grd_ctr_lat[idx]=lat_ctr[idx]; grd_ctr_lon[idx]=lon_ctr[idx]; if(flg_wrt_crn){ for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; grd_crn_lat[idx2]=lat_crn[idx2]; grd_crn_lon[idx2]=lon_crn[idx2]; } /* !crn */ }else{ /* !flg_wrt_crn */ /* Defaults for ERWG when corners are unknown */ for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; grd_crn_lat[idx2]=0.0; grd_crn_lon[idx2]=0.0; } /* !crn */ } /* !flg_wrt_crn */ } /* !col */ } /* !flg_grd_1D */ if(flg_grd_2D){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]=lat_ctr[lat_idx]; grd_ctr_lon[idx]=lon_ctr[lon_idx]; for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; 
grd_crn_lon[idx2]=lon_crn[lon_idx2]; } /* !crn */ } /* !lon */ } /* !lat */ /* 20190613: Convert CW quadrilaterals to CCW quadrilaterals so TempestRemap accepts grids Default construction/inferral method orders corners CCW and CW for s2n and n2s grids, respectively */ if(!flg_s2n){ for(idx=0L;idx<grd_sz_nbr;idx++){ idx2=grd_crn_nbr*idx; flg_ccw=nco_ccw_chk(grd_crn_lat+idx2,grd_crn_lon+idx2,grd_crn_nbr,idx_ccw,rcr_lvl); } /* !idx */ } /* !flg_s2n */ } /* !flg_grd_2D */ /* Find span of all grids */ double lat_max; /* [dgr] Maximum latitude */ double lat_min; /* [dgr] Minimum latitude */ double lon_max; /* [dgr] Maximum longitude */ double lon_min; /* [dgr] Minimum longitude */ idx_ctr=0; if(has_mss_val_ctr){ /* Find first non-missing value center and thus corners */ for(idx_ctr=0;idx_ctr<grd_sz_nbr;idx_ctr++){ if(grd_ctr_lat[idx_ctr] != mss_val_ctr_dbl) break; } /* !grd_sz_nbr */ assert(idx_ctr != grd_sz_nbr); } /* !has_mss_val_ctr */ if(flg_wrt_crn){ /* Grids with corner boundaries supplied or inferred */ lon_max=grd_crn_lon[idx_ctr*grd_crn_nbr]; lat_max=grd_crn_lat[idx_ctr*grd_crn_nbr]; lon_min=grd_crn_lon[idx_ctr*grd_crn_nbr]; lat_min=grd_crn_lat[idx_ctr*grd_crn_nbr]; for(idx=1;idx<grd_sz_nbr*grd_crn_nbr;idx++){ idx_ctr=idx/grd_crn_nbr; if(has_mss_val_ctr) if(grd_ctr_lat[idx_ctr] == mss_val_ctr_dbl) continue; lat_max=(grd_crn_lat[idx] > lat_max) ? grd_crn_lat[idx] : lat_max; lon_max=(grd_crn_lon[idx] > lon_max) ? grd_crn_lon[idx] : lon_max; lat_min=(grd_crn_lat[idx] < lat_min) ? grd_crn_lat[idx] : lat_min; lon_min=(grd_crn_lon[idx] < lon_min) ? grd_crn_lon[idx] : lon_min; } /* !idx */ }else{ /* !flg_wrt_crn */ /* 20170424: Diagnose grid-extent when corners were not provided or inferred This is usually (always?) for 1d unstructured grids with only centers provided */ lon_max=grd_ctr_lon[idx_ctr]; lat_max=grd_ctr_lat[idx_ctr]; lon_min=grd_ctr_lon[idx_ctr]; lat_min=grd_ctr_lat[idx_ctr]; for(idx_ctr=1;idx_ctr<grd_sz_nbr;idx_ctr++){ if(has_mss_val_ctr) if(grd_ctr_lat[idx_ctr] == mss_val_ctr_dbl) continue; lat_max=(grd_ctr_lat[idx_ctr] > lat_max) ? grd_ctr_lat[idx_ctr] : lat_max; lon_max=(grd_ctr_lon[idx_ctr] > lon_max) ? grd_ctr_lon[idx_ctr] : lon_max; lat_min=(grd_ctr_lat[idx_ctr] < lat_min) ? grd_ctr_lat[idx_ctr] : lat_min; lon_min=(grd_ctr_lon[idx_ctr] < lon_min) ? 
grd_ctr_lon[idx_ctr] : lon_min; } /* !idx_ctr */ } /* flg_wrt_crn */ lat_spn=lat_max-lat_min; lon_spn=lon_max-lon_min; /* Use strict rules for rectangular grids, looser for spans that are inferred, or center-to-center not corner-to-corner */ if(flg_grd_2D){ if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; }else{ /* !flg_grd_2D */ if((float)lon_spn >= 340.0f && (float)lat_spn >= 170.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; } /* flg_wrt_crn */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports grid resolution %li x %li, spans %g x %g degrees: [%g <= lat <= %g], [%g <= lon <= %g]\n",nco_prg_nm_get(),fnc_nm,lat_nbr,lon_nbr,lat_spn,lon_spn,lat_min,lat_max,lon_min,lon_max); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input grid-extent: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_xtn_sng(nco_grd_xtn)); /* Write ERWG hints if filenames provided and grid is regional */ char *fl_hnt=NULL; char *fl_hnt_dst=NULL; char *fl_hnt_src=NULL; if(rgr->fl_hnt_dst) fl_hnt=fl_hnt_dst=rgr->fl_hnt_dst; if(rgr->fl_hnt_src) fl_hnt=fl_hnt_src=rgr->fl_hnt_src; if(nco_grd_xtn == nco_grd_xtn_rgn && fl_hnt){ const char *fl_mode="w"; FILE *fp_hnt; /* [fl] Hint file (for ERWG switches) file handle */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s writing ERWG weight-generation regional hint to file %s\n",nco_prg_nm_get(),fnc_nm,fl_hnt); /* Open output file */ if((fp_hnt=fopen(fl_hnt,fl_mode)) == NULL){ (void)fprintf(stderr,"%s: ERROR unable to open hint output file %s\n",nco_prg_nm_get(),fl_hnt); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: Opened hint file %s\n",nco_prg_nm_get(),fl_hnt); if(fl_hnt_src) (void)fprintf(fp_hnt,"--src_regional"); if(fl_hnt_dst) (void)fprintf(fp_hnt,"--dst_regional"); rcd=fclose(fp_hnt); if(rcd != 0){ (void)fprintf(stderr,"%s: ERROR unable to close hint output file %s\n",nco_prg_nm_get(),fl_hnt); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: Closed hint file %s\n",nco_prg_nm_get(),fl_hnt); } /* !nco_grd_xtn */ /* Diagnose area if necessary 20170510: ALM/CLM "area" is _FillValue=1.0e36f over ocean and total gridcell area in km2 (not multiplied by landfrac) elsewhere Writing this ALM/CLM "area" variable to gridfile, then using with ERWG --user_areas could be disastrous (depending on mask array and interpolation type) On the other hand CAM "area" variable is exactly what we want for gridfile Input areas are considered "untrustworthy" iff they have _and use_ missing value attribute Re-diagnose areas considered untrustworthy so output area array does not contain missing values */ if(flg_wrt_crn && has_mss_val_area){ const double mss_val_dbl=mss_val_area_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(area[idx] == mss_val_dbl) break; if(idx < grd_sz_nbr) use_mss_val_area=True; if(nco_dbg_lvl_get() >= nco_dbg_fl && use_mss_val_area) (void)fprintf(stdout,"%s: INFO %s reports input area field %s is considered untrustworthy because it uses missing values, will diagnose area from cell boundaries instead...\n",nco_prg_nm_get(),fnc_nm,area_nm_in); } /* !has_mss_val_area */ /* 20170511: There remain a handful of cases when input area should be diagnosed not copied These include using ncremap in SGS mode when inferred grids must use sensible area units Otherwise an inferred grid with area [km2] from ALM/CLM might be combined with 
area [sr] from NCO This would bias ERWG --user_areas produced values by ~10^10 Setting flg_dgn_area ensures inferred area uses [sr] */ const nco_bool flg_dgn_area=rgr->flg_dgn_area; /* [flg] Diagnose rather than copy inferred area */ if(flg_wrt_crn && /* If bounds are available to compute area and ... */ (area_id == NC_MIN_INT || /* Area is not in input file ... */ use_mss_val_area || /* Area is untrustworthy */ flg_dgn_area)){ /* User/application explicitly requests diagnostic area */ /* Not absolutely necessary to diagnose area because ERWG will diagnose and output area itself _unless_ --user_areas option is given */ if(nco_dbg_lvl_get() >= nco_dbg_std && flg_dgn_area) (void)fprintf(stdout,"%s: INFO %s reports diagnosing area from cell boundaries...\n",nco_prg_nm_get(),fnc_nm); if(flg_grd_crv || flg_grd_1D){ /* Area of arbitrary unstructured or curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,grd_crn_lat,grd_crn_lon,grd_sz_nbr,grd_crn_nbr,area); }else if(flg_grd_2D){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) area[lat_idx*lon_nbr+lon_idx]=fabs(dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*(sin(dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(dgr2rdn*lat_bnd[2*lat_idx]))); /* fabs() ensures positive area in n2s grids */ } /* !flg_grd_2D */ } /* !area_id */ /* ERWG will fail unless grid file has mask variable Use nul-mask (all points included) whenever input mask variable not supplied/detected Define nul-mask true everywhere and overwrite with false below Input mask can be any type and output mask will always be NC_INT */ for(idx=0;idx<grd_sz_nbr;idx++) msk[idx]=1; if(msk_id != NC_MIN_INT){ /* Change missing-value-masked points to 0 integer mask for SCRIP grids (SCRIP has no missing value convention) Input mask can be any type and output mask will always be NC_INT Applications: ALM/CLM mask (landmask) is NC_FLOAT and defines though does not use NC_FLOAT missing value CICE mask (tmask/umask) is NC_FLOAT and defines and uses NC_FLOAT missing value RACMO mask is NC_FLOAT and defines though does not use NC_FLOAT missing value AMSR mask is NC_SHORT and has no missing value GHRSST mask is NC_BYTE and is a multi-valued surface-type flag with missing value == -1b */ if(msk_typ != NC_INT){ if(nco_dbg_lvl_get() == nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s mask variable \"%s\" has odd type = %s. Re-run with higher debugging level for more information.\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,nco_typ_sng(msk_typ)); if(nco_dbg_lvl_get() > nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s mask variable \"%s\" has odd type = %s. Regridding weight generators require a mask variable of type NC_INT to specify points to include/exclude as sources/destinations. Points where the mask variable is zero or the missing value will be excluded (ignored) in regridding, all other points will be included. When inferring gridfiles, NCO assumes the first variable with a \"mask\"-like name (\"mask\", \"Mask\", \"grid_imask\", \"landmask\", or \"tmask\"), or the variable designated by the \"--msk_[src/dst]=msk_nm\" option, is this mask. However the variable \"%s\" in this file is not type NC_INT and so may not be intended as a regridding mask, hence this oh so pleasant informational WARNING. To prevent NCO from interpreting \"%s\" as a regridding mask, specify \"--msk_src=none\" and/or \"--msk_dst=none\", as appropriate. To utilize some other variable as the mask variable, specify \"--msk_src=msk_nm\" and/or \"--msk_dst=msk_nm\", as appropriate. 
Mask treatment is subtle, and NCO tries to \"do the right thing\". Whether it does is often easiest to discern by visual inspection of the regridded results in a turn-key viewer like Panoply or ncview.\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,nco_typ_sng(msk_typ),msk_nm_in,msk_nm_in); } /* msk_typ */ switch(msk_typ){ case NC_FLOAT: if(has_mss_val_msk){ const float mss_val_flt=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.fp[idx] == mss_val_flt || msk_unn.fp[idx] == 0.0f) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.fp[idx] == 0.0f) msk[idx]=0; } /* !mss_val */ break; case NC_DOUBLE: if(has_mss_val_msk){ const double mss_val_dbl=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.dp[idx] == mss_val_dbl || msk_unn.dp[idx] == 0.0) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.dp[idx] == 0.0) msk[idx]=0; } /* !mss_val */ break; case NC_INT: if(has_mss_val_msk){ const int mss_val_int=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.ip[idx] == mss_val_int || msk_unn.ip[idx] == 0) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.ip[idx] == 0) msk[idx]=0; } /* !mss_val */ break; case NC_SHORT: /* http://stackoverflow.com/questions/208433/how-do-i-write-a-short-literal-in-c */ if(has_mss_val_msk){ const short mss_val_sht=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.sp[idx] == mss_val_sht || msk_unn.sp[idx] == ((short)0)) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.sp[idx] == ((short)0)) msk[idx]=0; /* 20160111: AMSR kludge fxm */ // for(idx=0;idx<grd_sz_nbr;idx++) if(msk[idx] == 1) msk[idx]=0; } /* !mss_val */ break; case NC_BYTE: if(has_mss_val_msk){ const nco_byte mss_val_byt=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.bp[idx] == mss_val_byt || msk_unn.bp[idx] == ((nco_byte)0)) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.bp[idx] == ((nco_byte)0)) msk[idx]=0; /* 20170811: GHRSST kludge? 
*/ } /* !mss_val */ break; default: (void)fprintf(stderr,"%s: ERROR %s mask variable \"%s\" has unsupported type = %s\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,nco_typ_sng(msk_typ)); nco_dfl_case_generic_err(); return NCO_ERR; break; } /* !msk_typ */ if(msk_unn.vp) msk_unn.vp=(void *)nco_free(msk_unn.vp); } /* !msk_id */ if(nco_dbg_lvl_get() >= nco_dbg_scl){ lat_wgt_ttl=0.0; area_ttl=0.0; if(flg_grd_2D){ (void)fprintf(stderr,"%s: INFO %s reports inferred rectangular latitude grid area diagnostics (lat_wgt_ttl and frc_lat_wgt should be valid):\n",nco_prg_nm_get(),fnc_nm); for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt_ttl+=lat_wgt[lat_idx]; }else{ (void)fprintf(stderr,"%s: INFO %s reports inferred unstructured or curvilinear latitude grid area diagnostics (ignore lat_wgt_ttl and frc_lat_wgt):\n",nco_prg_nm_get(),fnc_nm); } /* !flg_grd_2D */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) area_ttl+=area[lat_idx*lon_nbr+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_ttl,area_ttl/(4.0*M_PI)); assert(area_ttl > 0.0); /* Protect following assertion since area might be in, e.g., km2 (ELM, RACMO) */ if(flg_area_sr) assert(area_ttl <= 4.0*M_PI); const double eps_rlt_area=1.0e-12; /* [frc] Error tolerance for global area */ if(nco_grd_xtn == nco_grd_xtn_glb){ if(fabs(1.0-area_ttl/(4.0*M_PI)) > eps_rlt_area) (void)fprintf(stdout,"%s: WARNING %s reports area for inferred global grid differs from true global area (4*pi sr) by greater than allowed fraction %g\n",nco_prg_nm_get(),fnc_nm,eps_rlt_area); } /* !nco_grd_xtn_glb */ } /* !dbg */ /* Open grid file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Define dimensions */ /* 20151230 ERWG appears to require presence of corner arrays in grid file even when they are not used (e.g., bilinear) But ERWG will break when corner values are bad. Default is do not write bad corner values. Uncomment next line to write bad corner values. 
*/ /* flg_wrt_crn=True; */ if(flg_wrt_crn) rcd=nco_def_dim(out_id,grd_crn_nm,grd_crn_nbr,&dmn_id_grd_crn); rcd=nco_def_dim(out_id,grd_sz_nm,grd_sz_nbr,&dmn_id_grd_sz); rcd=nco_def_dim(out_id,grd_rnk_nm,grd_rnk_nbr,&dmn_id_grd_rnk); int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; /* Define variables */ (void)nco_def_var(out_id,dmn_sz_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_rnk,&dmn_sz_int_id); /* NB: Too small to deflate */ (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_sz,&msk_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lat_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lon_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lon_id,shuffle,deflate,dfl_lvl); if(flg_wrt_crn){ dmn_ids[0]=dmn_id_grd_sz; dmn_ids[1]=dmn_id_grd_crn; (void)nco_def_var(out_id,grd_crn_lat_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_crn_lon_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lon_id,shuffle,deflate,dfl_lvl); } /* !flg_wrt_crn */ /* Define attributes */ aed_sct aed_mtd; char *att_nm; if(strstr(rgr->grd_ttl,"None given")){ const char att_fmt[]="NCO inferred this grid from input file %s"; att_val=(char *)nco_malloc((strlen(att_fmt)+strlen(rgr->fl_in)+1L)*sizeof(char)); sprintf(att_val,att_fmt,rgr->fl_in); }else{ att_val=strdup(rgr->grd_ttl); } /* !grd_ttl */ rcd=nco_char_att_put(out_id,NULL,"title",att_val); rcd=nco_char_att_put(out_id,NULL,"Conventions","SCRIP"); const char usr_cpp[]=TKN2SNG(USER); /* [sng] Hostname from C pre-processor */ rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO"); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ)); rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ)); rcd=nco_char_att_put(out_id,dmn_sz_nm,"long_name","Size(s) of horizontal dimensions (in Fortran storage order for historical reasons)"); if(flg_area_sr){ rcd=nco_char_att_put(out_id,area_nm,"long_name","Solid Angle Subtended on Source Grid"); rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm,"units","steradian"); }else{ /* !flg_area_sr */ rcd=nco_char_att_put(out_id,area_nm,"long_name","Area on Source Grid"); // rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm,"units",area_unt); } /* !flg_area_sr */ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"standard_name","latitude"); if(ngl_unt){ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,unt_sng,ngl_unt); }else{ /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees_north"); else 
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees"); } /* !ngl_unt */ rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"standard_name","longitude"); if(ngl_unt){ rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,unt_sng,ngl_unt); }else{ /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees"); } /* !ngl_unt */ if(flg_wrt_crn){ rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"long_name","Latitude of Grid Cell Vertices"); if(ngl_unt){ rcd=nco_char_att_put(out_id,grd_crn_lat_nm,unt_sng,ngl_unt); }else{ /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees"); } /* !ngl_unt */ rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"long_name","Longitude of Grid Cell Vertices"); if(ngl_unt){ rcd=nco_char_att_put(out_id,grd_crn_lon_nm,unt_sng,ngl_unt); }else{ /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees"); } /* !ngl_unt */ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"bounds",grd_crn_lat_nm); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"bounds",grd_crn_lon_nm); } /* !flg_wrt_crn */ rcd=nco_char_att_put(out_id,msk_nm,"long_name","Binary Integer Mask for Grid"); rcd=nco_char_att_put(out_id,msk_nm,"units","none"); /* Begin data mode */ (void)nco_enddef(out_id); /* Write variables */ dmn_srt[0]=0L; dmn_cnt[0]=grd_rnk_nbr; rcd=nco_put_vara(out_id,dmn_sz_int_id,dmn_srt,dmn_cnt,dmn_sz_int,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,msk_id,dmn_srt,dmn_cnt,msk,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ); if(flg_wrt_crn){ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lat_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lon_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ); } /* !flg_wrt_crn */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); fl_out=rgr->fl_ugrid; if(fl_out){ /* Test UGRID: Documentation: https://github.com/ugrid-conventions/ugrid-conventions Procedure: Create 1x1 skeleton file, infer UGRID and SCRIP grids from it ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr skl=${HOME}/skl_180x360.nc --rgr scrip=${HOME}/grd_180x360_SCRIP.nc --rgr latlon=180,360#lat_typ=eqa#lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr infer --rgr ugrid=${HOME}/grd_ugrid.nc --rgr scrip=${HOME}/grd_scrip.nc ~/skl_180x360.nc ~/foo.nc ncks --cdl -v mesh_node_y ~/grd_ugrid.nc ncks --cdl -v mesh_face_nodes,mesh_face_x,mesh_face_y -d nFaces,0 ~/grd_ugrid.nc ncks --cdl -v mesh_edge_nodes,mesh_edge_x,mesh_edge_y -d nEdges,0 ~/grd_ugrid.nc ncks --cdl -v grid_center_lat,grid_corner_lat -d 
grid_size,0,,360 -d grid_corners,0,3 ~/grd_scrip.nc ncks --cdl -m -M ~/grd_ugrid.nc */ char *dgx_nm=NULL_CEWI; /* [sng] Name of edge_coordinates x variable */ char *dgy_nm=NULL_CEWI; /* [sng] Name of edge_coordinates y variable */ char *dg_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as edges */ char *dg_nd_nm=NULL_CEWI; /* [sng] Name of edge_node_connectivity variable */ char *fcx_nm=NULL_CEWI; /* [sng] Name of face_coordinates x variable */ char *fcy_nm=NULL_CEWI; /* [sng] Name of face_coordinates y variable */ char *fc_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as faces */ char *fc_nd_nm=NULL_CEWI; /* [sng] Name of face_node_connectivity variable */ char *msh_nm=NULL_CEWI; /* [sng] Name of mesh topology variable */ char *nd_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes */ char *ndx_nm=NULL_CEWI; /* [sng] Name of node_coordinates x variable */ char *ndy_nm=NULL_CEWI; /* [sng] Name of node_coordinates y variable */ char *npe_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes-per-edge */ char *npf_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes-per-face */ double *dgx=NULL_CEWI; /* [dgr] Characteristic longitude of edges */ double *dgy=NULL_CEWI; /* [dgr] Characteristic latitude of edges */ double *fcx=NULL_CEWI; /* [dgr] Characteristic longitude of faces */ double *fcy=NULL_CEWI; /* [dgr] Characteristic latitude of faces */ double *ndx=NULL_CEWI; /* [dgr] Longitude of nodes */ double *ndy=NULL_CEWI; /* [dgr] Latitude of nodes */ int *dg_nd; /* [idx] edge_node_connectivity variable */ int *fc_nd; /* [idx] face_node_connectivity variable */ int dg_nd_id=NC_MIN_INT; /* [id] edge_node_connectivity variable ID */ int dgx_id=NC_MIN_INT; /* [id] Characteristic longitude of edges variable ID */ int dgy_id=NC_MIN_INT; /* [id] Characteristic latitude of edges variable ID */ int dmn_id_dg=NC_MIN_INT; /* [id] Dimension ID for edges */ int dmn_id_fc=NC_MIN_INT; /* [id] Dimension ID for faces */ int dmn_id_nd=NC_MIN_INT; /* [id] Dimension ID for nodes */ int dmn_id_npe=NC_MIN_INT; /* [id] Dimension ID for nodes-per-edge */ int dmn_id_npf=NC_MIN_INT; /* [id] Dimension ID for nodes-per-face */ int fc_nd_id=NC_MIN_INT; /* [id] face_node_connectivity variable ID */ int fcx_id=NC_MIN_INT; /* [id] Characteristic longitude of faces variable ID */ int fcy_id=NC_MIN_INT; /* [id] Characteristic latitude of faces variable ID */ int msh_id=NC_MIN_INT; /* [id] Mesh topology variable ID */ int msh_val=42; /* [id] Mesh topology variable value from Monty Python */ int ndx_id=NC_MIN_INT; /* [id] Longitude of mesh nodes variable ID */ int ndy_id=NC_MIN_INT; /* [id] Latitude of mesh nodes variable ID */ const long fc_nbr=grd_sz_nbr; /* [nbr] Number of faces in mesh */ const long npe_nbr=2; /* [nbr] Number of nodes per edge */ const long npf_nbr=grd_crn_nbr; /* [nbr] Number of nodes per face */ long dg_idx; /* [idx] Counting index for edges */ long dg_nbr=(long)NC_MIN_INT64; /* [nbr] Number of edges in mesh */ long fc_idx; /* [idx] Counting index for faces */ long nd_idx; /* [idx] Counting index for nodes */ long nd_nbr=(long)NC_MIN_INT64; /* [nbr] Number of nodes in mesh */ long srt_idx=0; /* [idx] start_index (C/Fortran) for edge_nodes, face_nodes */ if(!dgx_nm) dgx_nm=(char *)strdup("mesh_edge_x"); if(!dgy_nm) dgy_nm=(char *)strdup("mesh_edge_y"); if(!dg_dmn_nm) dg_dmn_nm=(char *)strdup("nEdges"); if(!fcx_nm) fcx_nm=(char *)strdup("mesh_face_x"); if(!fcy_nm) fcy_nm=(char *)strdup("mesh_face_y"); if(!fc_dmn_nm) fc_dmn_nm=(char 
*)strdup("nFaces"); if(!dg_nd_nm) dg_nd_nm=(char *)strdup("mesh_edge_nodes"); if(!fc_nd_nm) fc_nd_nm=(char *)strdup("mesh_face_nodes"); if(!msh_nm) msh_nm=(char *)strdup("mesh"); if(!nd_dmn_nm) nd_dmn_nm=(char *)strdup("nNodes"); if(!ndx_nm) ndx_nm=(char *)strdup("mesh_node_x"); if(!ndy_nm) ndy_nm=(char *)strdup("mesh_node_y"); if(!npe_dmn_nm) npe_dmn_nm=(char *)strdup("two"); if(!npf_dmn_nm) npf_dmn_nm=(char *)strdup("maxNodesPerFace"); if(flg_grd_1D){ (void)fprintf(stdout,"%s: ERROR %s UGRID output does not yet support 1D grids\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); }else if(flg_grd_2D){ /* Assume 2D grids are global and comprised of quadrilaterals */ switch(lat_typ){ case nco_grd_lat_fv: /* Currently all 2D grids are converted to the same UGRID representation fxm: Cap grids (e.g., FV) should eventually be written with a real cap, rather than as the "polar teeth" representation currently used. Polar teeth convention allows cap grid to be represented as rectangular on disk However, cap grids are better suited to non-rectangular UGRID meshes */ case nco_grd_lat_eqa: case nco_grd_lat_gss: /* Numbers of unique edges and nodes counted from South Pole (SP) to North Pole (NP) */ dg_nbr=lon_nbr*2+ /* SP: cells_per_lat*unique_edges_per_cell */ (lat_nbr-2)*lon_nbr*2+ /* Mid: lats*cells_per_lat*unique_edges_per_cell */ lon_nbr*1; /* NP: cells_per_lat*unique_edges_per_cell */ nd_nbr=1+lon_nbr*1+ /* SP: SP+cells_per_lat*unique_nodes_per_cell */ (lat_nbr-2)*lon_nbr*1+ /* Mid: lats*cells_per_lat*unique_nodes_per_cell */ 1; /* NP: NP */ break; case nco_grd_lat_unk: case nco_grd_lat_nil: default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ }else if(flg_grd_crv){ (void)fprintf(stdout,"%s: ERROR %s UGRID output does not yet support curvilinear grids\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !flg_grd */ dg_nd=(int *)nco_malloc(dg_nbr*npe_nbr*nco_typ_lng(NC_INT)); dgx=(double *)nco_malloc(dg_nbr*nco_typ_lng(crd_typ)); dgy=(double *)nco_malloc(dg_nbr*nco_typ_lng(crd_typ)); fc_nd=(int *)nco_malloc(fc_nbr*npf_nbr*nco_typ_lng(NC_INT)); fcx=(double *)nco_malloc(fc_nbr*nco_typ_lng(crd_typ)); fcy=(double *)nco_malloc(fc_nbr*nco_typ_lng(crd_typ)); ndx=(double *)nco_malloc(nd_nbr*nco_typ_lng(crd_typ)); ndy=(double *)nco_malloc(nd_nbr*nco_typ_lng(crd_typ)); const long int idx_fst_crn_ll=0; const long int idx_fst_crn_lr=1; const long int idx_fst_crn_ur=2; const long int idx_fst_crn_ul=3; /* Node Ordering: Each interior face requires one new node Node 0 at SP New latitude row moves next node North Add nodes to run West->East */ /* SP */ ndx[0]=lon_crn[0]; /* Longitude degenerate at SP, NP, keep same longitude as corner array */ ndy[0]=lat_crn[0]; /* Mid */ for(nd_idx=1;nd_idx<nd_nbr-1L;nd_idx++){ fc_idx=nd_idx-1L; lat_idx=fc_idx/lon_nbr; lon_idx=fc_idx%lon_nbr; ndx[nd_idx]=lon_crn[lon_idx*grd_crn_nbr+idx_fst_crn_ul]; ndy[nd_idx]=lat_crn[lat_idx*grd_crn_nbr+idx_fst_crn_ul]; } /* !nd_idx */ /* NP */ ndx[nd_nbr-1L]=lon_crn[(lon_nbr-1)*grd_crn_nbr+idx_fst_crn_ul]; ndy[nd_nbr-1L]=lat_crn[(lat_nbr-1)*grd_crn_nbr+idx_fst_crn_ul]; /* Edge Ordering: epf_nbr is number of distinct edges-per-face (incremental, for interior cells) Each additional interior rectangular gridcell requires two new edges: Edge 0 runs South->North for all cells Edge 1 runs West->East for all cells NP row requires only one new edge per face */ /* SP */ const int epf_nbr=2; /* [nbr] Number of distinct edges-per-face (incremental, for interior cells) */ for(fc_idx=0;fc_idx<lon_nbr;fc_idx++){ dg_idx=fc_idx*epf_nbr; /* Edge 
0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+fc_idx+1L; /* Edge 1 */ dg_nd[(dg_idx+1L)*npe_nbr+0L]=srt_idx+fc_idx+1L; dg_nd[(dg_idx+1L)*npe_nbr+1L]=srt_idx+fc_idx+2L; } /* !fc_idx */ /* Mid */ for(fc_idx=lon_nbr;fc_idx<(lat_nbr-1L)*lon_nbr;fc_idx++){ dg_idx=fc_idx*epf_nbr; /* Edge 0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx+fc_idx-lon_nbr+1L; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+fc_idx+1L; /* Edge 1 */ dg_nd[(dg_idx+1L)*npe_nbr+0L]=srt_idx+fc_idx+1L; dg_nd[(dg_idx+1L)*npe_nbr+1L]=srt_idx+fc_idx+2L; } /* !fc_idx */ /* NP */ for(fc_idx=fc_nbr-lon_nbr;fc_idx<fc_nbr;fc_idx++){ /* Only one new edge per face in last row, easiest to count backwards from last edge */ dg_idx=dg_nbr-(fc_nbr-fc_idx); /* NP faces require only one new edge, Edge 0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx+fc_idx-lon_nbr+1L; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+nd_nbr-1L; } /* !fc_idx */ /* SP */ for(fc_idx=0;fc_idx<lon_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+0L]=srt_idx+0L; fc_nd[fc_idx*npf_nbr+1L]=srt_idx+fc_idx+2L; /* NB: CCW */ fc_nd[fc_idx*npf_nbr+2L]=srt_idx+fc_idx+1L; /* NB: CCW */ fc_nd[fc_idx*npf_nbr+3L]=mss_val_int_out; } /* !fc_idx */ /* Mid */ for(fc_idx=lon_nbr;fc_idx<fc_nbr-lon_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+idx_fst_crn_ll]=srt_idx+fc_idx-lon_nbr+1L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_lr]=srt_idx+fc_idx-lon_nbr+2L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_ur]=srt_idx+fc_idx+2L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_ul]=srt_idx+fc_idx+1L; } /* !fc_idx */ /* NP */ for(fc_idx=fc_nbr-lon_nbr;fc_idx<fc_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+0L]=srt_idx+nd_nbr-(fc_nbr-fc_idx)-2L; fc_nd[fc_idx*npf_nbr+1L]=srt_idx+nd_nbr-(fc_nbr-fc_idx)-1L; fc_nd[fc_idx*npf_nbr+2L]=srt_idx+nd_nbr-1L; fc_nd[fc_idx*npf_nbr+3L]=mss_val_int_out; } /* !fc_idx */ /* Characteristic coordinates */ for(dg_idx=0;dg_idx<dg_nbr-1L;dg_idx++){ idx=dg_idx*npe_nbr; dgx[dg_idx]=0.5*(ndx[dg_nd[idx+0L]]+ndx[dg_nd[idx+1L]]); dgy[dg_idx]=0.5*(ndy[dg_nd[idx+0L]]+ndy[dg_nd[idx+1L]]); } /* !dg_idx */ /* Degenerate longitude at SP, NP, causes weird characteristic longitude unless special care taken */ for(fc_idx=0;fc_idx<fc_nbr-1L;fc_idx++){ idx=fc_idx*npf_nbr; if(fc_idx < lon_nbr){ fcx[fc_idx]=0.5*(ndx[fc_nd[idx+1L]]+ndx[fc_nd[idx+2L]]); }else if(fc_idx >= fc_nbr-lon_nbr-1){ fcx[fc_idx]=0.5*(ndx[fc_nd[idx+0L]]+ndx[fc_nd[idx+1L]]); }else if(fc_nd[idx+3L] != mss_val_int_out){ /* fxm for fcx use nco_lon_crn_avg_brnch() and 3-node version too */ fcx[fc_idx]=0.25*(ndx[fc_nd[idx+0L]]+ndx[fc_nd[idx+1L]]+ndx[fc_nd[idx+2L]]+ndx[fc_nd[idx+3L]]); }else{ abort(); } /* !fc_idx */ if(fc_nd[idx+3L] != mss_val_int_out) fcy[fc_idx]=0.25*(ndy[fc_nd[idx+0L]]+ndy[fc_nd[idx+1L]]+ndy[fc_nd[idx+2L]]+ndy[fc_nd[idx+3L]]); else fcy[fc_idx]=0.33*(ndy[fc_nd[idx+0L]]+ndy[fc_nd[idx+1L]]+ndy[fc_nd[idx+2L]]); } /* !fc_idx */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); rcd=nco_def_dim(out_id,dg_dmn_nm,dg_nbr,&dmn_id_dg); rcd=nco_def_dim(out_id,fc_dmn_nm,fc_nbr,&dmn_id_fc); rcd=nco_def_dim(out_id,nd_dmn_nm,nd_nbr,&dmn_id_nd); rcd=nco_def_dim(out_id,npe_dmn_nm,npe_nbr,&dmn_id_npe); rcd=nco_def_dim(out_id,npf_dmn_nm,npf_nbr,&dmn_id_npf); dmn_ids[0]=dmn_id_dg; dmn_ids[1]=dmn_id_npe; rcd=nco_def_var(out_id,dg_nd_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids,&dg_nd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dg_nd_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_fc; dmn_ids[1]=dmn_id_npf; 
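/* Consistency sketch for the mesh constructed above (editor-added comment, hypothetical values not computed by this code): for lon_nbr=4, lat_nbr=3, fc_nbr=12, dg_nbr=4*2+(3-2)*4*2+4*1=20, nd_nbr=1+4*1+(3-2)*4*1+1=10, so nd_nbr-dg_nbr+fc_nbr=10-20+12=2, the Euler characteristic required of a closed quadrilateral mesh on the sphere */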
rcd=nco_def_var(out_id,fc_nd_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids,&fc_nd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fc_nd_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,msh_nm,(nc_type)NC_INT,dmn_nbr_0D,(int *)NULL,&msh_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msh_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,ndx_nm,crd_typ,dmn_nbr_1D,&dmn_id_nd,&ndx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ndx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,ndy_nm,crd_typ,dmn_nbr_1D,&dmn_id_nd,&ndy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ndy_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,dgx_nm,crd_typ,dmn_nbr_1D,&dmn_id_dg,&dgx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dgx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,dgy_nm,crd_typ,dmn_nbr_1D,&dmn_id_dg,&dgy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dgy_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,fcx_nm,crd_typ,dmn_nbr_1D,&dmn_id_fc,&fcx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fcx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,fcy_nm,crd_typ,dmn_nbr_1D,&dmn_id_fc,&fcy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fcy_id,shuffle,deflate,dfl_lvl); if(strstr(rgr->grd_ttl,"None given")){ const char att_fmt[]="NCO constructed this UGRID grid from scratch"; att_val=(char *)nco_malloc((strlen(att_fmt)+strlen(rgr->fl_in)+1L)*sizeof(char)); sprintf(att_val,att_fmt); }else{ att_val=strdup(rgr->grd_ttl); } /* !grd_ttl */ rcd=nco_char_att_put(out_id,NULL,"title",att_val); rcd=nco_char_att_put(out_id,NULL,"Conventions","CF-1.6, UGRID-1.0"); rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO"); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,msh_nm,"cf_role","mesh_topology"); rcd=nco_char_att_put(out_id,msh_nm,"standard_name","mesh_topology"); rcd=nco_char_att_put(out_id,msh_nm,"long_name","Topology data"); att_nm=strdup("topology_dimension"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=msh_nm; aed_mtd.id=msh_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&val_two; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,msh_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); aed_mtd.sz=strlen(ndx_nm)+strlen(ndy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",ndx_nm,ndy_nm); rcd=nco_char_att_put(out_id,msh_nm,"node_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"face_node_connectivity",fc_nd_nm); aed_mtd.sz=strlen(fcx_nm)+strlen(fcy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",fcx_nm,fcy_nm); rcd=nco_char_att_put(out_id,msh_nm,"face_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"face_dimension",fc_dmn_nm); rcd=nco_char_att_put(out_id,msh_nm,"edge_node_connectivity",dg_nd_nm); aed_mtd.sz=strlen(dgx_nm)+strlen(dgy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",dgx_nm,dgy_nm); rcd=nco_char_att_put(out_id,msh_nm,"edge_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"edge_dimension",dg_dmn_nm); rcd=nco_char_att_put(out_id,ndx_nm,"standard_name","longitude"); rcd=nco_char_att_put(out_id,ndx_nm,"long_name","Longitude of mesh nodes"); rcd=nco_char_att_put(out_id,ndx_nm,"units","degrees_east"); rcd=nco_char_att_put(out_id,ndy_nm,"standard_name","latitude"); rcd=nco_char_att_put(out_id,ndy_nm,"long_name","Latitude of mesh nodes"); 
rcd=nco_char_att_put(out_id,ndy_nm,"units","degrees_north");
rcd=nco_char_att_put(out_id,dg_nd_nm,"cf_role","edge_node_connectivity");
rcd=nco_char_att_put(out_id,dg_nd_nm,"long_name","Maps every edge to the two nodes that it connects");
att_nm=strdup("start_index");
aed_mtd.att_nm=att_nm;
aed_mtd.var_nm=dg_nd_nm;
aed_mtd.id=dg_nd_id;
aed_mtd.sz=1;
aed_mtd.type=NC_INT;
aed_mtd.val.ip=&val_zero;
aed_mtd.mode=aed_create;
(void)nco_aed_prc(out_id,dg_nd_id,aed_mtd);
if(att_nm) att_nm=(char *)nco_free(att_nm);
rcd=nco_char_att_put(out_id,fc_nd_nm,"cf_role","face_node_connectivity");
rcd=nco_char_att_put(out_id,fc_nd_nm,"long_name","Maps every face to its corner nodes");
att_nm=strdup("start_index");
aed_mtd.att_nm=att_nm;
aed_mtd.var_nm=fc_nd_nm;
aed_mtd.id=fc_nd_id;
aed_mtd.sz=1;
aed_mtd.type=NC_INT;
aed_mtd.val.ip=&val_zero;
aed_mtd.mode=aed_create;
(void)nco_aed_prc(out_id,fc_nd_id,aed_mtd);
if(att_nm) att_nm=(char *)nco_free(att_nm);
att_nm=strdup("_FillValue");
aed_mtd.att_nm=att_nm;
aed_mtd.var_nm=fc_nd_nm;
aed_mtd.id=fc_nd_id;
aed_mtd.sz=1;
aed_mtd.type=NC_INT;
aed_mtd.val.ip=&mss_val_int_out;
aed_mtd.mode=aed_create;
(void)nco_aed_prc(out_id,fc_nd_id,aed_mtd);
if(att_nm) att_nm=(char *)nco_free(att_nm);
rcd=nco_char_att_put(out_id,dgx_nm,"standard_name","longitude");
rcd=nco_char_att_put(out_id,dgx_nm,"long_name","Characteristic longitude of 2D mesh edge");
rcd=nco_char_att_put(out_id,dgx_nm,"units","degrees_east");
rcd=nco_char_att_put(out_id,dgy_nm,"standard_name","latitude");
rcd=nco_char_att_put(out_id,dgy_nm,"long_name","Characteristic latitude of 2D mesh edge");
rcd=nco_char_att_put(out_id,dgy_nm,"units","degrees_north");
rcd=nco_char_att_put(out_id,fcx_nm,"standard_name","longitude");
rcd=nco_char_att_put(out_id,fcx_nm,"long_name","Characteristic longitude of 2D mesh face");
rcd=nco_char_att_put(out_id,fcx_nm,"units","degrees_east");
rcd=nco_char_att_put(out_id,fcy_nm,"standard_name","latitude");
rcd=nco_char_att_put(out_id,fcy_nm,"long_name","Characteristic latitude of 2D mesh face");
rcd=nco_char_att_put(out_id,fcy_nm,"units","degrees_north");
/* Begin data mode */
(void)nco_enddef(out_id);
(void)nco_put_vara(out_id,msh_id,dmn_srt,dmn_cnt,&msh_val,(nc_type)NC_INT);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=dg_nbr;
dmn_cnt[1]=epf_nbr;
(void)nco_put_vara(out_id,dg_nd_id,dmn_srt,dmn_cnt,dg_nd,(nc_type)NC_INT);
dmn_srt[0]=dmn_srt[1]=0L;
dmn_cnt[0]=fc_nbr;
dmn_cnt[1]=npf_nbr;
(void)nco_put_vara(out_id,fc_nd_id,dmn_srt,dmn_cnt,fc_nd,(nc_type)NC_INT);
dmn_srt[0]=0L;
dmn_cnt[0]=nd_nbr;
(void)nco_put_vara(out_id,ndx_id,dmn_srt,dmn_cnt,ndx,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=nd_nbr;
(void)nco_put_vara(out_id,ndy_id,dmn_srt,dmn_cnt,ndy,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=dg_nbr;
(void)nco_put_vara(out_id,dgx_id,dmn_srt,dmn_cnt,dgx,crd_typ);
(void)nco_put_vara(out_id,dgy_id,dmn_srt,dmn_cnt,dgy,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=fc_nbr;
(void)nco_put_vara(out_id,fcx_id,dmn_srt,dmn_cnt,fcx,crd_typ);
(void)nco_put_vara(out_id,fcy_id,dmn_srt,dmn_cnt,fcy,crd_typ);
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
/* Free memory associated with output file */
if(dgx) dgx=(double *)nco_free(dgx);
if(dgy) dgy=(double *)nco_free(dgy);
if(dg_nd) dg_nd=(int *)nco_free(dg_nd);
if(fcx) fcx=(double *)nco_free(fcx);
if(fcy) fcy=(double *)nco_free(fcy);
if(fc_nd) fc_nd=(int *)nco_free(fc_nd);
if(ndx) ndx=(double *)nco_free(ndx);
if(ndy) ndy=(double *)nco_free(ndy);
/* Free strings */
if(dgx_nm) dgx_nm=(char *)nco_free(dgx_nm);
if(dgy_nm)
dgy_nm=(char *)nco_free(dgy_nm);
if(dg_dmn_nm) dg_dmn_nm=(char *)nco_free(dg_dmn_nm);
if(dg_nd_nm) dg_nd_nm=(char *)nco_free(dg_nd_nm);
if(fcx_nm) fcx_nm=(char *)nco_free(fcx_nm);
if(fcy_nm) fcy_nm=(char *)nco_free(fcy_nm);
if(fc_dmn_nm) fc_dmn_nm=(char *)nco_free(fc_dmn_nm);
if(fc_nd_nm) fc_nd_nm=(char *)nco_free(fc_nd_nm);
if(msh_nm) msh_nm=(char *)nco_free(msh_nm);
if(nd_dmn_nm) nd_dmn_nm=(char *)nco_free(nd_dmn_nm);
if(ndx_nm) ndx_nm=(char *)nco_free(ndx_nm);
if(ndy_nm) ndy_nm=(char *)nco_free(ndy_nm);
if(npe_dmn_nm) npe_dmn_nm=(char *)nco_free(npe_dmn_nm);
if(npf_dmn_nm) npf_dmn_nm=(char *)nco_free(npf_dmn_nm);
} /* !fl_ugrid */
/* Free memory associated with input file */
if(dmn_sz_int) dmn_sz_int=(int *)nco_free(dmn_sz_int);
if(msk) msk=(int *)nco_free(msk);
if(area) area=(double *)nco_free(area);
if(grd_ctr_lat) grd_ctr_lat=(double *)nco_free(grd_ctr_lat);
if(grd_ctr_lon) grd_ctr_lon=(double *)nco_free(grd_ctr_lon);
if(grd_crn_lat) grd_crn_lat=(double *)nco_free(grd_crn_lat);
if(grd_crn_lon) grd_crn_lon=(double *)nco_free(grd_crn_lon);
if(lat_bnd) lat_bnd=(double *)nco_free(lat_bnd);
if(lat_crn) lat_crn=(double *)nco_free(lat_crn);
if(lat_ctr) lat_ctr=(double *)nco_free(lat_ctr);
if(lat_ntf) lat_ntf=(double *)nco_free(lat_ntf);
if(lat_wgt) lat_wgt=(double *)nco_free(lat_wgt);
if(lon_bnd) lon_bnd=(double *)nco_free(lon_bnd);
if(lon_crn) lon_crn=(double *)nco_free(lon_crn);
if(lon_ctr) lon_ctr=(double *)nco_free(lon_ctr);
if(lon_ntf) lon_ntf=(double *)nco_free(lon_ntf);
if(vrt_cll) vrt_cll=(int *)nco_free(vrt_cll);
if(vrt_lat) vrt_lat=(double *)nco_free(vrt_lat);
if(vrt_lon) vrt_lon=(double *)nco_free(vrt_lon);
/* Free strings */
if(area_nm_in) area_nm_in=(char *)nco_free(area_nm_in);
if(area_unt) area_unt=(char *)nco_free(area_unt);
if(bnd_dmn_nm) bnd_dmn_nm=(char *)nco_free(bnd_dmn_nm);
if(col_dmn_nm) col_dmn_nm=(char *)nco_free(col_dmn_nm);
if(lat_bnd_nm) lat_bnd_nm=(char *)nco_free(lat_bnd_nm);
if(lat_dmn_nm) lat_dmn_nm=(char *)nco_free(lat_dmn_nm);
if(lat_nm_in) lat_nm_in=(char *)nco_free(lat_nm_in);
if(lon_bnd_nm) lon_bnd_nm=(char *)nco_free(lon_bnd_nm);
if(lon_dmn_nm) lon_dmn_nm=(char *)nco_free(lon_dmn_nm);
if(lon_nm_in) lon_nm_in=(char *)nco_free(lon_nm_in);
if(msk_nm_in) msk_nm_in=(char *)nco_free(msk_nm_in);
if(ngl_unt) ngl_unt=(char *)nco_free(ngl_unt);
if(vrt_cll_nm) vrt_cll_nm=(char *)nco_free(vrt_cll_nm);
if(vrt_lat_nm) vrt_lat_nm=(char *)nco_free(vrt_lat_nm);
if(vrt_lon_nm) vrt_lon_nm=(char *)nco_free(vrt_lon_nm);
return rcd;
} /* !nco_grd_nfr() */

double /* O [dgr] Longitude difference (lon_r-lon_l) */
nco_lon_dff_brnch_dgr /* [fnc] Subtract longitudes with branch-cut rules */
(double lon_r, /* I [dgr] Longitude on right of gridcell (subtractor) */
 double lon_l) /* I [dgr] Longitude on left of gridcell (subtractee) */
{
  /* Purpose: Return difference of two longitudes in degrees
     Assume longitudes are within 180 degrees of each other
     Default orientation is monotonically increasing longitude from left to right */
  const char fnc_nm[]="nco_lon_dff_brnch_dgr()";
  const double lon_dff=lon_r-lon_l; /* [dgr] Longitude difference (lon_r-lon_l) */
  if(lon_dff >= 180.0){
    (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff);
    return lon_dff-360.0;
  }else if(lon_dff <= -180.0){
    return lon_dff+360.0;
  } /* !lon_dff */
  return lon_dff;
} /* !nco_lon_dff_brnch_dgr() */

double /* O [rdn] Longitude difference (lon_r-lon_l) */
nco_lon_dff_brnch_rdn /* [fnc] Subtract longitudes with branch-cut rules */
(double lon_r, /*
I [rdn] Longitude on right of gridcell (subtractor) */
 double lon_l) /* I [rdn] Longitude on left of gridcell (subtractee) */
{
  /* Purpose: Return difference of two longitudes in radians
     Assume longitudes are within pi radians of each other
     Default orientation is monotonically increasing longitude from left to right */
  const char fnc_nm[]="nco_lon_dff_brnch_rdn()";
  const double lon_dff=lon_r-lon_l; /* [rdn] Longitude difference (lon_r-lon_l) */
  //nco_bool dbg_prn=False; /* [flg] Print warning when longitude difference is suspicious */
  /* Longitudes on different branch cuts are expected when computing polygon area, so warn only if requested with high debugging level */
  if(lon_dff >= M_PI){
    if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff);
    return lon_dff-M_PI-M_PI;
  }else if(lon_dff <= -M_PI){
    if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff);
    return lon_dff+M_PI+M_PI;
  } /* !lon_dff */
  return lon_dff;
} /* !nco_lon_dff_brnch_rdn() */

double /* O [dgr] Longitude average */
nco_lon_crn_avg_brnch /* [fnc] Average quadrilateral longitude with branch-cut rules */
(double lon_ll, /* I [dgr] Longitude at lower left of gridcell */
 double lon_lr, /* I [dgr] Longitude at lower right of gridcell */
 double lon_ur, /* I [dgr] Longitude at upper right of gridcell */
 double lon_ul) /* I [dgr] Longitude at upper left of gridcell */
{
  /* Purpose: Return average of four corner longitudes of quadrilateral
     Assume longitudes are within 180 degrees of each other
     Default orientation is monotonically increasing longitude from left to right
     WLOG, adjust all longitudes to be on same branch as lon_ll */
  const char fnc_nm[]="nco_lon_crn_avg_brnch()";
  double lon_dff; /* [dgr] Longitude difference */
  lon_dff=lon_lr-lon_ll;
  if(lon_dff >= 180.0){
    if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_lr, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_lr,lon_ll,lon_dff);
    lon_lr-=360.0;
  }else if(lon_dff <= -180.0){
    lon_lr+=360.0;
  } /* !lon_dff */
  lon_dff=lon_ur-lon_ll;
  if(lon_dff >= 180.0){
    if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_ur, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_ur,lon_ll,lon_dff);
    lon_ur-=360.0;
  }else if(lon_dff <= -180.0){
    lon_ur+=360.0;
  } /* !lon_dff */
  lon_dff=lon_ul-lon_ll;
  if(lon_dff >= 180.0){
    if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_ul, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_ul,lon_ll,lon_dff);
    lon_ul-=360.0;
  }else if(lon_dff <= -180.0){
    lon_ul+=360.0;
  } /* !lon_dff */
  return 0.25*(lon_ll+lon_lr+lon_ur+lon_ul);
} /* !nco_lon_crn_avg_brnch() */

double /* O [dgr] Longitude average */
nco_lon_ply_avg_brnch_dgr /* [fnc] Average polygon longitude with branch-cut rules */
(double *lon_crn, /* I [dgr] Longitude of gridcell corners */
 long lon_nbr) /* I [nbr] Number of vertices in polygon */
{
  /* Purpose: Return average longitude of polygon vertices, i.e., centroid longitude
     Assume longitudes are within 180 degrees of one another
     Default orientation is monotonically increasing longitude from left to right
     WLOG, adjust all longitudes to be on same branch as lon_ll */
  // const char fnc_nm[]="nco_lon_ply_avg_brnch()";
  double lon_dff; /* [dgr] Longitude difference */
  double lon_avg; /* [dgr] Longitude average */
  int lon_idx; /*
[idx] Polygon vertex index */
  assert(lon_nbr != 0);
  lon_avg=lon_crn[0];
  for(lon_idx=1;lon_idx<lon_nbr;lon_idx++){
    lon_avg+=lon_crn[lon_idx];
    lon_dff=lon_crn[lon_idx]-lon_crn[0];
    if(lon_dff >= 180.0){
      lon_avg-=360.0;
    }else if(lon_dff <= -180.0){
      lon_avg+=360.0;
    } /* !lon_dff */
  } /* !lon_idx */
  return lon_avg/lon_nbr;
} /* !nco_lon_ply_avg_brnch_dgr() */

nco_bool /* O [flg] Input corners were CCW */
nco_ccw_chk /* [fnc] Convert quadrilateral gridcell corners to CCW orientation */
(double * const crn_lat, /* [dgr] Latitude corners of gridcell */
 double * const crn_lon, /* [dgr] Longitude corners of gridcell */
 const int crn_nbr, /* [nbr] Number of corners per gridcell */
 int idx_ccw, /* [idx] Index of starting vertex for CCW check (Point A = tail side AB) */
 const int rcr_lvl) /* [nbr] Recursion level */
{
  /* Purpose: Determine whether corner vertices are oriented CCW
     If not, alter order so they are returned in CCW order
     Function can call itself, and rcr_lvl indicates recursion level:
     rcr_lvl=1: Called by host code, i.e., nco_grd_nfr()
     rcr_lvl=2: Called by itself, i.e., nco_ccw_chk()
     Assumptions:
     Quadrilateral vertices are already corrected to obey branch-cut rules, i.e., all vertices are on "same side" of dateline or Greenwich as appropriate
     Algorithm:
     Start crn_idx=0, i.e., quadrilateral LL corner
     Vector A runs from crn_idx=0 to crn_idx=1, i.e., quadrilateral LL->LR
     Vector B runs from crn_idx=1 to crn_idx=2, i.e., quadrilateral LR->UR
     Compute cross-product C = A x B
     C is normal to plane containing A and B
     Dot-product of C with radial vector to head A = tail B is positive if A and B are CCW
     if(ABC is CCW){
       if(CDA is CCW) Done
       else Copy D:=A (make CDA degenerate, triangularize quadrilateral)
       endif
     }else(ABC is not CCW){
       Assume entire quadrilateral is CW
       Take mirror image of quadrilateral by switching B with D
       If(new ABC is CCW){
         If(CDA is CCW) Done
         else Copy D:=A (make CDA degenerate, triangularize quadrilateral)
         endif
       }else{
         Fail (return False, meaning point should be masked)
       }
     All cases return True (i.e., CCW) from rcr_lvl=1 except last
     Last case returns False, and calling code should mask such an aberrant point */
  const char fnc_nm[]="nco_ccw_chk()";
  /* MSVC compiler chokes unless array size is compile-time constant */
  const int CRN_NBR_MSVC=4;
  double sin_lat[CRN_NBR_MSVC];
  double sin_lon[CRN_NBR_MSVC];
  double cos_lat[CRN_NBR_MSVC];
  double cos_lon[CRN_NBR_MSVC];
  double A_tail_x,A_tail_y,A_tail_z;
  double A_head_x,A_head_y,A_head_z;
  double A_x,A_y,A_z;
  double B_tail_x,B_tail_y,B_tail_z;
  double B_head_x,B_head_y,B_head_z;
  double B_x,B_y,B_z;
  double C_x,C_y,C_z;
  double R_x,R_y,R_z;
  double lat_rdn;
  double lon_rdn;
  double dot_prd;
  int crn_idx; /* [idx] Corner idx */
  int A_tail_idx,A_head_idx;
  int B_tail_idx,B_head_idx;
  nco_bool flg_ccw; /* [flg] Input is CCW */
  assert(crn_nbr == CRN_NBR_MSVC);
  for(crn_idx=0;crn_idx<crn_nbr;crn_idx++){
    lat_rdn=crn_lat[crn_idx]*M_PI/180.0;
    lon_rdn=crn_lon[crn_idx]*M_PI/180.0;
    sin_lat[crn_idx]=sin(lat_rdn);
    cos_lat[crn_idx]=cos(lat_rdn);
    sin_lon[crn_idx]=sin(lon_rdn);
    cos_lon[crn_idx]=cos(lon_rdn);
  } /* !crn_idx */
  /* Calls from host code (i.e., nco_grd_nfr()) start at lower-left of quadrilateral ABCD = Point A = vertex 0
     Calls from self can start from quadrilateral Point A or C
     To check triangle CDA, start at upper-right of quadrilateral ABCD = Point C = vertex 2 */
  A_tail_idx=idx_ccw;
  A_head_idx=B_tail_idx=(A_tail_idx+1)%crn_nbr;
  B_head_idx=(B_tail_idx+1)%crn_nbr;
  A_tail_x=cos_lat[A_tail_idx]*cos_lon[A_tail_idx];
  A_tail_y=cos_lat[A_tail_idx]*sin_lon[A_tail_idx];
A_tail_z=sin_lat[A_tail_idx];
  A_head_x=B_tail_x=R_x=cos_lat[A_head_idx]*cos_lon[A_head_idx];
  A_head_y=B_tail_y=R_y=cos_lat[A_head_idx]*sin_lon[A_head_idx];
  A_head_z=B_tail_z=R_z=sin_lat[A_head_idx];
  B_head_x=cos_lat[B_head_idx]*cos_lon[B_head_idx];
  B_head_y=cos_lat[B_head_idx]*sin_lon[B_head_idx];
  B_head_z=sin_lat[B_head_idx];
  A_x=A_head_x-A_tail_x;
  A_y=A_head_y-A_tail_y;
  A_z=A_head_z-A_tail_z;
  B_x=B_head_x-B_tail_x;
  B_y=B_head_y-B_tail_y;
  B_z=B_head_z-B_tail_z;
  /* Cross-Product C = A x B */
  C_x=A_y*B_z-B_y*A_z;
  C_y=-A_x*B_z+B_x*A_z;
  C_z=A_x*B_y-B_x*A_y;
  /* Dot-Product R dot C */
  dot_prd=C_x*R_x+C_y*R_y+C_z*R_z;
  if(dot_prd > 0.0) flg_ccw=True; else flg_ccw=False;
  if(flg_ccw && crn_nbr == 4 && rcr_lvl == 1){
    /* Original ABC is CCW, now check CDA */
    idx_ccw=2;
    flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1);
    if(!flg_ccw){
      if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports triangle ABC is CCW but triangle CDA is not in quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Setting D:=A to triangularize quadrilateral.\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd);
      /* Triangularize quadrilateral D:=A */
      /* 20210411: From 2016 until today, nco_ccw_chk() overwrote fourth (UL) with first (LL) corner right here even when flg_ccw was True :( */
      crn_lat[3]=crn_lat[0];
      crn_lon[3]=crn_lon[0];
      return True;
    } /* !flg_ccw */
  }else if(!flg_ccw && crn_nbr == 4 && rcr_lvl == 1){
    /* Original ABC is not CCW
       20160124: Simplistic fix: reverse gridpoint order
       This only works for quadrilaterals without degenerate points */
    double crn_tmp;
    if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: INFO %s reports triangle ABC is non-CCW in quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Mirror-imaging...\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd);
    crn_tmp=crn_lat[1];
    crn_lat[1]=crn_lat[3];
    crn_lat[3]=crn_tmp;
    crn_tmp=crn_lon[1];
    crn_lon[1]=crn_lon[3];
    crn_lon[3]=crn_tmp;
    /* Check new triangle ABC */
    idx_ccw=0;
    flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1);
    if(flg_ccw){
      /* Inverted ABC is CCW, now check CDA */
      idx_ccw=2;
      flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1);
      if(flg_ccw){
        return True;
      }else{
        if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: INFO %s reports triangle ABC is CCW after inversion, but triangle CDA is not at quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Setting D:=A to triangularize quadrilateral.\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd);
        /* Triangularize quadrilateral D:=A */
        crn_lat[3]=crn_lat[0];
        crn_lon[3]=crn_lon[0];
        return True;
      } /* flg_ccw */
    }else{
      /* Original and Inverted ABC are not CCW */
      if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports triangle ABC remains non-CCW after first inversion\n",nco_prg_nm_get(),fnc_nm);
      return False;
    } /* !flg_ccw */
  } /* flg_ccw */
  return flg_ccw;
} /* !nco_ccw_chk() */
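/* Illustrative sketch (not part of NCO): a minimal standalone version of the
   cross-product orientation test that nco_ccw_chk() applies above. Convert three
   vertices to 3D Cartesian coordinates on the unit sphere, form edge vectors
   A = v0->v1 and B = v1->v2, compute C = A x B, and dot C with the radial vector
   at the shared vertex v1: a positive dot-product means the triangle is CCW
   viewed from outside the sphere. The function name tst_ccw_sph is hypothetical;
   assumes <math.h> supplies sin(), cos(), and M_PI */
static int /* O [flg] 1 iff triangle v0->v1->v2 is CCW */
tst_ccw_sph /* [fnc] Orientation test for spherical triangle (illustration only) */
(const double lat[3], /* I [dgr] Vertex latitudes */
 const double lon[3]) /* I [dgr] Vertex longitudes */
{
  double x[3],y[3],z[3];
  int idx;
  for(idx=0;idx<3;idx++){
    const double lat_rdn=lat[idx]*M_PI/180.0;
    const double lon_rdn=lon[idx]*M_PI/180.0;
    x[idx]=cos(lat_rdn)*cos(lon_rdn);
    y[idx]=cos(lat_rdn)*sin(lon_rdn);
    z[idx]=sin(lat_rdn);
  } /* !idx */
  const double A_x=x[1]-x[0],A_y=y[1]-y[0],A_z=z[1]-z[0]; /* Edge A: v0->v1 */
  const double B_x=x[2]-x[1],B_y=y[2]-y[1],B_z=z[2]-z[1]; /* Edge B: v1->v2 */
  const double C_x=A_y*B_z-B_y*A_z; /* Cross-product C = A x B */
  const double C_y=-A_x*B_z+B_x*A_z;
  const double C_z=A_x*B_y-B_x*A_y;
  return (C_x*x[1]+C_y*y[1]+C_z*z[1] > 0.0); /* Dot-product R . C at shared vertex */
} /* !tst_ccw_sph() */
/* Example: equatorial corners listed CCW, (lat,lon) = (0,0), (0,1), (1,1), give a
   positive dot-product so tst_ccw_sph() returns 1; swapping the first two
   vertices reverses the orientation and it returns 0 */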
#include "nco_rgr.h" /* Regridding */ extern double min_dbl(double a, double b); extern double max_dbl(double a, double b); inline double min_dbl(double a, double b){return (a < b) ? a : b;} inline double max_dbl(double a, double b){return (a > b) ? a : b;} int /* O [enm] Return code */ nco_rgr_ctl /* [fnc] Control regridding logic */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Control regridding logic */ int rcd=NCO_NOERR; const char fnc_nm[]="nco_rgr_ctl()"; nco_bool flg_grd=False; /* [flg] Create SCRIP-format grid file */ nco_bool flg_map=False; /* [flg] Create ESMF-format mapfile */ nco_bool flg_nfr=False; /* [flg] Infer SCRIP-format grid file */ nco_bool flg_smf=False; /* [flg] ESMF regridding (unused) */ nco_bool flg_s1d=False; /* [flg] Unpack sparse-1D CLM/ELM variables */ nco_bool flg_tps=False; /* [flg] Tempest regridding (unused) */ nco_bool flg_vrt=False; /* [flg] Interpolate to new vertical grid */ nco_bool flg_wgt=False; /* [flg] Regrid with external weights */ /* Main control branching occurs here Branching complexity and utility will increase as regridding features are added */ if(rgr->flg_grd) flg_grd=True; if(rgr->flg_grd_src && rgr->flg_grd_dst && rgr->flg_wgt) flg_map=True; if(rgr->flg_nfr) flg_nfr=True; if(rgr->flg_wgt && !(rgr->flg_grd_src && rgr->flg_grd_dst)) flg_wgt=True; if(rgr->flg_s1d) flg_s1d=True; if(rgr->fl_vrt) flg_vrt=True; assert(!flg_smf); assert(!flg_tps); /* Create SCRIP-format grid file */ if(flg_grd) rcd=nco_grd_mk(rgr); /* Create ESMF-format map file */ if(flg_map) rcd=nco_map_mk(rgr); /* Infer SCRIP-format grid file from data file */ if(flg_nfr) rcd=nco_grd_nfr(rgr); /* Interpolate data file to new vertical grid */ if(flg_vrt) rcd=nco_ntp_vrt(rgr,trv_tbl); /* Unpack sparse-1D CLM/ELM variables into full file */ if(flg_s1d) rcd=nco_s1d_unpack(rgr,trv_tbl); /* Regrid data horizontally using weights from mapping file */ if(flg_wgt) rcd=nco_rgr_wgt(rgr,trv_tbl); /* Regrid using ESMF library 20150701: On-line weight generation with ESMF never worked well and was abandoned */ if(flg_smf){ #ifdef ENABLE_ESMF (void)fprintf(stderr,"%s: %s calling nco_rgr_esmf() to generate and apply regridding map\n",nco_prg_nm_get(),fnc_nm); rcd=nco_rgr_esmf(rgr); /* Close output and free dynamic memory */ (void)nco_fl_out_cls(rgr->fl_out,rgr->fl_out_tmp,rgr->out_id); #else /* !ENABLE_ESMF */ (void)fprintf(stderr,"%s: ERROR %s reports attempt to use ESMF regridding without built-in support. 
Re-configure with --enable_esmf.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); #endif /* !ENABLE_ESMF */ } /* !flg_smf */ /* Regrid using TempestRemap regridding 20180314: Weight generation with Tempest is implemented off-line via ncremap, not internally on-line However, do not deprecate this since TempestRemap2 has a library that could be accessed on-line */ if(flg_tps) rcd=nco_rgr_tps(rgr); return rcd; } /* end nco_rgr_ctl() */ rgr_sct * /* O [sct] Pointer to free'd regridding structure */ nco_rgr_free /* [fnc] Deallocate regridding structure */ (rgr_sct *rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Free all dynamic memory in regridding structure */ /* free() standalone command-line arguments */ if(rgr->cmd_ln) rgr->cmd_ln=(char *)nco_free(rgr->cmd_ln); if(rgr->grd_ttl) rgr->grd_ttl=(char *)nco_free(rgr->grd_ttl); if(rgr->fl_grd_src) rgr->fl_grd_src=(char *)nco_free(rgr->fl_grd_src); if(rgr->fl_grd_dst) rgr->fl_grd_dst=(char *)nco_free(rgr->fl_grd_dst); if(rgr->fl_hrz) rgr->fl_hrz=(char *)nco_free(rgr->fl_hrz); if(rgr->fl_in) rgr->fl_in=(char *)nco_free(rgr->fl_in); if(rgr->fl_map) rgr->fl_map=(char *)nco_free(rgr->fl_map); if(rgr->fl_msh) rgr->fl_msh=(char *)nco_free(rgr->fl_msh); if(rgr->fl_out) rgr->fl_out=(char *)nco_free(rgr->fl_out); if(rgr->fl_out_tmp) rgr->fl_out_tmp=(char *)nco_free(rgr->fl_out_tmp); if(rgr->fl_vrt) rgr->fl_vrt=(char *)nco_free(rgr->fl_vrt); if(rgr->var_nm) rgr->var_nm=(char *)nco_free(rgr->var_nm); if(rgr->xtn_var) rgr->xtn_var=(char **)nco_sng_lst_free(rgr->xtn_var,rgr->xtn_nbr); /* free() strings associated with grid properties */ if(rgr->fl_grd) rgr->fl_grd=(char *)nco_free(rgr->fl_grd); if(rgr->fl_hnt_dst) rgr->fl_hnt_dst=(char *)nco_free(rgr->fl_hnt_dst); if(rgr->fl_hnt_src) rgr->fl_hnt_src=(char *)nco_free(rgr->fl_hnt_src); if(rgr->fl_skl) rgr->fl_skl=(char *)nco_free(rgr->fl_skl); if(rgr->fl_ugrid) rgr->fl_ugrid=(char *)nco_free(rgr->fl_ugrid); /* Tempest */ if(rgr->drc_tps) rgr->drc_tps=(char *)nco_free(rgr->drc_tps); /* free() memory used to construct KVMs */ if(rgr->rgr_nbr > 0) rgr->rgr_arg=nco_sng_lst_free(rgr->rgr_arg,rgr->rgr_nbr); /* free() memory copied from KVMs */ if(rgr->area_nm) rgr->area_nm=(char *)nco_free(rgr->area_nm); if(rgr->bnd_nm) rgr->bnd_nm=(char *)nco_free(rgr->bnd_nm); if(rgr->bnd_tm_nm) rgr->bnd_tm_nm=(char *)nco_free(rgr->bnd_tm_nm); if(rgr->col_nm_in) rgr->col_nm_in=(char *)nco_free(rgr->col_nm_in); if(rgr->col_nm_out) rgr->col_nm_out=(char *)nco_free(rgr->col_nm_out); if(rgr->frc_nm) rgr->frc_nm=(char *)nco_free(rgr->frc_nm); if(rgr->ilev_nm_in) rgr->ilev_nm_in=(char *)nco_free(rgr->ilev_nm_in); if(rgr->ilev_nm_out) rgr->ilev_nm_out=(char *)nco_free(rgr->ilev_nm_out); if(rgr->lat_bnd_nm) rgr->lat_bnd_nm=(char *)nco_free(rgr->lat_bnd_nm); if(rgr->lat_nm_in) rgr->lat_nm_in=(char *)nco_free(rgr->lat_nm_in); if(rgr->lat_nm_out) rgr->lat_nm_out=(char *)nco_free(rgr->lat_nm_out); if(rgr->lat_vrt_nm) rgr->lat_vrt_nm=(char *)nco_free(rgr->lat_vrt_nm); if(rgr->lat_wgt_nm) rgr->lat_wgt_nm=(char *)nco_free(rgr->lat_wgt_nm); if(rgr->lev_nm_in) rgr->lev_nm_in=(char *)nco_free(rgr->lev_nm_in); if(rgr->lev_nm_out) rgr->lev_nm_out=(char *)nco_free(rgr->lev_nm_out); if(rgr->lon_bnd_nm) rgr->lon_bnd_nm=(char *)nco_free(rgr->lon_bnd_nm); if(rgr->lon_nm_in) rgr->lon_nm_in=(char *)nco_free(rgr->lon_nm_in); if(rgr->lon_nm_out) rgr->lon_nm_out=(char *)nco_free(rgr->lon_nm_out); if(rgr->lon_vrt_nm) rgr->lon_vrt_nm=(char *)nco_free(rgr->lon_vrt_nm); if(rgr->msk_nm) rgr->msk_nm=(char *)nco_free(rgr->msk_nm); if(rgr->plev_nm_in) 
rgr->plev_nm_in=(char *)nco_free(rgr->plev_nm_in);
if(rgr->vrt_nm) rgr->vrt_nm=(char *)nco_free(rgr->vrt_nm);
/* Lastly, free() regrid structure itself */
if(rgr) rgr=(rgr_sct *)nco_free(rgr);
return rgr;
} /* end nco_rgr_free() */

rgr_sct * /* O [sct] Regridding structure */
nco_rgr_ini /* [fnc] Initialize regridding structure */
(const char * const cmd_ln, /* I [sng] Command-line */
 const int in_id, /* I [id] Input netCDF file ID */
 char **rgr_arg, /* [sng] Regridding arguments */
 const int rgr_arg_nbr, /* [nbr] Number of regridding arguments */
 char * const rgr_in, /* I [sng] File containing fields to be regridded */
 char * const rgr_out, /* I [sng] File containing regridded fields */
 char * const rgr_grd_src, /* I [sng] File containing input grid */
 char * const rgr_grd_dst, /* I [sng] File containing destination grid */
 char * const rgr_hrz, /* I [sng] File containing horizontal coordinate grid */
 char * const rgr_map, /* I [sng] File containing mapping weights from source to destination grid */
 char * const rgr_var, /* I [sng] Variable for special regridding treatment */
 char * const rgr_vrt, /* I [sng] File containing vertical coordinate grid */
 const double wgt_vld_thr, /* I [frc] Weight threshold for valid destination value */
 char **xtn_var, /* I [sng] Extensive variables */
 const int xtn_nbr) /* I [nbr] Number of extensive variables */
{
  /* Purpose: Initialize regridding structure */
  const char fnc_nm[]="nco_rgr_ini()";
  rgr_sct *rgr;
  /* Allocate */
  rgr=(rgr_sct *)nco_malloc(sizeof(rgr_sct));
  /* Initialize variables directly or indirectly set via command-line (except for key-value arguments) */
  rgr->cmd_ln=strdup(cmd_ln); /* [sng] Command-line */
  rgr->flg_usr_rqs=False; /* [flg] User requested regridding */
  rgr->out_id=int_CEWI; /* [id] Output netCDF file ID */
  rgr->in_id=in_id; /* [id] Input netCDF file ID */
  rgr->rgr_arg=rgr_arg; /* [sng] Regridding arguments */
  rgr->rgr_nbr=rgr_arg_nbr; /* [nbr] Number of regridding arguments */
  rgr->drc_tps=NULL; /* [sng] Directory where Tempest grids, meshes, and weights are stored */
  rgr->flg_grd_src= rgr_grd_src ? True : False; /* [flg] User-specified input grid */
  rgr->fl_grd_src=rgr_grd_src; /* [sng] File containing input grid */
  rgr->flg_grd_dst= rgr_grd_dst ? True : False; /* [flg] User-specified destination grid */
  rgr->fl_grd_dst=rgr_grd_dst; /* [sng] File containing destination grid */
  rgr->fl_in=rgr_in; /* [sng] File containing fields to be regridded */
  rgr->fl_out=rgr_out; /* [sng] File containing regridded fields */
  rgr->fl_out_tmp=NULL_CEWI; /* [sng] Temporary file containing regridded fields */
  rgr->flg_wgt= rgr_map ? True : False; /* [flg] User-specified mapping weights */
  rgr->fl_map=rgr_map; /* [sng] File containing mapping weights from source to destination grid */
  rgr->fl_hrz=rgr_hrz; /* [sng] File containing horizontal coordinate grid (for S1D) */
  rgr->fl_vrt=rgr_vrt; /* [sng] File containing vertical coordinate grid */
  rgr->var_nm=rgr_var; /* [sng] Variable for special regridding treatment */
  rgr->xtn_var=xtn_var; /* [sng] Extensive variables */
  rgr->xtn_nbr=xtn_nbr; /* [nbr] Number of extensive variables */
  /* Did user explicitly request regridding?
*/
  if(rgr_arg_nbr > 0 || rgr_grd_src != NULL || rgr_grd_dst != NULL || rgr_map != NULL || rgr_vrt != NULL) rgr->flg_usr_rqs=True;
  /* Initialize arguments after copying */
  if(!rgr->fl_out) rgr->fl_out=(char *)strdup("/data/zender/rgr/rgr_out.nc");
  if(!rgr->fl_grd_dst) rgr->fl_grd_dst=(char *)strdup("/data/zender/scrip/grids/remap_grid_T42.nc");
  // if(!rgr->var_nm) rgr->var_nm=(char *)strdup("ORO");
  if(nco_dbg_lvl_get() >= nco_dbg_crr){
    (void)fprintf(stderr,"%s: INFO %s reports ",nco_prg_nm_get(),fnc_nm);
    (void)fprintf(stderr,"flg_usr_rqs = %d, ",rgr->flg_usr_rqs);
    (void)fprintf(stderr,"rgr_nbr = %d, ",rgr->rgr_nbr);
    (void)fprintf(stderr,"fl_grd_src = %s, ",rgr->fl_grd_src ? rgr->fl_grd_src : "NULL");
    (void)fprintf(stderr,"fl_grd_dst = %s, ",rgr->fl_grd_dst ? rgr->fl_grd_dst : "NULL");
    (void)fprintf(stderr,"fl_hrz = %s, ",rgr->fl_hrz ? rgr->fl_hrz : "NULL");
    (void)fprintf(stderr,"fl_in = %s, ",rgr->fl_in ? rgr->fl_in : "NULL");
    (void)fprintf(stderr,"fl_out = %s, ",rgr->fl_out ? rgr->fl_out : "NULL");
    (void)fprintf(stderr,"fl_out_tmp = %s, ",rgr->fl_out_tmp ? rgr->fl_out_tmp : "NULL");
    (void)fprintf(stderr,"fl_map = %s, ",rgr->fl_map ? rgr->fl_map : "NULL");
    (void)fprintf(stderr,"fl_vrt = %s, ",rgr->fl_vrt ? rgr->fl_vrt : "NULL");
    (void)fprintf(stderr,"\n");
  } /* endif dbg */
  /* Flags */
  if(wgt_vld_thr == NC_MIN_DOUBLE){
    rgr->flg_rnr=False;
  }else if(wgt_vld_thr >= 0.0 && wgt_vld_thr <= 1.0){
    /* NB: Weight thresholds of 0.0 or nearly zero can lead to underflow or divide-by-zero errors */
    // const double wgt_vld_thr_min=1.0e-10; /* [frc] Minimum weight threshold for valid destination value */
    rgr->flg_rnr=True;
    rgr->wgt_vld_thr=wgt_vld_thr;
  }else{
    (void)fprintf(stderr,"%s: ERROR weight threshold must be in [0.0,1.0] and user supplied wgt_vld_thr = %g\n",nco_prg_nm_get(),wgt_vld_thr);
    nco_exit(EXIT_FAILURE);
  } /* endif */
  /* Parse extended kvm options */
  char *sng_fnl=NULL;
  int cnv_nbr; /* [nbr] Number of elements converted by sscanf() */
  int rgr_var_idx; /* [idx] Index over rgr_lst (i.e., all names explicitly specified in all "--rgr var1[,var2]=val" options) */
  int rgr_var_nbr=0;
  kvm_sct *rgr_lst=NULL; /* [sct] List of all regrid specifications */
  if(rgr_arg_nbr > 0){
    /* Join arguments together */
    sng_fnl=nco_join_sng(rgr_arg,rgr_arg_nbr);
    rgr_lst=nco_arg_mlt_prs(sng_fnl);
    if(sng_fnl) sng_fnl=(char *)nco_free(sng_fnl);
    /* Count number of keys */
    for(rgr_var_idx=0;(rgr_lst+rgr_var_idx)->key;rgr_var_idx++,rgr_var_nbr++); /* !rgr_var_idx */
  } /* !rgr_arg_nbr */
  /* NULL-initialize key-value properties required for string variables */
  rgr->area_nm=NULL; /* [sng] Name of variable containing gridcell area */
  rgr->bnd_nm=NULL; /* [sng] Name of dimension to employ for spatial bounds */
  rgr->bnd_tm_nm=NULL; /* [sng] Name of dimension to employ for temporal bounds */
  rgr->col_nm_in=NULL; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */
  rgr->col_nm_out=NULL; /* [sng] Name of horizontal spatial output dimension on unstructured grid */
  rgr->frc_nm=NULL; /* [sng] Name of variable containing gridcell fraction */
  rgr->ilev_nm_in=NULL; /* [sng] Name of input dimension to recognize as vertical dimension at layer interfaces */
  rgr->ilev_nm_out=NULL; /* [sng] Name of output vertical dimension at layer interfaces */
  rgr->lat_bnd_nm=NULL; /* [sng] Name of rectangular boundary variable for latitude */
  rgr->lat_dmn_nm=NULL; /* [sng] Name of latitude dimension in inferred grid */
  rgr->lat_nm_in=NULL; /* [sng] Name of input dimension to recognize as latitude */
  rgr->lat_nm_out=NULL;
/* [sng] Name of output dimension for latitude */ rgr->lat_vrt_nm=NULL; /* [sng] Name of non-rectangular boundary variable for latitude */ rgr->lat_wgt_nm=NULL; /* [sng] Name of variable containing latitude weights */ rgr->lev_nm_in=NULL; /* [sng] Name of input dimension to recognize as vertical dimension at layer midpoints */ rgr->lev_nm_out=NULL; /* [sng] Name of output vertical dimension at layer midpoints */ rgr->lon_bnd_nm=NULL; /* [sng] Name of rectangular boundary variable for longitude */ rgr->lon_dmn_nm=NULL; /* [sng] Name of longitude dimension in inferred grid */ rgr->lon_nm_in=NULL; /* [sng] Name of dimension to recognize as longitude */ rgr->lon_nm_out=NULL; /* [sng] Name of output dimension for longitude */ rgr->lon_vrt_nm=NULL; /* [sng] Name of non-rectangular boundary variable for longitude */ rgr->msk_nm=NULL; /* [sng] Name of variable containing destination mask */ rgr->plev_nm_in=NULL; /* [sng] Name of input variable recognize as pure-pressure coordinate */ rgr->sgs_frc_nm=NULL; /* [sng] Name of variable sub-gridscale fraction */ rgr->sgs_msk_nm=NULL; /* [sng] Name of variable sub-gridscale mask */ rgr->vrt_nm=NULL; /* [sng] Name of dimension to employ for vertices */ /* Initialize key-value properties used in grid and weight generation */ rgr->area_mth=1; /* [enm] Method to compute grid cell area */ rgr->edg_typ=nco_edg_nil; /* [enm] Edge/Arc-type for triangle edges */ rgr->fl_grd=NULL; /* [sng] Name of SCRIP grid file to create */ rgr->fl_hnt_dst=NULL; /* [sng] ERWG hint destination */ rgr->fl_hnt_src=NULL; /* [sng] ERWG hint source */ rgr->fl_msh=NULL; /* [sng] Name of SCRIP intersection mesh file to create */ rgr->fl_skl=NULL; /* [sng] Name of skeleton data file to create */ rgr->fl_ugrid=NULL; /* [sng] Name of UGRID grid file to create */ rgr->flg_add_fll=False; /* [flg] Add _FillValue to fields with empty destination cells */ rgr->flg_area_out=True; /* [flg] Add area to output */ rgr->flg_cf_units=False; /* [flg] Generate CF-compliant (breaks ERWG 7.1.0r-) units fields in SCRIP-format grid files */ rgr->flg_cll_msr=True; /* [flg] Add cell_measures attribute */ rgr->flg_crv=False; /* [flg] Use curvilinear coordinates */ rgr->flg_dgn_area=False; /* [flg] Diagnose rather than copy inferred area */ rgr->flg_dgn_bnd=False; /* [flg] Diagnose rather than copy inferred bounds */ rgr->flg_erwg_units=True; /* [flg] Generate ERWG 7.1.0r-compliant SCRIP-format grid files */ rgr->flg_grd=False; /* [flg] Create SCRIP-format grid file */ rgr->flg_msk_apl=False; /* [flg] Apply msk_out to variables after regridding */ rgr->flg_msk_out=False; /* [flg] Add mask to output */ rgr->flg_nfr=False; /* [flg] Infer SCRIP-format grid file */ rgr->flg_s1d=False; /* [flg] Unpack sparse-1D CLM/ELM variables */ rgr->flg_stg=True; /* [flg] Write staggered grid with FV output */ rgr->grd_ttl=strdup("None given (supply with --rgr grd_ttl=\"Grid Title\")"); /* [enm] Grid title */ rgr->grd_typ=nco_grd_2D_eqa; /* [enm] Grid type */ rgr->idx_dbg=0; /* [idx] Index of gridcell for debugging */ rgr->lat_drc=nco_grd_lat_drc_s2n; /* [enm] Latitude grid direction */ rgr->lat_typ=nco_grd_lat_eqa; /* [enm] Latitude grid type */ rgr->lon_typ=nco_grd_lon_Grn_ctr; /* [enm] Longitude grid type */ rgr->lat_nbr=180; /* [nbr] Number of latitudes in destination grid */ rgr->lon_nbr=360; /* [nbr] Number of longitudes in destination grid */ rgr->lat_crv=0.0; /* [dgr] Latitudinal curvilinearity */ rgr->lon_crv=0.0; /* [dgr] Longitudinal curvilinearity */ rgr->lat_sth=NC_MAX_DOUBLE; /* [dgr] Latitude of southern edge of 
grid */ rgr->lon_wst=NC_MAX_DOUBLE; /* [dgr] Longitude of western edge of grid */ rgr->lat_nrt=NC_MAX_DOUBLE; /* [dgr] Latitude of northern edge of grid */ rgr->lon_est=NC_MAX_DOUBLE; /* [dgr] Longitude of eastern edge of grid */ rgr->msk_var=NULL; /* [sng] Mask-template variable */ rgr->ply_tri_mth=nco_ply_tri_mth_csz; /* [enm] Polygon-to-triangle decomposition method */ rgr->sgs_nrm=1.0; /* [sng] Sub-gridscale normalization */ rgr->tst=0L; /* [enm] Generic key for testing (undocumented) */ rgr->ntp_mth=nco_ntp_log; /* [enm] Interpolation method */ rgr->xtr_mth=nco_xtr_fll_ngh; /* [enm] Extrapolation method */ rgr->xtr_nsp=8; /* [sng] Extrapolation number of source points */ rgr->xtr_xpn=2.0; /* [sng] Exponent of distance in extrapolation (absolute value) */ rgr->wgt_typ=nco_wgt_con; /* [enm] Weight generation method */ /* Parse key-value properties */ char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */ for(rgr_var_idx=0;rgr_var_idx<rgr_var_nbr;rgr_var_idx++){ if(!strcmp(rgr_lst[rgr_var_idx].key,"grid") || !strcasecmp(rgr_lst[rgr_var_idx].key,"scrip")){ rgr->fl_grd=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_grd=True; continue; } /* !grid */ if(!strcmp(rgr_lst[rgr_var_idx].key,"hnt_dst") || !strcmp(rgr_lst[rgr_var_idx].key,"fl_hnt_dst")){ rgr->fl_hnt_dst=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !hnt_dst */ if(!strcmp(rgr_lst[rgr_var_idx].key,"hnt_src") || !strcmp(rgr_lst[rgr_var_idx].key,"fl_hnt_src")){ rgr->fl_hnt_src=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !hnt_src */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_var") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_var") || !strcmp(rgr_lst[rgr_var_idx].key,"mask") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_variable")){ rgr->msk_var=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_msk_out=True; continue; } /* !msk_var */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msh") || !strcmp(rgr_lst[rgr_var_idx].key,"mesh")){ rgr->fl_msh=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !msh */ if(!strcmp(rgr_lst[rgr_var_idx].key,"skl")){ rgr->fl_skl=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_grd=True; continue; } /* !skl */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"ugrid")){ rgr->fl_ugrid=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_nfr=True; continue; } /* !ugrid */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"fl_hrz") || !strcasecmp(rgr_lst[rgr_var_idx].key,"hrz")){ rgr->fl_hrz=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !hrz */ if(!strcasecmp(rgr_lst[rgr_var_idx].key,"fl_vrt") || !strcasecmp(rgr_lst[rgr_var_idx].key,"vrt")){ rgr->fl_vrt=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !vrt */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_area") || !strcmp(rgr_lst[rgr_var_idx].key,"no_area_out")){ rgr->flg_area_out=False; continue; } /* !area */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_msk") || !strcmp(rgr_lst[rgr_var_idx].key,"no_msk_out") || !strcmp(rgr_lst[rgr_var_idx].key,"no_mask") || !strcmp(rgr_lst[rgr_var_idx].key,"no_mask_out")){ rgr->flg_msk_out=False; continue; } /* !msk */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_apl") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_apply")){ rgr->flg_msk_apl=True; /* Ensure masked fields regridded with TR maps have _FillValue to guarantee BFB arithmetic with masked fields regridded with other maps that adhere to SCRIP/ESMF mask rules */ rgr->flg_add_fll=True; continue; } /* !msk_apl */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_out") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_out")){ rgr->flg_msk_out=True; continue; } /* !mask */ 
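/* Usage sketch (file names and title below are hypothetical): the key=value pairs
   matched in this loop arrive from command-line options such as
     ncks --rgr nfr --rgr ugrid=ugrid.nc --rgr grd_ttl="My Grid" in.nc out.nc
   All --rgr arguments are first joined by nco_join_sng(), split into kvm_sct
   key/value pairs by nco_arg_mlt_prs(), then compared key-by-key in this loop
   until one of these blocks claims the pair and issues continue */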
if(!strcmp(rgr_lst[rgr_var_idx].key,"add_fll") || !strcmp(rgr_lst[rgr_var_idx].key,"add_fill_value") || !strcmp(rgr_lst[rgr_var_idx].key,"fll_mpt") || !strcmp(rgr_lst[rgr_var_idx].key,"fill_empty")){ rgr->flg_add_fll=True; continue; } /* !add_fll */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_measures") || !strcmp(rgr_lst[rgr_var_idx].key,"cll_msr")){ rgr->flg_cll_msr=True; continue; } /* !cell_measures */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_cell_measures") || !strcmp(rgr_lst[rgr_var_idx].key,"no_cll_msr")){ rgr->flg_cll_msr=False; continue; } /* !cell_measures */ if(!strcmp(rgr_lst[rgr_var_idx].key,"curvilinear") || !strcmp(rgr_lst[rgr_var_idx].key,"crv")){ rgr->flg_crv=True; continue; } /* !curvilinear */ if(!strcmp(rgr_lst[rgr_var_idx].key,"diagnose_area") || !strcmp(rgr_lst[rgr_var_idx].key,"dgn_area")){ rgr->flg_dgn_area=True; continue; } /* !diagnose_area */ if(!strcmp(rgr_lst[rgr_var_idx].key,"diagnose_bounds") || !strcmp(rgr_lst[rgr_var_idx].key,"dgn_bnd")){ rgr->flg_dgn_bnd=True; continue; } /* !diagnose_bounds */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cf_units") || !strcmp(rgr_lst[rgr_var_idx].key,"CF_units")){ rgr->flg_cf_units=True; rgr->flg_erwg_units=False; continue; } /* !erwg_units */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_area_quad")){ rgr->area_mth=2; continue; } /* !area_nco */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_area_nco")){ rgr->area_mth=1; continue; } /* !area_nco */ if(!strcmp(rgr_lst[rgr_var_idx].key,"edg_typ") || !strcmp(rgr_lst[rgr_var_idx].key,"tri_arc") || !strcmp(rgr_lst[rgr_var_idx].key,"vrt_cnc")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"grt_crc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"gtc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"great_circle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"geodesic") || !strcasecmp(rgr_lst[rgr_var_idx].val,"orthodrome")){ rgr->edg_typ=nco_edg_gtc; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"sml_crc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ltr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"small_circle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"latitude_triangle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"true")){ rgr->edg_typ=nco_edg_smc; (void)fprintf(stderr,"%s: WARNING Requested to run with small-circle edges. This option has not yet been tested and validated. 
Use only at your own risk.\n",nco_prg_nm_get());
    }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"crt") || !strcasecmp(rgr_lst[rgr_var_idx].val,"cartesian") || !strcasecmp(rgr_lst[rgr_var_idx].val,"planar") || !strcasecmp(rgr_lst[rgr_var_idx].val,"flat")){
      rgr->edg_typ=nco_edg_crt;
    }else{
      (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
      abort();
    } /* !val */
    continue;
  } /* !edg_typ */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"erwg_units") || !strcmp(rgr_lst[rgr_var_idx].key,"esmf_units") || !strcmp(rgr_lst[rgr_var_idx].key,"degrees")){ rgr->flg_cf_units=False; rgr->flg_erwg_units=True; continue; } /* !erwg_units */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"infer") || !strcmp(rgr_lst[rgr_var_idx].key,"nfr")){ rgr->flg_nfr=True; continue; } /* !infer */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"no_stagger") || !strcmp(rgr_lst[rgr_var_idx].key,"no_stg")){ rgr->flg_stg=False; continue; } /* !stagger */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"grd_ttl") || !strcmp(rgr_lst[rgr_var_idx].key,"ttl")){
    if(rgr->grd_ttl) rgr->grd_ttl=(char *)nco_free(rgr->grd_ttl);
    rgr->grd_ttl=(char *)strdup(rgr_lst[rgr_var_idx].val);
    /* 20180828 Replace unquoted tildes with spaces (like LaTeX, NCL) so ncremap users can put tildes in place of spaces in ttl
       20180905 Reverted this since quoting command in ncremap is superior solution */
    if(False){
      size_t ttl_lng=strlen(rgr->grd_ttl);
      for(size_t ttl_idx=0L;ttl_idx<ttl_lng;ttl_idx++)
        if(rgr->grd_ttl[ttl_idx] == '~'){
          if(ttl_idx == 0L) rgr->grd_ttl[ttl_idx]=' '; // Always convert tilde to space if first character
          else if(rgr->grd_ttl[ttl_idx-1L] != '\\') rgr->grd_ttl[ttl_idx]=' '; // Convert tilde in other locations unless backslash-quoted
        } /* !tilde */
    } /* !0 */
    continue;
  } /* !grd_ttl */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"idx_dbg")){ rgr->idx_dbg=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !idx_dbg */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"latlon")){ cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%ld,%ld",&rgr->lat_nbr,&rgr->lon_nbr); assert(cnv_nbr == 2); continue; } /* !latlon */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"lonlat")){ cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%ld,%ld",&rgr->lon_nbr,&rgr->lat_nbr); assert(cnv_nbr == 2); continue; } /* !lonlat */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nbr")){ rgr->lat_nbr=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !lat_nbr */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nbr")){ rgr->lon_nbr=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !lon_nbr */
  if(!strcasecmp(rgr_lst[rgr_var_idx].key,"snwe")){
    cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%lf,%lf,%lf,%lf",&rgr->lat_sth,&rgr->lat_nrt,&rgr->lon_wst,&rgr->lon_est);
    if(cnv_nbr != 4) (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
    assert(cnv_nbr == 4);
    if(cnv_nbr != 4) abort(); /* CEWI Use cnv_nbr at least once outside of assert() to avoid gcc 4.8.2 set-but-not-used warning */
    continue;
  } /* !snwe */
  if(!strcasecmp(rgr_lst[rgr_var_idx].key,"wesn")){
cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%lf,%lf,%lf,%lf",&rgr->lon_wst,&rgr->lon_est,&rgr->lat_sth,&rgr->lat_nrt);
    assert(cnv_nbr == 4);
    continue;
  } /* !wesn */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_crv")){ rgr->lat_crv=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !lat_crv */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_crv")){ rgr->lon_crv=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !lon_crv */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_sth")){
    rgr->lat_sth=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
    if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
    // rgr->lat_typ=nco_grd_lat_bb;
    continue;
  } /* !lat_sth */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_wst")){
    rgr->lon_wst=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
    if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
    rgr->lon_typ=nco_grd_lon_bb;
    continue;
  } /* !lon_wst */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nrt")){
    rgr->lat_nrt=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
    if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
    //rgr->lat_typ=nco_grd_lat_bb;
    continue;
  } /* !lat_nrt */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_est")){
    rgr->lon_est=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd);
    if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd);
    rgr->lon_typ=nco_grd_lon_bb;
    continue;
  } /* !lon_est */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_drc")){
    if(!strcasecmp(rgr_lst[rgr_var_idx].val,"s2n") || !strcasecmp(rgr_lst[rgr_var_idx].val,"south2north") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ston") || !strcasecmp(rgr_lst[rgr_var_idx].val,"southnorth")){
      rgr->lat_drc=nco_grd_lat_drc_s2n;
    }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"n2s") || !strcasecmp(rgr_lst[rgr_var_idx].val,"north2south") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ntos") || !strcasecmp(rgr_lst[rgr_var_idx].val,"northsouth")){
      rgr->lat_drc=nco_grd_lat_drc_n2s;
    }else{
      (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
      abort();
    } /* !val */
    continue;
  } /* !lat_drc */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_typ")){
    if(!strcasecmp(rgr_lst[rgr_var_idx].val,"cap") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fv") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fix") || !strcasecmp(rgr_lst[rgr_var_idx].val,"yarmulke")){
      rgr->lat_typ=nco_grd_lat_fv;
      rgr->grd_typ=nco_grd_2D_fv;
    }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"eqa") || !strcasecmp(rgr_lst[rgr_var_idx].val,"rgl") || !strcasecmp(rgr_lst[rgr_var_idx].val,"unf") || !strcasecmp(rgr_lst[rgr_var_idx].val,"uni")){
      rgr->lat_typ=nco_grd_lat_eqa;
      rgr->grd_typ=nco_grd_2D_eqa;
    }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"gss")){
      rgr->lat_typ=nco_grd_lat_gss;
      rgr->grd_typ=nco_grd_2D_gss;
    }else{
      (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
      abort();
    } /* !val */
    continue;
  } /* !lat_typ */
  if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_typ")){
    if(!strcasecmp(rgr_lst[rgr_var_idx].val,"180_wst") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wst_180")) rgr->lon_typ=nco_grd_lon_180_wst;
    else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"180_ctr") ||
!strcasecmp(rgr_lst[rgr_var_idx].val,"ctr_180")) rgr->lon_typ=nco_grd_lon_180_ctr; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"Grn_wst") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wst_Grn")) rgr->lon_typ=nco_grd_lon_Grn_wst; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"Grn_ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ctr_Grn")) rgr->lon_typ=nco_grd_lon_Grn_ctr; else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !lon_typ */ if(!strcmp(rgr_lst[rgr_var_idx].key,"area_nm")){ rgr->area_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !area_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"bnd_nm")){ rgr->bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"bnd_tm_nm")){ rgr->bnd_tm_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !bnd_tm_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"col_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"col_nm")){ rgr->col_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !col_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"col_nm_out")){ rgr->col_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !col_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"frc_nm")){ rgr->frc_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !frc_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm")){ rgr->ilev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !ilev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm_out")){ rgr->ilev_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !ilev_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_bnd_nm")){ rgr->lat_bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_dmn_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"lat_dmn")){ rgr->lat_dmn_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_dmn_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lat_nm")){ rgr->lat_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nm_out")){ rgr->lat_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_vrt_nm")){ rgr->lat_vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_wgt_nm")){ rgr->lat_wgt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_wgt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lev_nm")){ rgr->lev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lev_nm_out")){ rgr->lev_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lev_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_bnd_nm")){ rgr->lon_bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_dmn_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"lon_dmn")){ rgr->lon_dmn_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_dmn_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lon_nm")){ rgr->lon_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_nm_in 
*/ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nm_out")){ rgr->lon_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_vrt_nm")){ rgr->lon_vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"plev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"plev_nm")){ rgr->plev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !plev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ply_tri")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"csz")){ rgr->ply_tri_mth=nco_ply_tri_mth_csz; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"centroid") || !strcasecmp(rgr_lst[rgr_var_idx].val,"snl") || !strcasecmp(rgr_lst[rgr_var_idx].val,"mat")){ rgr->ply_tri_mth=nco_ply_tri_mth_ctr; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !ply_tri */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_frc_nm")){ rgr->sgs_frc_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !sgs_frc */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_msk_nm")){ rgr->sgs_msk_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !sgs_msk */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_nrm")){ rgr->sgs_nrm=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !sgs_nrm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"tst")){ rgr->tst=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !tst */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_nm")){ rgr->msk_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_msk_out=True; continue; } /* !msk_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"vrt_nm")){ rgr->vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"vrt_ntp") || !strcmp(rgr_lst[rgr_var_idx].key,"ntp_mth")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"lin") || !strcasecmp(rgr_lst[rgr_var_idx].val,"linear") || !strcasecmp(rgr_lst[rgr_var_idx].val,"lnr")){ rgr->ntp_mth=nco_ntp_lnr; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"log") || !strcasecmp(rgr_lst[rgr_var_idx].val,"logarithmic") || !strcasecmp(rgr_lst[rgr_var_idx].val,"lgr")){ rgr->ntp_mth=nco_ntp_log; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !ntp_mth */ if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_mth")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"nrs_ngh") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ngh") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nearest_neighbor") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nn")){ rgr->xtr_mth=nco_xtr_fll_ngh; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"mss_val") || !strcasecmp(rgr_lst[rgr_var_idx].val,"msv") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fll_val") || !strcasecmp(rgr_lst[rgr_var_idx].val,"missing_value")){ rgr->xtr_mth=nco_xtr_fll_msv; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), 
aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !xtr_mth */ if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_nsp") || !strcmp(rgr_lst[rgr_var_idx].key,"xtr_nbr_src_pnt") || !strcmp(rgr_lst[rgr_var_idx].key,"number_source_points") || !strcmp(rgr_lst[rgr_var_idx].key,"extrapolation_number_source_points")){ rgr->xtr_nsp=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !xtr_nsp */ if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_xpn") || !strcmp(rgr_lst[rgr_var_idx].key,"extrapolation_exponent") || !strcmp(rgr_lst[rgr_var_idx].key,"exponent_of_distance_in_extrapolation")){ rgr->xtr_xpn=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !xtr_xpn */ if(!strcmp(rgr_lst[rgr_var_idx].key,"wgt_typ") || !strcmp(rgr_lst[rgr_var_idx].key,"weight_type")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"con") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_con") || !strcasecmp(rgr_lst[rgr_var_idx].val,"conservative") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_con")) rgr->wgt_typ=nco_wgt_con; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"idw") || !strcasecmp(rgr_lst[rgr_var_idx].val,"dwe") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_idw") || !strcasecmp(rgr_lst[rgr_var_idx].val,"distance_weighted") || !strcasecmp(rgr_lst[rgr_var_idx].val,"inverse_distance") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_idw")) rgr->wgt_typ=nco_wgt_idw; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"bln") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_bln") || !strcasecmp(rgr_lst[rgr_var_idx].val,"bilinear") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_bln")) rgr->wgt_typ=nco_wgt_bln; else { (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !wgt_typ */ (void)fprintf(stderr,"%s: ERROR %s reports unrecognized key-value option to --rgr switch: %s\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key); nco_exit(EXIT_FAILURE); } /* !rgr_var_idx */ /* Eliminate sticky wickets: Give nfr precedence over grd */ if(rgr->flg_nfr && rgr->flg_grd) rgr->flg_grd=False; /* Revert to defaults for any names not specified on command-line */ if(!rgr->area_nm) rgr->area_nm=(char *)strdup("area"); /* [sng] Name of variable containing gridcell area */ if(!rgr->bnd_nm) rgr->bnd_nm=(char *)strdup("nvertices"); /* [sng] Name of dimension to employ for spatial bounds */ /* NB: CESM uses nbnd and ilev for temporal and vertical bounds, respectively (CESM outputs no horizontal spatial bounds). NCO defaults to nbnd for all bounds with two endpoints. 
*/ if(!rgr->bnd_tm_nm) rgr->bnd_tm_nm=(char *)strdup("nbnd"); /* [sng] Name of dimension to employ for temporal bounds */ if(!rgr->col_nm_in) rgr->col_nm_in=(char *)strdup("ncol"); /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ if(!rgr->frc_nm) rgr->frc_nm=(char *)strdup("frac_b"); /* [sng] Name of variable containing gridcell fraction */ if(!rgr->ilev_nm_in) rgr->ilev_nm_in=(char *)strdup("ilev"); /* [sng] Name of input dimension to recognize as vertical dimension at layer interfaces */ if(!rgr->lat_bnd_nm) rgr->lat_bnd_nm=(char *)strdup("lat_bnds"); /* [sng] Name of rectangular boundary variable for latitude */ if(!rgr->lat_nm_in) rgr->lat_nm_in=(char *)strdup("lat"); /* [sng] Name of input dimension to recognize as latitude */ if(!rgr->lev_nm_in) rgr->lev_nm_in=(char *)strdup("lev"); /* [sng] Name of input dimension to recognize as vertical dimension at layer midpoints */ if(!rgr->lat_vrt_nm) rgr->lat_vrt_nm=(char *)strdup("lat_vertices"); /* [sng] Name of non-rectangular boundary variable for latitude */ if(!rgr->lat_wgt_nm) rgr->lat_wgt_nm=(char *)strdup("gw"); /* [sng] Name of variable containing latitude weights */ if(!rgr->lon_bnd_nm) rgr->lon_bnd_nm=(char *)strdup("lon_bnds"); /* [sng] Name of rectangular boundary variable for longitude */ if(!rgr->lon_nm_in) rgr->lon_nm_in=(char *)strdup("lon"); /* [sng] Name of dimension to recognize as longitude */ if(!rgr->lon_vrt_nm) rgr->lon_vrt_nm=(char *)strdup("lon_vertices"); /* [sng] Name of non-rectangular boundary variable for longitude */ if(!rgr->msk_nm) rgr->msk_nm=(char *)strdup("mask_b"); /* [sng] Name of variable containing destination mask */ if(!rgr->vrt_nm) rgr->vrt_nm=(char *)strdup("nv"); /* [sng] Name of dimension to employ for vertices */ if(!rgr->plev_nm_in) rgr->plev_nm_in=(char *)strdup("plev"); /* [sng] Name of variable to recognize as pure pressure coordinate */ /* Derived from defaults and command-line arguments */ // On second thought, do not strdup() these here. 
This way, NULL means user never specified lon/lat-out names // if(!rgr->col_nm_out) rgr->col_nm_out=(char *)strdup("ncol"); /* [sng] Name of dimension to output as horizontal spatial dimension on unstructured grid */ // if(!rgr->lat_nm_out) rgr->lat_nm_out=(char *)strdup("lat"); /* [sng] Name of dimension to output as latitude */ // if(!rgr->lon_nm_out) rgr->lon_nm_out=(char *)strdup("lon"); /* [sng] Name of dimension to output as longitude */ // if(!rgr->lat_nm_out) rgr->lat_nm_out=(char *)strdup(rgr_lat_nm_in); /* [sng] Name of output dimension for latitude */ // if(!rgr->lon_nm_out) rgr->lon_nm_out=(char *)strdup(rgr_lon_nm_in); /* [sng] Name of output dimension for longitude */ /* Free kvms */ if(rgr_lst) rgr_lst=nco_kvm_lst_free(rgr_lst,rgr_var_nbr); return rgr; } /* end nco_rgr_ini() */ int /* O [enm] Return code */ nco_ntp_vrt /* [fnc] Interpolate vertically */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Interpolate fields to new vertical grid specified in a vertical file */ const char fnc_nm[]="nco_ntp_vrt()"; /* [sng] Function name */ char *fl_tpl; /* [sng] Template file (vertical grid file) */ char *fl_pth_lcl=NULL; int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int in_id; /* I [id] Input netCDF file ID */ int tpl_id; /* [id] Input netCDF file ID (for vertical grid template) */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int dmn_idx; /* [idx] Dimension index */ int rec_idx; /* [idx] Record dimension index */ nco_bool FL_RTR_RMT_LCN; nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s obtaining vertical grid from %s\n",nco_prg_nm_get(),fnc_nm,rgr->fl_vrt); /* Duplicate (because nco_fl_mk_lcl() free()'s its fl_in) */ fl_tpl=(char *)strdup(rgr->fl_vrt); /* Make sure file is on local system and is readable or die trying */ fl_tpl=nco_fl_mk_lcl(fl_tpl,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_tpl,md_open,&bfr_sz_hnt,&tpl_id); /* Formula-terms for hybrid pressure vertical grid on unstructured CAM/EAM horizontal grid: prs_mdp[time,lev,col]=P0*hyam[lev] +PS[time,col]*hybm[lev] prs_ntf[time,lev,col]=P0*hyai[ilev]+PS[time,col]*hybi[ilev] */ /* Formula-terms for hybrid pressure vertical grid on ECMWF RLL horizontal grid: prs_mdp[time,lev,lat,lon]=hyam[lev] +exp(lnsp[time,lat,lon])*hybm[lev] prs_ntf[time,lev,lat,lon]=hyai[ilev]+exp(lnsp[time,lat,lon])*hybi[ilev] */ /* For simplicity and code re-use, all single-variable (not hybrid-variable) coordinate systems adopt "lev" semantics This includes pure pressure coordinates and eventually will include sigma, depth, and height coordinates Only hybrid coordinates will refer to the "ilev" levels and indices All single coordinate systems will refer to "lev" levels and indices */ int dpt_id; /* [id] Ocean depth ID */ 
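/* Numerical illustration of the formula-terms above, with hypothetical values (not read from any file): for P0=100000.0 Pa, hyam[lev]=0.05, hybm[lev]=0.9, and PS=98000.0 Pa, prs_mdp=100000.0*0.05+98000.0*0.9=93200.0 Pa. Near the model top hybm ~ 0 so midpoint pressure is set by the A-coefficient alone, while near the surface hyam ~ 0 so midpoint pressure tracks PS */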
int hyai_id=NC_MIN_INT; /* [id] Hybrid A coefficient at layer interfaces ID */ int hyam_id=NC_MIN_INT; /* [id] Hybrid A coefficient at layer midpoints ID */ int hybi_id=NC_MIN_INT; /* [id] Hybrid B coefficient at layer interfaces ID */ int hybm_id=NC_MIN_INT; /* [id] Hybrid B coefficient at layer midpoints ID */ int ilev_id=NC_MIN_INT; /* [id] Interface pressure ID */ int lev_id=NC_MIN_INT; /* [id] Midpoint pressure ID */ int p0_id=NC_MIN_INT; /* [id] Reference pressure ID */ int ps_id=NC_MIN_INT; /* [id] Surface pressure ID */ int plev_id; /* [id] Air pressure ID */ nco_bool flg_grd_hyb_cameam=False; /* [flg] Hybrid coordinate vertical grid uses CAM/EAM conventions */ nco_bool flg_grd_hyb_ecmwf=False; /* [flg] Hybrid coordinate vertical grid uses ECMWF conventions */ nco_bool flg_grd_in_dpt=False; /* [flg] Input depth coordinate vertical grid */ nco_bool flg_grd_in_hyb=False; /* [flg] Input hybrid coordinate vertical grid */ nco_bool flg_grd_in_prs=False; /* [flg] Input pressure coordinate vertical grid */ nco_bool flg_grd_out_dpt=False; /* [flg] Output depth coordinate vertical grid */ nco_bool flg_grd_out_hyb=False; /* [flg] Output hybrid coordinate vertical grid */ nco_bool flg_grd_out_prs=False; /* [flg] Output pressure coordinate vertical grid */ nco_bool flg_vrt_tm=False; /* [flg] Output depends on time-varying vertical grid */ nco_grd_vrt_typ_enm nco_vrt_grd_in=nco_vrt_grd_nil; /* [enm] Vertical grid type for input grid */ nco_grd_vrt_typ_enm nco_vrt_grd_out=nco_vrt_grd_nil; /* [enm] Vertical grid type for output grid */ nco_ntp_typ_enm ntp_mth=rgr->ntp_mth; /* [enm] Interpolation method */ nco_xtr_typ_enm xtr_mth=rgr->xtr_mth; /* [enm] Extrapolation method */ /* Determine output grid type */ if((rcd=nco_inq_varid_flg(tpl_id,"hyai",&hyai_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_hyb; /* EAM */ flg_grd_out_hyb=True; }else if((rcd=nco_inq_varid_flg(tpl_id,"plev",&plev_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_prs; /* NCEP */ flg_grd_out_prs=True; }else if((rcd=nco_inq_varid_flg(tpl_id,"depth",&dpt_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_dpt; /* MPAS */ flg_grd_out_dpt=True; }else{ /* !hyai */ (void)fprintf(stdout,"%s: ERROR %s Unable to locate hybrid-sigma/pressure or pure-pressure vertical grid coordinate information in vertical grid file\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT ensure vertical grid coordinate file contains a valid vertical grid coordinate\n",nco_prg_nm_get()); return NCO_ERR; } /* !hyai */ if(flg_grd_out_hyb){ rcd=nco_inq_varid(tpl_id,"hyai",&hyai_id); rcd=nco_inq_varid(tpl_id,"hyam",&hyam_id); rcd=nco_inq_varid(tpl_id,"hybi",&hybi_id); rcd=nco_inq_varid(tpl_id,"hybm",&hybm_id); rcd=nco_inq_varid(tpl_id,"P0",&p0_id); rcd=nco_inq_varid_flg(tpl_id,"ilev",&ilev_id); rcd=nco_inq_varid_flg(tpl_id,"lev",&lev_id); rcd=nco_inq_varid_flg(tpl_id,"PS",&ps_id); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd=nco_inq_varid(tpl_id,"plev",&lev_id); } /* !flg_grd_out_prs */ if(flg_grd_out_dpt){ rcd=nco_inq_varid(tpl_id,"depth",&lev_id); } /* !flg_grd_out_dpt */ const int hyai_id_tpl=hyai_id; /* [id] Hybrid A coefficient at layer interfaces ID */ const int hyam_id_tpl=hyam_id; /* [id] Hybrid A coefficient at layer midpoints ID */ const int hybi_id_tpl=hybi_id; /* [id] Hybrid B coefficient at layer interfaces ID */ const int hybm_id_tpl=hybm_id; /* [id] Hybrid B coefficient at layer midpoints ID */ const int p0_id_tpl=p0_id; /* [id] Reference pressure ID */ const int ilev_id_tpl=ilev_id; /* [id] Interface pressure ID */ const int 
lev_id_tpl=lev_id; /* [id] Midpoint pressure ID */ const int ps_id_tpl=ps_id; /* [id] Surface pressure ID */ char *ilev_nm_in=NULL; /* [sng] Interface level name */ char *lev_nm_in; char *ilev_nm_out; char *lev_nm_out; char *plev_nm_in; /* [sng] Pure-pressure coordinate name */ char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */ int *dmn_ids_in=NULL; /* [nbr] Input file dimension IDs */ int *dmn_ids_out=NULL; /* [nbr] Output file dimension IDs */ int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */ int dmn_nbr_ps; /* [nbr] Number of dimensions in PS variable */ int dmn_nbr_in; /* [nbr] Number of dimensions in input file */ int dmn_nbr_out; /* [nbr] Number of dimensions in output file */ int dmn_id_ilev_out=NC_MIN_INT; /* [id] Dimension ID for interface level in output file */ int dmn_id_lev_out=NC_MIN_INT; /* [id] Dimension ID for midpoint level in output file */ int dmn_id_ilev_in=NC_MIN_INT; /* [id] Dimension ID for interface level in file to be interpolated */ int dmn_id_lev_in=NC_MIN_INT; /* [id] Dimension ID for midpoint level in file to be interpolated */ int dmn_id_tm_in=NC_MIN_INT; /* [id] Dimension ID for time in file to be interpolated */ int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */ int dmn_idx_tm_in=NC_MIN_INT; /* [idx] Index of record coordinate in input hybrid coordinate PS field */ long *dmn_cnt_in=NULL; long *dmn_cnt_out=NULL; long *dmn_srt=NULL; long ilev_nbr_in; long lev_nbr_in; long ilev_nbr_out; long lev_nbr_out; long tm_idx=0L; /* [idx] Current timestep */ long tm_nbr=1L; /* [nbr] Number of timesteps in vertical grid */ long tm_nbr_in=1L; /* [nbr] Number of timesteps in input vertical grid definition */ long tm_nbr_out=1L; /* [nbr] Number of timesteps in output vertical grid definition */ size_t grd_idx; /* [idx] Gridcell index */ size_t grd_sz_in=1L; /* [nbr] Number of elements in single layer of input grid */ size_t grd_sz_out=1L; /* [nbr] Number of elements in single layer of output grid */ size_t idx_fst; /* [idx] Index-offset to current surface pressure timeslice */ if(flg_grd_out_hyb){ /* Interrogate hyai/hyam to obtain ilev/lev dimensions */ rcd=nco_inq_vardimid(tpl_id,hyai_id,&dmn_id_ilev_out); rcd=nco_inq_vardimid(tpl_id,hyam_id,&dmn_id_lev_out); rcd=nco_inq_dimlen(tpl_id,dmn_id_ilev_out,&ilev_nbr_out); rcd=nco_inq_dimlen(tpl_id,dmn_id_lev_out,&lev_nbr_out); rcd=nco_inq_dimname(tpl_id,dmn_id_ilev_out,dmn_nm); ilev_nm_out=strdup(dmn_nm); rcd=nco_inq_dimname(tpl_id,dmn_id_lev_out,dmn_nm); lev_nm_out=strdup(dmn_nm); /* Interrogate PS, if any, for horizontal dimensions */ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_inq_varndims(tpl_id,ps_id,&dmn_nbr_ps); dmn_nbr_out=dmn_nbr_ps; dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_cnt_out=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long)); dmn_srt=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long)); rcd=nco_inq_vardimid(tpl_id,ps_id,dmn_ids_out); rcd=nco_inq_unlimdims(tpl_id,&dmn_nbr_rec,(int *)NULL); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd=nco_inq_unlimdims(tpl_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ rcd=nco_inq_dimlen(tpl_id,dmn_ids_out[dmn_idx],dmn_cnt_out+dmn_idx); /* 20190330: Allow possibility that PS has time dimension > 1 We want horizontal not temporal dimensions to contribute to grd_sz Temporal dimension is usually unlimited Only multiply grd_sz by fixed (non-unlimited) dimension sizes Corner-case exception when PS spatial dimension on unstructured grid is unlimited */
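/* Worked example of the rule above, with hypothetical dimensions (not read from any file): for PS[time,lat,lon] where time is the unlimited record dimension with 12 timeslices, lat=96, and lon=144, only the fixed dimensions contribute, so grd_sz_out=96*144=13824, while tm_nbr_out=12 and flg_vrt_tm becomes True */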
for(rec_idx=0;rec_idx<dmn_nbr_rec;rec_idx++) if(dmn_ids_out[dmn_idx] == dmn_ids_rec[rec_idx]) break; if(rec_idx == dmn_nbr_rec || dmn_nbr_out == 1) grd_sz_out*=dmn_cnt_out[dmn_idx]; if(rec_idx != dmn_nbr_rec && dmn_nbr_out > 1 && dmn_cnt_out[dmn_idx] > 1L){ tm_nbr_out=dmn_cnt_out[dmn_idx]; if(tm_nbr_out > 1L) flg_vrt_tm=True; } /* tm_nbr_out > 1 */ dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); } /* !ps_id_tpl */ } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ /* Interrogate plev to obtain plev dimensions */ rcd=nco_inq_vardimid(tpl_id,lev_id,&dmn_id_lev_out); rcd=nco_inq_dimlen(tpl_id,dmn_id_lev_out,&lev_nbr_out); rcd=nco_inq_dimname(tpl_id,dmn_id_lev_out,dmn_nm); ilev_nbr_out=lev_nbr_out; } /* !flg_grd_out_prs */ double *hyai_out=NULL; /* [frc] Hybrid A coefficient at layer interfaces on output grid */ double *hyam_out=NULL; /* [frc] Hybrid A coefficient at layer midpoints on output grid */ double *hybi_out=NULL; /* [frc] Hybrid B coefficient at layer interfaces on output grid */ double *hybm_out=NULL; /* [frc] Hybrid B coefficient at layer midpoints on output grid */ double *ilev_out=NULL; /* [hPa] Interface pressure on output grid */ double *lev_out=NULL; /* [hPa] Midpoint pressure on output grid */ double *ps_out=NULL; /* [Pa] Surface pressure on output grid */ double *prs_mdp_out=NULL; /* [Pa] Midpoint pressure on output grid */ double *prs_ntf_out=NULL; /* [Pa] Interface pressure on output grid */ double p0_out; /* [Pa] Reference pressure on output grid */ long ilev_idx; /* [idx] Interface level index */ long lev_idx; /* [idx] Level index */ const nc_type crd_typ_out=NC_DOUBLE; nc_type var_typ_rgr; /* [enm] Variable type used during regridding */ var_typ_rgr=NC_DOUBLE; /* NB: Perform interpolation in double precision */ if(flg_grd_out_hyb){ hyai_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr)); hyam_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); hybi_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr)); hybm_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); ilev_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr)); lev_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(tpl_id,hyai_id,hyai_out,crd_typ_out); rcd=nco_get_var(tpl_id,hyam_id,hyam_out,crd_typ_out); rcd=nco_get_var(tpl_id,hybi_id,hybi_out,crd_typ_out); rcd=nco_get_var(tpl_id,hybm_id,hybm_out,crd_typ_out); rcd=nco_get_var(tpl_id,p0_id,&p0_out,crd_typ_out); if(ilev_id_tpl != NC_MIN_INT){ rcd=nco_get_var(tpl_id,ilev_id,ilev_out,crd_typ_out); }else{ /* p0 is in Pa but ilev traditionally given in hPa */ for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) ilev_out[ilev_idx]=p0_out*(hyai_out[ilev_idx]+hybi_out[ilev_idx])/100.0; } /* !ilev_id_tpl */ if(lev_id_tpl != NC_MIN_INT){ rcd=nco_get_var(tpl_id,lev_id,lev_out,crd_typ_out); }else{ /* p0 is in Pa but lev traditionally given in hPa */ for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) lev_out[lev_idx]=p0_out*(hyam_out[lev_idx]+hybm_out[lev_idx])/100.0; } /* !ilev_id_tpl */ } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ lev_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(tpl_id,lev_id,lev_out,crd_typ_out); } /* !flg_grd_out_prs */ /* For vertical interpolation (unlike horizontal regridding), the destination grid is known a priori Straightforward copy all variables and attributes that define grid from fl_tpl to output would work in theory, but would not allow dynamic identification and relabeling of names */ /* 
if(flg_grd_out_hyb){ const int vrt_grd_lst_nbr=8; const char *vrt_grd_lst[]={"/hyai","/hyam","/hybi","/hybm","/ilev","/lev","/P0","/PS"}; } if(flg_grd_out_prs){ const int vrt_grd_lst_nbr=1; const char *vrt_grd_lst[]={"/plev"}; } */ /* Above this line, fl_tpl and tpl_id refer to vertical coordinate file (i.e., template file) Below this line, fl_in and in_id refer to input file to be vertically regridded Do not close template file until all grid variables have been copied For maximum efficiency, do this after defining all interpolated variables in output That way no file needs to exit define mode or enter data mode more than once However this requires keeping template file, input data file, and output file simultaneously open */ in_id=rgr->in_id; out_id=rgr->out_id; /* Determine input grid type */ if(rgr->plev_nm_in) plev_nm_in=rgr->plev_nm_in; if((rcd=nco_inq_varid_flg(in_id,"hyai",&hyai_id)) == NC_NOERR){ nco_vrt_grd_in=nco_vrt_grd_hyb; /* EAM */ flg_grd_in_hyb=True; }else if((rcd=nco_inq_varid_flg(in_id,plev_nm_in,&plev_id)) == NC_NOERR){ nco_vrt_grd_in=nco_vrt_grd_prs; /* NCEP */ flg_grd_in_prs=True; }else if((rcd=nco_inq_varid_flg(in_id,"depth",&dpt_id)) == NC_NOERR){ nco_vrt_grd_in=nco_vrt_grd_dpt; /* MPAS */ flg_grd_in_dpt=True; }else{ /* !hyai */ (void)fprintf(stdout,"%s: ERROR %s Unable to locate hybrid-sigma/pressure or pure-pressure vertical grid coordinate information in input file\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT only invoke vertical interpolation on files that contain variables with vertical dimensions, and with known vertical coordinate variable names. These default to \"hyai\" for hybrid, \"plev\" for pressure, \"depth\" for depth. See http://nco.sf.net/nco.html#lev_nm for options to change these names at run-time, e.g., \"--rgr plev_nm=vrt_nm\"\n",nco_prg_nm_get()); return NCO_ERR; } /* !hyai */ /* Sanity checks: One type of input and one type of output grid detected */ assert(!(flg_grd_in_hyb && flg_grd_in_prs)); assert(!(flg_grd_in_hyb && flg_grd_in_dpt)); assert(!(flg_grd_in_prs && flg_grd_in_dpt)); assert(flg_grd_in_hyb || flg_grd_in_prs || flg_grd_in_dpt); assert(!(flg_grd_out_hyb && flg_grd_out_prs)); assert(!(flg_grd_out_hyb && flg_grd_out_dpt)); assert(!(flg_grd_out_prs && flg_grd_out_dpt)); assert(flg_grd_out_hyb || flg_grd_out_prs || flg_grd_out_dpt); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG Input grid flags : flg_grd_in_hyb = %d, flg_grd_in_prs = %d, flg_grd_in_dpt = %d\n",nco_prg_nm_get(),flg_grd_in_hyb,flg_grd_in_prs,flg_grd_in_dpt); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG Output grid flags: flg_grd_out_hyb = %d, flg_grd_out_prs = %d, flg_grd_out_dpt = %d\n",nco_prg_nm_get(),flg_grd_out_hyb,flg_grd_out_prs,flg_grd_out_dpt); /* 20191219: This block is not used, deprecate it? Or use once new coordinates like altitude, depth supported? 
*/ nco_vrt_ntp_typ_enm nco_vrt_ntp_typ=nco_ntp_nil; /* Vertical interpolation type */ if(nco_vrt_grd_in == nco_vrt_grd_hyb && nco_vrt_grd_out == nco_vrt_grd_hyb) nco_vrt_ntp_typ=nco_ntp_hyb_to_hyb; if(nco_vrt_grd_in == nco_vrt_grd_hyb && nco_vrt_grd_out == nco_vrt_grd_prs) nco_vrt_ntp_typ=nco_ntp_hyb_to_prs; if(nco_vrt_grd_in == nco_vrt_grd_prs && nco_vrt_grd_out == nco_vrt_grd_hyb) nco_vrt_ntp_typ=nco_ntp_prs_to_hyb; if(nco_vrt_grd_in == nco_vrt_grd_prs && nco_vrt_grd_out == nco_vrt_grd_prs) nco_vrt_ntp_typ=nco_ntp_prs_to_prs; assert(nco_vrt_ntp_typ != nco_ntp_nil); /* Variables on input grid, i.e., on grid in data file to be interpolated */ if(flg_grd_in_hyb){ rcd=nco_inq_varid(in_id,"hyai",&hyai_id); rcd=nco_inq_varid(in_id,"hyam",&hyam_id); rcd=nco_inq_varid(in_id,"hybi",&hybi_id); rcd=nco_inq_varid(in_id,"hybm",&hybm_id); /* 20190602: ECMWF hybrid vertical grid parameters and dimensions differ from CAM/EAM: ECMWF defines vertical dimensions "nhym" and "nhyi" specifically for hy[ab][im] and uses "lev" and "lev_2" for all other variables, whereas CAM/EAM uses same dimensions "lev" and "ilev" for all vertical variables including hybrid coefficients ECMWF provides "hya?" as a constant in Pa and "hyb?" as a dimensionless coefficient of PS, whereas CAM/EAM provides "hya?" and "hyb?" both as dimensionless coefficients of P0 and PS ECMWF provides "lev" and "lev_2" with midpoint and surface pressure indices (not values), respectively, whereas CAM/EAM provides "lev" and "ilev" coordinate values in hPa ECMWF provides dimensionless "lnsp" for log(surface pressure) whereas CAM/EAM provides "PS" for surface pressure in Pa ECMWF "lnsp" has degenerate level dimension "lev_2" whereas CAM/EAM "PS" has no "ilev" dimension ECMWF uses hya? instead of reference pressure whereas CAM/EAM provides "P0" in hPa */ if((rcd=nco_inq_varid_flg(in_id,"lnsp",&ps_id)) == NC_NOERR) flg_grd_hyb_ecmwf=True; else if((rcd=nco_inq_varid_flg(in_id,"PS",&ps_id)) == NC_NOERR) flg_grd_hyb_cameam=True; else{ (void)fprintf(stderr,"%s: ERROR %s Unable to find surface pressure variable required for hybrid grid in input file\n",nco_prg_nm_get(),fnc_nm); abort(); } /* !rcd */ if(flg_grd_hyb_cameam){ rcd=nco_inq_varid(in_id,"P0",&p0_id); ilev_id=NC_MIN_INT; lev_id=NC_MIN_INT; if(ilev_id_tpl == NC_MIN_INT) rcd=nco_inq_varid_flg(in_id,"ilev",&ilev_id); if(lev_id_tpl == NC_MIN_INT) rcd=nco_inq_varid_flg(in_id,"lev",&lev_id); } /* !flg_grd_hyb_cameam */ /* 20190603: We require ECMWF IFS input to have a "lev" coordinate so we can use "lev" dimension not "nhyb" */ if(flg_grd_hyb_ecmwf) rcd=nco_inq_varid(in_id,"lev",&lev_id); } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ rcd=nco_inq_varid(in_id,plev_nm_in,&lev_id); if((rcd=nco_inq_varid_flg(in_id,"PS",&ps_id)) == NC_NOERR){ /* Output file creation procedure discriminates between input surface pressure dimensioned as CAM/EAM vs. ECMWF */ flg_grd_hyb_cameam=True; if(flg_grd_out_hyb && (ps_id_tpl == NC_MIN_INT)) (void)fprintf(stderr,"%s: INFO %s detects variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in pure-pressure input data file. PS will be copied directly from pure-pressure grid input dataset to, and used to construct the pressures of, the output hybrid-coordinate data file.\n",nco_prg_nm_get(),fnc_nm); if(flg_grd_out_hyb && (ps_id_tpl != NC_MIN_INT)) (void)fprintf(stderr,"%s: INFO %s detects variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in both vertical-grid file and pure-pressure input data file. 
The vertical grid-file takes precedence. PS will be copied directly from vertical-grid file to, and used to construct the pressures of, the output hybrid-coordinate data file. PS in input pure-pressure file will be ignored.\n",nco_prg_nm_get(),fnc_nm); }else{ if(flg_grd_out_hyb && (ps_id_tpl == NC_MIN_INT)){ (void)fprintf(stderr,"%s: ERROR %s does not find variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in pure-pressure input data file or in vertical grid-file for hybrid-pressure output. PS must be present in at least one of these files in order to construct the output hybrid-coordinate pressures.\nHINT: Append a valid PS to the input data file or vertical grid-file.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !ps_id_tpl */ } /* !ps_id */ } /* !flg_grd_in_prs */ if(flg_grd_in_dpt){ rcd=nco_inq_varid(in_id,"depth",&lev_id); } /* !flg_grd_in_dpt */ const int ilev_id_in=ilev_id; /* [id] Interface pressure ID */ const int lev_id_in=lev_id; /* [id] Midpoint pressure ID */ const int ps_id_in=ps_id; /* [id] Surface pressure ID */ /* Identify all record-dimensions in input file */ rcd=nco_inq_unlimdims(in_id,&dmn_nbr_rec,(int *)NULL); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ if(flg_grd_in_hyb){ /* Get hybrid vertical information first */ rcd=nco_inq_varndims(in_id,ps_id,&dmn_nbr_in); rcd=nco_inq_vardimid(in_id,hyai_id,&dmn_id_ilev_in); if(flg_grd_hyb_cameam) rcd=nco_inq_vardimid(in_id,hyam_id,&dmn_id_lev_in); if(flg_grd_hyb_ecmwf) rcd=nco_inq_vardimid(in_id,lev_id,&dmn_id_lev_in); rcd=nco_inq_dimlen(in_id,dmn_id_ilev_in,&ilev_nbr_in); rcd=nco_inq_dimlen(in_id,dmn_id_lev_in,&lev_nbr_in); rcd=nco_inq_dimname(in_id,dmn_id_ilev_in,dmn_nm); ilev_nm_in=strdup(dmn_nm); rcd=nco_inq_dimname(in_id,dmn_id_lev_in,dmn_nm); lev_nm_in=strdup(dmn_nm); } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ /* Interrogate plev to obtain plev dimensions */ rcd=nco_inq_vardimid(in_id,lev_id,&dmn_id_lev_in); rcd=nco_inq_dimlen(in_id,dmn_id_lev_in,&lev_nbr_in); rcd=nco_inq_dimname(in_id,dmn_id_lev_in,dmn_nm); lev_nm_in=strdup(dmn_nm); /* Define horizontal grid if no PS is provided (i.e., pure-pressure to pure-pressure interpolation) */ if(!flg_grd_out_hyb){ /* Problem: What is horizontal grid size of pressure grid file? Algorithm: Examine first multi-dimensional variable that includes plev dimension Assume horizontal dimensions vary more rapidly than (i.e., follow) plev Compute horizontal grid size accordingly Set output horizontal size to input horizontal size */ int var_nbr; /* [nbr] Number of variables in file */ int var_idx; /* [idx] Index over variables in file */ rcd=nco_inq(in_id,&dmn_nbr_in,&var_nbr,(int *)NULL,(int *)NULL); dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_cnt_in=(long *)nco_malloc(dmn_nbr_in*sizeof(long)); for(var_idx=0;var_idx<var_nbr;var_idx++){ rcd=nco_inq_varndims(in_id,var_idx,&dmn_nbr_in); rcd=nco_inq_vardimid(in_id,var_idx,dmn_ids_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++) if(dmn_ids_in[dmn_idx] == dmn_id_lev_in) break; /* Does current variable have lev dimension? */ if(dmn_idx < dmn_nbr_in){ /* Yes. Do any dimensions vary more rapidly than lev? */ if(dmn_idx < dmn_nbr_in-1){ /* Yes. 
Assume remaining dimensions are horizontal spatial dimensions */ char var_nm[NC_MAX_NAME+1L]; (void)nc_inq_varname(in_id,var_idx,var_nm); for(int dmn_idx_hrz=dmn_idx+1;dmn_idx_hrz<dmn_nbr_in;dmn_idx_hrz++){ rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx_hrz],dmn_cnt_in+dmn_idx_hrz); grd_sz_in*=dmn_cnt_in[dmn_idx_hrz]; } /* !dmn_idx_hrz */ break; } /* !dmn_idx */ } /* !dmn_idx */ } /* !var_idx */ assert(var_idx != var_nbr); grd_sz_out=grd_sz_in; } /* !flg_grd_out_hyb */ } /* !flg_grd_in_prs */ double *hyai_in=NULL; /* [frc] Hybrid A coefficient at layer interfaces on input grid */ double *hyam_in=NULL; /* [frc] Hybrid A coefficient at layer midpoints on input grid */ double *hybi_in=NULL; /* [frc] Hybrid B coefficient at layer interfaces on input grid */ double *hybm_in=NULL; /* [frc] Hybrid B coefficient at layer midpoints on input grid */ double *lev_in=NULL; /* [Pa] Air pressure on input grid */ double *prs_mdp_in=NULL; /* [Pa] Midpoint pressure on input grid */ double *prs_ntf_in=NULL; /* [Pa] Interface pressure on input grid */ double *ps_in=NULL; /* [Pa] Surface pressure on input grid */ double p0_in; /* [Pa] Reference pressure on input grid */ if(flg_grd_in_hyb){ hyai_in=(double *)nco_malloc(ilev_nbr_in*nco_typ_lng(var_typ_rgr)); hyam_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); hybi_in=(double *)nco_malloc(ilev_nbr_in*nco_typ_lng(var_typ_rgr)); hybm_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(in_id,hyai_id,hyai_in,crd_typ_out); rcd=nco_get_var(in_id,hyam_id,hyam_in,crd_typ_out); rcd=nco_get_var(in_id,hybi_id,hybi_in,crd_typ_out); rcd=nco_get_var(in_id,hybm_id,hybm_in,crd_typ_out); if(flg_grd_hyb_cameam) rcd=nco_get_var(in_id,p0_id,&p0_in,crd_typ_out); /* ECMWF distributes IFS forecasts with lnsp = log(surface pressure) */ if(flg_grd_hyb_ecmwf){ /* Decompose ECMWF hya? convention into CAM/EAM-like product of P0 and hya? 
*/ p0_in=100000.0; for(size_t idx=0;idx<lev_nbr_in;idx++){ hyai_in[idx]/=p0_in; hyam_in[idx]/=p0_in; } /* !idx */ } /* flg_grd_hyb_ecmwf */ } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ lev_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(in_id,lev_id,lev_in,crd_typ_out); } /* !flg_grd_in_prs */ /* Always obtain surface pressure if input or output grid is hybrid */ if(flg_grd_in_hyb || flg_grd_out_hyb){ /* Copy horizontal grid information from input file LHS variables were set above if PS is in template file */ if(ps_id_tpl == NC_MIN_INT){ /* NB: dmn_nbr_in/out in this block refer only to horizontal dimensions necessary to define PS */ rcd=nco_inq_varndims(in_id,ps_id,&dmn_nbr_in); /* This is harmlessly repeated for hybrid input files */ dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_cnt_in=(long *)nco_malloc((dmn_nbr_in+1)*sizeof(long)); if(!dmn_srt) dmn_srt=(long *)nco_malloc((dmn_nbr_in+1)*sizeof(long)); /* NB: Only allocate dmn_srt once */ rcd=nco_inq_vardimid(in_id,ps_id,dmn_ids_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_in+dmn_idx); /* 20190330: Allow possibility that PS has time dimension > 1 We want horizontal not temporal dimensions to contribute to grd_sz Temporal dimension is usually unlimited Only multiply grd_sz by fixed (non-unlimited) dimension sizes Corner-case exception when PS spatial dimension on unstructured grid is unlimited */ for(rec_idx=0;rec_idx<dmn_nbr_rec;rec_idx++) if(dmn_ids_in[dmn_idx] == dmn_ids_rec[rec_idx]) break; if(rec_idx == dmn_nbr_rec || dmn_nbr_in == 1) grd_sz_in*=dmn_cnt_in[dmn_idx]; if(rec_idx != dmn_nbr_rec && dmn_nbr_in > 1 && dmn_cnt_in[dmn_idx] > 1L){ dmn_id_tm_in=dmn_ids_in[dmn_idx]; dmn_idx_tm_in=dmn_idx; tm_nbr_in=dmn_cnt_in[dmn_idx_tm_in]; if(tm_nbr_in > 1L) flg_vrt_tm=True; } /* tm_nbr_in > 1 */ dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ /* Given all input PS information, define output PS information */ dmn_nbr_ps=dmn_nbr_out=dmn_nbr_in; dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_cnt_out=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long)); /* fxm: next line works for hyb_in and is buggy for prs_in */ memcpy(dmn_ids_out,dmn_ids_in,dmn_nbr_in*sizeof(int)); memcpy(dmn_cnt_out,dmn_cnt_in,dmn_nbr_in*sizeof(long)); grd_sz_out=grd_sz_in; tm_nbr_out=tm_nbr_in; }else{ /* !ps_id_tpl */ /* 20200825: We have already defined grd_sz_out if PS is in template file We have already defined grd_sz_in and grd_sz_out := grd_sz_in when PS not in template file We have already defined grd_sz_in if input file is pure-pressure However, we have not yet defined grd_sz_in if input file is hybrid Expectation is that grd_sz_in (from input file) = grd_sz_out (from template file) An independent check on this would examine dimension sizes in input file Such a check would immediately flag horizontal mismatches between vertical file and input file The check could not rely on PS being present in input file The check could/should examine the first horizontal variable in input file This would require a lot of code, so we just assume it is true */ grd_sz_in=grd_sz_out; } /* !ps_id_tpl */ /* Timestep sequencing NB: tm_nbr_??? 
variables count timesteps in vertical grid definitions These are not necessarily the same as the number of timesteps in either file Time-invariant hybrid or pure-pressure coordinates are valid vertical grids for timeseries Usually hybrid grids have as many timesteps in the grids as in the timeseries Usually pressure grids are time-invariant (as of 20190511 time-varying pure pressure grids are still not supported) This implementation interpolates timeseries to/from time-invariant vertical grids in one OpenMP call! */ if(tm_nbr_in > 1L || tm_nbr_out > 1L){ if(tm_nbr_in > tm_nbr_out) assert((float)tm_nbr_in/(float)tm_nbr_out == tm_nbr_in/tm_nbr_out); else assert((float)tm_nbr_out/(float)tm_nbr_in == tm_nbr_out/tm_nbr_in); } /* !tm_nbr_in */ tm_nbr=tm_nbr_in > tm_nbr_out ? tm_nbr_in : tm_nbr_out; /* Sanity checks */ if(grd_sz_in != grd_sz_out || tm_nbr_in != tm_nbr_out) (void)fprintf(stdout,"%s: ERROR %s reports that temporal or horizontal spatial dimensions differ: grd_sz_in = %ld != %ld = grd_sz_out, and/or tm_nbr_in = %ld != %ld = tm_nbr_out\n",nco_prg_nm_get(),fnc_nm,grd_sz_in,grd_sz_out,tm_nbr_in,tm_nbr_out); assert(grd_sz_in == grd_sz_out); assert(tm_nbr_in == tm_nbr_out); ps_in=(double *)nco_malloc_dbg(tm_nbr_in*grd_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() ps_in value buffer"); /* Surface pressure comes from either hybrid vertical grid-files, hybrid data files, or pressure data files that provide surface pressure */ if(flg_grd_in_hyb || (flg_grd_in_prs && ps_id_tpl == NC_MIN_INT)) rcd=nco_get_var(in_id,ps_id,ps_in,crd_typ_out); /* ECMWF distributes IFS forecasts with lnsp = log(surface pressure) */ if(flg_grd_hyb_ecmwf){ /* Convert ECMWF-provided log(surface_pressure) to surface_pressure */ const size_t ps_sz_in=tm_nbr_in*grd_sz_in; /* [nbr] Number of elements in ps_in */ for(size_t idx=0;idx<ps_sz_in;idx++) ps_in[idx]=exp(ps_in[idx]); } /* flg_grd_hyb_ecmwf */ /* Finally have enough information to allocate output pressure grid */ ps_out=(double *)nco_malloc_dbg(tm_nbr_out*grd_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() ps_out value buffer"); /* Get PS from output horizontal grid, if available, otherwise copy from input horizontal grid */ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_get_var(tpl_id,ps_id_tpl,ps_out,crd_typ_out); /* NB: Here we read from tpl_id one last time */ }else{ memcpy(ps_out,ps_in,tm_nbr_in*grd_sz_in*nco_typ_lng(var_typ_rgr)); } /* !ps_id_tpl */ } /* ! 
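flg_grd_in_hyb || flg_grd_out_hyb: at this point ps_in and ps_out hold surface pressure in Pa for every timestep and gridcell. For ECMWF input, the exp() conversion above turns a hypothetical lnsp value of 11.49 into exp(11.49) ~ 9.77e4 Pa, i.e., into the same units and convention as CAM/EAM PS 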
*/ /* Compare input and output surface pressure fields to determine whether subterranean extrapolation required */ nco_bool flg_add_msv_att; /* [flg] Extrapolation requires _FillValue */ flg_add_msv_att=False; /* Extrapolation type xtr_fll_msv may cause need to create _FillValue attributes */ if(xtr_mth == nco_xtr_fll_msv){ const size_t ps_sz=tm_nbr*grd_sz_in; // [nbr] Size of surface-pressure field double *prs_max_in=NULL; /* [Pa] Maximum midpoint pressure on input grid */ double *prs_max_out=NULL; /* [Pa] Maximum midpoint pressure on output grid */ double *prs_min_in=NULL; /* [Pa] Minimum midpoint pressure on input grid */ double *prs_min_out=NULL; /* [Pa] Minimum midpoint pressure on output grid */ long idx_lev_max; // [idx] Index of midpoint level with greatest pressure long idx_lev_min; // [idx] Index of midpoint level with lowest pressure size_t idx; // [idx] Counting index prs_max_in=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_max_in value buffer"); prs_max_out=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_max_out value buffer"); prs_min_in=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_min_in value buffer"); prs_min_out=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_min_out value buffer"); if(flg_grd_in_hyb){ // fxm: assumes hybrid grid has least/greatest pressure at top/bottom level idx_lev_max=lev_nbr_in-1; idx_lev_min=0L; for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ idx_fst=tm_idx*grd_sz_in; for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++){ prs_max_in[grd_idx+idx_fst]=p0_in*hyam_in[idx_lev_max]+ps_in[idx_fst+grd_idx]*hybm_in[idx_lev_max]; prs_min_in[grd_idx+idx_fst]=p0_in*hyam_in[idx_lev_min]+ps_in[idx_fst+grd_idx]*hybm_in[idx_lev_min]; } /* !grd_idx */ } /* !tm_idx */ } /* !flg_grd_in_hyb */ if(flg_grd_out_hyb){ // fxm: assumes hybrid grid has least/greatest pressure at top/bottom level idx_lev_max=lev_nbr_out-1; idx_lev_min=0L; for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ idx_fst=tm_idx*grd_sz_out; for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++){ prs_max_out[grd_idx+idx_fst]=p0_out*hyam_out[idx_lev_max]+ps_out[idx_fst+grd_idx]*hybm_out[idx_lev_max]; prs_min_out[grd_idx+idx_fst]=p0_out*hyam_out[idx_lev_min]+ps_out[idx_fst+grd_idx]*hybm_out[idx_lev_min]; } /* !grd_idx */ } /* !tm_idx */ } /* !flg_grd_out_hyb */ if(flg_grd_in_prs){ double lev_in_max; double lev_in_min; if(lev_in[0] < lev_in[1]) lev_in_max=lev_in[lev_nbr_in-1]; else lev_in_max=lev_in[0]; if(lev_in[0] < lev_in[1]) lev_in_min=lev_in[0]; else lev_in_min=lev_in[lev_nbr_in-1]; for(size_t idx_in=0;idx_in<ps_sz;idx_in++) prs_max_in[idx_in]=lev_in_max; for(size_t idx_in=0;idx_in<ps_sz;idx_in++) prs_min_in[idx_in]=lev_in_min; } /* !flg_grd_in_prs */ if(flg_grd_out_prs){ double lev_out_max; double lev_out_min; if(lev_out[0] < lev_out[1]) lev_out_max=lev_out[lev_nbr_out-1]; else lev_out_max=lev_out[0]; if(lev_out[0] < lev_out[1]) lev_out_min=lev_out[0]; else lev_out_min=lev_out[lev_nbr_out-1]; for(size_t idx_out=0;idx_out<ps_sz;idx_out++) prs_max_out[idx_out]=lev_out_max; for(size_t idx_out=0;idx_out<ps_sz;idx_out++) prs_min_out[idx_out]=lev_out_min; } /* !flg_grd_out_prs */ for(idx=0;idx<ps_sz;idx++) if(prs_max_out[idx] > prs_max_in[idx]) break; if(idx < ps_sz) flg_add_msv_att=True; for(idx=0;idx<ps_sz;idx++) if(prs_min_out[idx] < prs_min_in[idx]) break; if(idx < ps_sz) flg_add_msv_att=True; if(flg_add_msv_att && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s 
reports at least one point in at least one output level requires extrapolation (not interpolation). Will ensure that all interpolated fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm); if(prs_max_in) prs_max_in=(double *)nco_free(prs_max_in); if(prs_max_out) prs_max_out=(double *)nco_free(prs_max_out); if(prs_min_in) prs_min_in=(double *)nco_free(prs_min_in); if(prs_min_out) prs_min_out=(double *)nco_free(prs_min_out); } /* !xtr_mth */ /* Lay-out regridded file */ //(void)fprintf(stdout,"%s: DEBUG quark1 dmn_nbr_out = %d, dmn_nbr_ps = %d\n",nco_prg_nm_get(),dmn_nbr_out,dmn_nbr_ps); /* Use explicitly specified output names, if any, otherwise use template names (either explicitly specified or discovered by fuzzing) */ if(rgr->lev_nm_out) lev_nm_out=rgr->lev_nm_out; if(rgr->ilev_nm_out){ if(flg_grd_out_hyb) ilev_nm_out=rgr->ilev_nm_out; if(flg_grd_out_prs) lev_nm_out=rgr->ilev_nm_out; } /* !ilev_nm_out */ if(flg_grd_out_prs){ /* Unless user explicitly specifies output name, use same name as input */ if(!rgr->lev_nm_out) lev_nm_out=(char *)strdup(plev_nm_in); /* Hybrid-sigma/pressure interface variables, if any, must also be output to pure-pressure files on lev grid */ ilev_nm_out=(char *)strdup(lev_nm_out); } /* !flg_grd_out_prs */ /* Define new vertical dimensions before all else */ if(flg_grd_out_hyb){ rcd=nco_def_dim(out_id,ilev_nm_out,ilev_nbr_out,&dmn_id_ilev_out); rcd=nco_def_dim(out_id,lev_nm_out,lev_nbr_out,&dmn_id_lev_out); /* Horizontal dimensions necessary to define PS variable */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_inq_dimname(tpl_id,dmn_ids_out[dmn_idx],dmn_nm); }else{ rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm); rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_out+dmn_idx); } /* !ps_id_tpl */ if(flg_grd_hyb_cameam) rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx); /* 20190602: ECMWF IFS PS variable has degenerate vertical dimension (lev_2). 
Avoid re-definition */ if(flg_grd_hyb_ecmwf) if(strcmp(dmn_nm,ilev_nm_out)) if(strcmp(dmn_nm,lev_nm_out)) rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx); } /* !dmn_idx */ } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd=nco_def_dim(out_id,lev_nm_out,lev_nbr_out,&dmn_id_lev_out); } /* !flg_grd_out_prs */ /* Do not extract grid variables (that are also extensive variables) like ilev, lev, hyai, hyam, hybi, hybm */ /* Exception list source: CAM: hyai, hyam, hybi, hybm, ilev, lev, P0, PS EAM: hyai, hyam, hybi, hybm, ilev, lev, P0, PS ECMWF: hyai, hyam, hybi, hybm, lev, lnsp NCEP: plev */ const int var_xcl_lst_nbr=10; /* [nbr] Number of objects on exclusion list */ const char *var_xcl_lst[]={"/hyai","/hyam","/hybi","/hybm","/ilev","/lev","/P0","/plev","/PS","/lnsp"}; int var_cpy_nbr=0; /* [nbr] Number of copied variables */ int var_rgr_nbr=0; /* [nbr] Number of regridded variables */ int var_xcl_nbr=0; /* [nbr] Number of deleted variables */ int var_crt_nbr=0; /* [nbr] Number of created variables */ long idx; /* [idx] Generic index */ unsigned int idx_tbl; /* [idx] Counter for traversal table */ const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */ for(idx=0;idx<var_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,var_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* !idx_tbl */ } /* !idx */ /* 20191001: Do not automatically define plev_nm_in in pressure-grid output files The variable named lev_nm_out in the input data file is always defined in the output file So if plev_nm_in == lev_nm_out it will be defined anyway */ if(flg_grd_in_prs && flg_grd_out_prs && strcmp(plev_nm_in,lev_nm_out)){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm,plev_nm_in)) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* !idx_tbl */ } /* !idx */ char *var_nm; /* [sng] Variable name */ int *dmn_id_in=NULL; /* [id] Dimension IDs */ int *dmn_id_out=NULL; /* [id] Dimension IDs */ int var_id_in; /* [id] Variable ID */ int var_id_out; /* [id] Variable ID */ nc_type var_typ_out; /* [enm] Variable type to write to disk */ nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */ int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; dfl_lvl=rgr->dfl_lvl; fl_out_fmt=rgr->fl_out_fmt; /* Define new coordinates and grid variables in regridded file */ const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables (scalars) */ const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ //const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ //const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ //const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ if(flg_grd_out_hyb){ 
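/* Define the canonical hybrid vertical grid variables (hyai, hyam, hybi, hybm, ilev, lev, P0, PS) on the output grid, deflating each one when a deflation level was requested */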
rcd+=nco_def_var(out_id,"hyai",crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&hyai_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hyai_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hyam",crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&hyam_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hyam_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hybi",crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&hybi_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hybi_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hybm",crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&hybm_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hybm_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,ilev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&ilev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ilev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&lev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"P0",crd_typ_out,dmn_nbr_0D,(int *)NULL,&p0_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,p0_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; // for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ // rcd=nco_inq_dimname(out_id,dmn_ids_out[dmn_idx],dmn_nm); // (void)fprintf(stdout,"%s: DEBUG quark5 dmn_nbr_out = %d, dmn_nbr_ps = %d, dmn_idx = %d, dmn_ids_out[%d] = %d, dmn_nm = %s\n",nco_prg_nm_get(),dmn_nbr_out,dmn_nbr_ps,dmn_idx,dmn_idx,dmn_ids_out[dmn_idx],dmn_nm); // } /* !dmn_idx */ if(flg_grd_hyb_cameam) rcd+=nco_def_var(out_id,"PS",crd_typ_out,dmn_nbr_ps,dmn_ids_out,&ps_id); if(flg_grd_hyb_ecmwf){ /* Remove degenerate ECMWF vertical dimension so that output PS has dmn_nbr_ps-1 not dmn_nbr_ps dimensions */ int dmn_nbr_out_ecmwf=0; for(dmn_idx=0;dmn_idx<dmn_nbr_ps;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm); if(strcmp(dmn_nm,ilev_nm_out) && strcmp(dmn_nm,lev_nm_out) && strcmp(dmn_nm,"lev_2")) rcd=nco_inq_dimid(out_id,dmn_nm,dmn_ids_out+dmn_nbr_out_ecmwf++); } /* !dmn_idx */ rcd+=nco_def_var(out_id,"PS",crd_typ_out,dmn_nbr_out_ecmwf,dmn_ids_out,&ps_id); } /* !flg_grd_hyb_ecmwf */ if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ps_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; (void)nco_att_cpy(tpl_id,out_id,hyai_id_tpl,hyai_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hyam_id_tpl,hyam_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hybi_id_tpl,hybi_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hybm_id_tpl,hybm_id,PCK_ATT_CPY); if(p0_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,p0_id_tpl,p0_id,PCK_ATT_CPY); /* p0 not expected to be in ECMWF grids */ if(ilev_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,ilev_id_tpl,ilev_id,PCK_ATT_CPY); else if(ilev_id_in != NC_MIN_INT) (void)nco_att_cpy(in_id,out_id,ilev_id_in,ilev_id,PCK_ATT_CPY); if(lev_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,lev_id_tpl,lev_id,PCK_ATT_CPY); else if(lev_id_in != NC_MIN_INT) (void)nco_att_cpy(in_id,out_id,lev_id_in,lev_id,PCK_ATT_CPY); if(ps_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,ps_id_tpl,ps_id,PCK_ATT_CPY); else (void)nco_att_cpy(in_id,out_id,ps_id_in,ps_id,PCK_ATT_CPY); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd+=nco_def_var(out_id,lev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&lev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; (void)nco_att_cpy(tpl_id,out_id,lev_id_tpl,lev_id,PCK_ATT_CPY); 
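/* Pure-pressure output has no separate interface grid, so alias the interface-level dimension to the midpoint dimension defined above */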
dmn_id_ilev_out=dmn_id_lev_out; } /* !flg_grd_out_prs */ /* No further access to template file, close it */ nco_close(tpl_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_tpl); char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */ nco_bool has_ilev; /* [flg] Contains interface level dimension */ nco_bool has_lev; /* [flg] Contains midpoint level dimension */ nco_bool has_tm; /* [flg] Contains time dimension */ nco_bool need_prs_ntf=False; /* [flg] At least one variable to regrid is on interface levels */ nco_bool need_prs_mdp=False; /* [flg] At least one variable to regrid is on midpoint levels */ trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */ /* Define regridding flag for each variable */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn; has_ilev=False; has_lev=False; for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ /* Pre-determine flags necessary during next loop */ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* fxm: Generalize to include any variable containing coordinates with "standard_name" = "atmosphere_hybrid_sigma_pressure_coordinate" */ if(!has_ilev && ilev_nm_in) has_ilev=!strcmp(dmn_nm_cp,ilev_nm_in); if(!has_lev) has_lev=!strcmp(dmn_nm_cp,lev_nm_in); } /* end loop over dimensions */ /* Regrid variables that contain either vertical dimension */ if(has_ilev || has_lev){ trv_tbl->lst[idx_tbl].flg_rgr=True; var_rgr_nbr++; if(has_ilev) need_prs_ntf=True; if(has_lev) need_prs_mdp=True; } /* endif */ assert(!(has_ilev && has_lev)); /* Copy all variables that are not regridded or omitted */ if(!trv_tbl->lst[idx_tbl].flg_rgr) var_cpy_nbr++; } /* end nco_obj_typ_var */ } /* end idx_tbl */ if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit interpolation criteria. The vertical interpolator expects something to interpolate, and variables not interpolated are copied straight to output. HINT: If the name(s) of the input vertical grid dimensions (e.g., ilev and lev) do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"ilev\", \"lev\", and/or \"plev\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#regrid. For hybrid-pressure coordinate grids, ensure that the \"ilev\" and \"lev\" variable names are known with, e.g., \"ncks --rgr ilev_nm=interface_level --rgr lev_nm=midpoint_level\" or \"ncremap -R '--rgr ilev=interface_level --rgr lev=midpoint_level'\". For pure pressure grids, ensure the \"plev\" coordinate name is defined with, e.g., \"ncks --rgr plev_nm=pressure_level\" or \"ncremap -R '--rgr plev=pressure_level'\".\n",nco_prg_nm_get(),fnc_nm); if(nco_dbg_lvl_get() >= nco_dbg_fl){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Interpolate %s? %s\n",trv.nm,trv.flg_rgr ? 
"Yes" : "No"); } /* end idx_tbl */ } /* end dbg */ /* Pre-allocate dimension ID and cnt/srt space */ int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */ rcd+=nco_inq_ndims(in_id,&dmn_nbr_max); dmn_id_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); aed_sct aed_mtd_fll_val; char *att_nm_fll_val=strdup("_FillValue"); int flg_pck; /* [flg] Variable is packed on disk */ nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */ double mss_val_dbl; double mss_val_cmp_dbl; /* Missing value for comparison to double precision values */ float mss_val_flt; if(flg_add_msv_att){ aed_mtd_fll_val.att_nm=att_nm_fll_val; aed_mtd_fll_val.mode=aed_create; aed_mtd_fll_val.sz=1L; mss_val_dbl=NC_FILL_DOUBLE; mss_val_flt=NC_FILL_FLOAT; } /* !flg_add_msv_att */ /* Define interpolated and copied variables in output file */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ var_nm=trv.nm; /* Preserve input type in output type */ var_typ_out=trv.var_typ; dmn_nbr_in=trv.nbr_dmn; dmn_nbr_out=trv.nbr_dmn; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out); /* If variable has not been defined, define it */ if(rcd != NC_NOERR){ if(trv.flg_rgr){ /* Interpolate */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports variable \"%s\" is packed so results unpredictable. 
HINT: If regridded values seem weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); if(ilev_nm_in && !strcmp(dmn_nm,ilev_nm_in)){ /* Change ilev dimension */ dmn_id_out[dmn_idx]=dmn_id_ilev_out; dmn_cnt_out[dmn_idx]=ilev_nbr_out; }else if(!strcmp(dmn_nm,lev_nm_in)){ /* Change lev dimension */ dmn_id_out[dmn_idx]=dmn_id_lev_out; dmn_cnt_out[dmn_idx]=lev_nbr_out; }else{ /* Dimensions ilev/lev_nm_in have already been defined as ilev/lev_nm_out, replicate all other dimensions */ rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); } /* !ilev */ if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_out+dmn_idx); /* Check for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ }else{ /* !flg_rgr */ /* Replicate non-interpolated variables */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_out+dmn_idx); /* Check for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ } /* !flg_rgr */ rcd=nco_def_var(out_id,var_nm,var_typ_out,dmn_nbr_out,dmn_id_out,&var_id_out); /* Duplicate netCDF4 settings when possible */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){ /* Deflation */ if(dmn_nbr_out > 0){ int dfl_lvl_in; /* [enm] Deflate level [0..9] */ rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in); /* Copy original deflation settings */ if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in); /* Overwrite HDF Lempel-Ziv compression level, if requested */ if(dfl_lvl == 0) deflate=(int)False; else deflate=(int)True; /* Turn-off shuffle when uncompressing otherwise chunking requests may fail */ if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE; /* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */ if(dfl_lvl >= 0) shuffle=NC_SHUFFLE; if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl); } /* !dmn_nbr_out */ } /* !NC_FORMAT_NETCDF4 */ (void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY); /* Variables with subterranean levels and missing-value extrapolation must have _FillValue attribute */ if(flg_add_msv_att && trv.flg_rgr){ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); if(!has_mss_val){ nco_bool flg_att_chg; /* [flg] _FillValue attribute was written */ aed_mtd_fll_val.var_nm=var_nm; aed_mtd_fll_val.id=var_id_out; aed_mtd_fll_val.type=var_typ_out; if(var_typ_out == NC_FLOAT) aed_mtd_fll_val.val.fp=&mss_val_flt; else if(var_typ_out == NC_DOUBLE) aed_mtd_fll_val.val.dp=&mss_val_dbl; flg_att_chg=nco_aed_prc(out_id,var_id_out,aed_mtd_fll_val); if(!flg_att_chg && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: WARNING 
%s reports unsuccessful attempt to create _FillValue attribute for variable %s\n",nco_prg_nm_get(),fnc_nm,var_nm); } /* !has_mss_val */ } /* !flg_add_msv_att */ } /* !rcd */ } /* !var */ } /* !idx_tbl */ /* Free pre-allocated array space */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Begin data mode */ (void)nco_enddef(out_id); /* Copy all grid variables */ if(flg_grd_out_hyb){ (void)nco_put_var(out_id,hyai_id,hyai_out,crd_typ_out); (void)nco_put_var(out_id,hyam_id,hyam_out,crd_typ_out); (void)nco_put_var(out_id,hybi_id,hybi_out,crd_typ_out); (void)nco_put_var(out_id,hybm_id,hybm_out,crd_typ_out); (void)nco_put_var(out_id,ilev_id,ilev_out,crd_typ_out); (void)nco_put_var(out_id,lev_id,lev_out,crd_typ_out); (void)nco_put_var(out_id,p0_id,&p0_out,crd_typ_out); (void)nco_put_var(out_id,ps_id,ps_out,crd_typ_out); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ (void)nco_put_var(out_id,lev_id,lev_out,crd_typ_out); } /* !flg_grd_out_prs */ nco_bool flg_ntp_log=True; /* [flg] Interpolate in log(vertical_coordinate) */ if(ntp_mth == nco_ntp_lnr) flg_ntp_log=False; size_t idx_in; /* [idx] Index into 3D input variables */ size_t idx_out; /* [idx] Index into 3D output variables */ size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */ /* Interpolate or copy variable values */ double *var_val_dbl_in=NULL; double *var_val_dbl_out=NULL; double *prs_ntp_in; /* [Pa] Interpolated pressure array on input grid */ double *prs_ntp_out; /* [Pa] Interpolated pressure array on output grid */ int lvl_idx_in; /* [idx] Level index on input grid */ int lvl_idx_out; /* [idx] Level index on output grid */ int lvl_nbr_in; /* [nbr] Number of levels for current interpolated variable on input grid */ int lvl_nbr_out; /* [nbr] Number of levels for current interpolated variable on output grid */ int thr_idx; /* [idx] Thread index */ size_t grd_nbr=grd_sz_in; /* [nbr] Horizontal grid size */ size_t idx_dbg=rgr->idx_dbg; /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped as shared in parallel clause */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ /* Repeating above documentation for the forgetful: NB: tm_nbr is max(timesteps) in vertical grid definitions, not number of records in either file This implementation interpolates timeseries to/from time-invariant vertical grids in one OpenMP call! 
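Concretely: when the vertical grid is time-invariant, tm_nbr=1 and the loop below makes a single pass in which each variable is read, interpolated, and written in full (all records at once); only when flg_vrt_tm is True does each pass hyperslab the single timeslice tm_idx via dmn_srt/dmn_cnt 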
*/ for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ /* Index-offset to current surface pressure timeslice */ idx_fst=tm_idx*grd_sz_in; if(need_prs_mdp){ /* Allocate and define midpoint pressures */ if(tm_idx == 0) prs_mdp_in=(double *)nco_malloc_dbg(grd_sz_in*lev_nbr_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_mdp_in value buffer"); if(tm_idx == 0) prs_mdp_out=(double *)nco_malloc_dbg(grd_sz_out*lev_nbr_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_mdp_out value buffer"); if(flg_grd_in_hyb) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_in;lev_idx++) prs_mdp_in[grd_idx+lev_idx*grd_sz_in]=p0_in*hyam_in[lev_idx]+ps_in[idx_fst+grd_idx]*hybm_in[lev_idx]; if(flg_grd_out_hyb) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) prs_mdp_out[grd_idx+lev_idx*grd_sz_out]=p0_out*hyam_out[lev_idx]+ps_out[idx_fst+grd_idx]*hybm_out[lev_idx]; if(flg_grd_in_prs) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_in;lev_idx++) prs_mdp_in[grd_idx+lev_idx*grd_sz_in]=lev_in[lev_idx]; if(flg_grd_out_prs) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) prs_mdp_out[grd_idx+lev_idx*grd_sz_out]=lev_out[lev_idx]; if(flg_ntp_log){ var_sz_in=grd_sz_in*lev_nbr_in; for(idx_in=0;idx_in<var_sz_in;idx_in++) prs_mdp_in[idx_in]=log(prs_mdp_in[idx_in]); var_sz_out=grd_sz_out*lev_nbr_out; for(idx_out=0;idx_out<var_sz_out;idx_out++) prs_mdp_out[idx_out]=log(prs_mdp_out[idx_out]); } /* !flg_ntp_log */ } /* !need_prs_mdp */ if(need_prs_ntf){ /* Allocate and define interface pressures */ if(tm_idx == 0) prs_ntf_in=(double *)nco_malloc_dbg(grd_sz_in*ilev_nbr_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_ntf_in value buffer"); if(tm_idx == 0) prs_ntf_out=(double *)nco_malloc_dbg(grd_sz_out*ilev_nbr_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_ntf_out value buffer"); if(flg_grd_in_hyb) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_in;ilev_idx++) prs_ntf_in[grd_idx+ilev_idx*grd_sz_in]=p0_in*hyai_in[ilev_idx]+ps_in[idx_fst+grd_idx]*hybi_in[ilev_idx]; if(flg_grd_out_hyb) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) prs_ntf_out[grd_idx+ilev_idx*grd_sz_out]=p0_out*hyai_out[ilev_idx]+ps_out[idx_fst+grd_idx]*hybi_out[ilev_idx]; if(flg_grd_in_prs) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_in;ilev_idx++) prs_ntf_in[grd_idx+ilev_idx*grd_sz_in]=lev_in[ilev_idx]; if(flg_grd_out_prs) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) prs_ntf_out[grd_idx+ilev_idx*grd_sz_out]=lev_out[ilev_idx]; if(flg_ntp_log){ var_sz_in=grd_sz_in*ilev_nbr_in; for(idx_in=0;idx_in<var_sz_in;idx_in++) prs_ntf_in[idx_in]=log(prs_ntf_in[idx_in]); var_sz_out=grd_sz_out*ilev_nbr_out; for(idx_out=0;idx_out<var_sz_out;idx_out++) prs_ntf_out[idx_out]=log(prs_ntf_out[idx_out]); } /* !flg_ntp_log */ } /* !need_prs_ntf */ /* Set firstprivate variables to initial values */ has_ilev=False; has_lev=False; has_tm=False; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"Interpolation progress: # means interpolated, ~ means copied\n"); #ifdef __GNUG__ # define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ ) # if GCC_LIB_VERSION < 490 # define GXX_OLD_OPENMP_SHARED_TREATMENT 1 # endif /* 480 */ # if GCC_LIB_VERSION >= 900 # define GXX_WITH_OPENMP5_GPU_SUPPORT 1 # endif /* 900 */ #endif /* !__GNUC__ */ #if defined( 
__INTEL_COMPILER) #else /* !__INTEL_COMPILER */ # ifdef GXX_OLD_OPENMP_SHARED_TREATMENT # else /* !old g++ */ # if defined(GXX_WITH_OPENMP5_GPU_SUPPORT) && 0 # else # endif /* !GCC > 9.0 */ # endif /* !GCC < 4.9 */ #endif /* !__INTEL_COMPILER */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; thr_idx=omp_get_thread_num(); in_id=trv_tbl->in_id_arr[thr_idx]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm); if(trv.flg_rgr){ /* Interpolate variable */ var_nm=trv.nm; if(!strcmp(var_nm,"US") || !strcmp(var_nm,"VS")) (void)fprintf(fp_stdout,"%s: WARNING %s reports attempt to vertically interpolate a variable named \"%s\". If this variable is from a CESM CAM or E3SM EAM output or initial condition file on a rectangular grid (e.g., FV 0.9x1.25), then expect this program to fail and dump core when interpolating US and to produce slightly incorrect answers for VS. The vertical interpolation routine requires that interpolated variables be on the same horizontal grid as the supplied pressure field. However, the CAM/EAM US and VS variables from rectangular grid simulations are often on a horizontal grid, called the staggered grid, that is offset from the rest of the variables including the surface pressure. US usually sits on a grid that is staggered in latitude from, and is a slightly different size than, the surface pressure grid. This leads to a core dump. VS sits on a grid staggered in longitude from, though the same size as, the surface pressure field. The resulting interpolation will be based on surface pressure half a gridcell to the east rather than centered with VS. The correct procedure to vertically interpolate US and VS is to 1) horizontally regrid the supplied surface pressure (often \"PS\") to the staggered grid, then 2) vertically interpolate US and VS to the desired vertical grid based on the surface pressure on the staggered grid, then 3) re-combine the interpolated US and VS with the interpolated versions of the rest of the variables. The best solution to this dilemma is to script this workflow. Contact Charlie if you need help with this.\n",nco_prg_nm_get(),fnc_nm,var_nm); var_typ_rgr=NC_DOUBLE; /* NB: Perform regridding in double precision */ var_typ_out=trv.var_typ; /* NB: Output type in file is same as input type */ var_sz_in=1L; var_sz_out=1L; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid(out_id,var_nm,&var_id_out); rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in); rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? 
dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_vardimid(out_id,var_id_out,dmn_id_out); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); if(dmn_id_in[dmn_idx] == dmn_id_ilev_in) has_ilev=True; if(dmn_id_in[dmn_idx] == dmn_id_lev_in) has_lev=True; if(dmn_id_in[dmn_idx] == dmn_id_tm_in) has_tm=True; if(flg_vrt_tm && has_tm && dmn_id_in[dmn_idx] == dmn_id_tm_in){ dmn_cnt_in[dmn_idx]=1L; dmn_srt[dmn_idx]=tm_idx; }else{ dmn_srt[dmn_idx]=0L; } /* !flg_vrt_tm */ var_sz_in*=dmn_cnt_in[dmn_idx]; } /* !dmn_idx */ var_val_dbl_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() input value buffer"); rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_dbl_in,var_typ_rgr); for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ /* Dimension count vector is same as input except for lvl dimension */ dmn_cnt_out[dmn_idx]=dmn_cnt_in[dmn_idx]; if(has_ilev && dmn_id_out[dmn_idx] == dmn_id_ilev_out) dmn_cnt_out[dmn_idx]=ilev_nbr_out; if(has_lev && dmn_id_out[dmn_idx] == dmn_id_lev_out) dmn_cnt_out[dmn_idx]=lev_nbr_out; var_sz_out*=dmn_cnt_out[dmn_idx]; } /* !dmn_idx */ var_val_dbl_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output value buffer"); /* Missing value setup */ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); if(has_mss_val) mss_val_cmp_dbl=mss_val_dbl; else mss_val_cmp_dbl=NC_FILL_DOUBLE; if(has_ilev){ /* Interpolate current variable from input interface pressure grid to output interface pressure grid */ lvl_nbr_in=ilev_nbr_in; lvl_nbr_out=ilev_nbr_out; prs_ntp_in=prs_ntf_in; prs_ntp_out=prs_ntf_out; }else{ /* Interpolate current variable from input midpoint pressure grid to output midpoint pressure grid */ lvl_nbr_in=lev_nbr_in; lvl_nbr_out=lev_nbr_out; prs_ntp_in=prs_mdp_in; prs_ntp_out=prs_mdp_out; } /* !ilev */ /* Procedure: Extract input/output coordinate/data arrays into 1D column order This enables actual interpolation code to be written for, or take advantage of, 1D interpolation routines After interpolating into 1D sequential memory, copy back to ND output and repeat */ double *crd_in=NULL; /* Input vertical coordinate (must be monotonic) */ double *crd_out=NULL; /* Output vertical coordinate (must be monotonic) */ double *dat_in=NULL; /* Input data (to be interpolated) on input vertical coordinate grid */ double *dat_out=NULL; /* Output data (interpolated) on output vertical coordinate grid (i.e., the answer) */ double *crd_in_mnt; /* Input vertical coordinate reversed if necessary to be monotonically increasing */ double *crd_out_mnt; /* Output vertical coordinate reversed if necessary to be monotonically increasing */ double *dat_in_mnt; /* Input data (to be interpolated) reversed if necessary along with input grid */ double *dat_out_mnt; /* Output data (interpolated) reversed if necessary along with output grid */ nco_xtr_sct xtr_LHS; nco_xtr_sct xtr_RHS; size_t brk_lft_idx; size_t brk_rgt_idx; size_t in_idx; size_t in_nbr; size_t out_nbr; size_t out_idx; /* Default extrapolation uses nearest valid neighbor */ xtr_LHS.xtr_fll=True; xtr_LHS.xtr_vrb=False; xtr_LHS.typ_fll=xtr_mth;
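/* The per-column loop below implements classic bracketed linear interpolation. A minimal standalone sketch of the core step (hypothetical helper, not an NCO routine), assuming a monotonically increasing coordinate crd[] of length n and ignoring extrapolation:
   double ntp_lnr_1D(const double *crd,const double *dat,size_t n,double x)
   {
     size_t brk=0;
     while(brk < n && crd[brk] < x) brk++; // brk is first index with crd[brk] >= x
     if(brk == 0) return dat[0]; // LHS clamp (production code extrapolates instead)
     if(brk == n) return dat[n-1]; // RHS clamp (production code extrapolates instead)
     return dat[brk-1]+(x-crd[brk-1])*(dat[brk]-dat[brk-1])/(crd[brk]-crd[brk-1]); // Two-point linear interpolation
   }
   The production loop additionally handles exact coordinate matches, missing values, and the configurable LHS/RHS extrapolation set-up in xtr_LHS/xtr_RHS */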
xtr_RHS.xtr_fll=True; xtr_RHS.xtr_vrb=False; xtr_RHS.typ_fll=xtr_mth; /* Special-case extrapolation methods allowed for all except missing-value extrapolation types */ if(xtr_mth != nco_xtr_fll_msv){ if(!strcmp(var_nm,"T") || !strcmp(var_nm,"ta")) xtr_RHS.typ_fll=nco_xtr_fll_tpt; else if(!strcmp(var_nm,"Z3") || !strcmp(var_nm,"zg")) xtr_LHS.typ_fll=xtr_RHS.typ_fll=nco_xtr_fll_gph; } /* !xtr_mth */ crd_in=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); crd_out=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); dat_in=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); dat_out=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); in_nbr=lvl_nbr_in; out_nbr=lvl_nbr_out; nco_bool in_ncr; /* [flg] Input coordinate monotonically increases */ nco_bool out_ncr; /* [flg] Output coordinate monotonically increases */ /* Determine monotonicity direction only once, based on first vertical column */ if(prs_ntp_in[grd_nbr]-prs_ntp_in[0] > 0.0) in_ncr=True; else in_ncr=False; out_ncr=True; if(out_nbr > 1) if(prs_ntp_out[grd_nbr]-prs_ntp_out[0] < 0.0) out_ncr=False; /* If necessary, allocate (once, and re-use it) additional memory to hold reversed arrays */ if(!in_ncr){ crd_in_mnt=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); dat_in_mnt=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); } /* !in_ncr */ if(!out_ncr){ crd_out_mnt=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); dat_out_mnt=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); } /* !out_ncr */ /* Constants and parameters for extrapolation */ const double gamma_moist=6.5/10000.0; /* [K/Pa] Temperature extrapolation assumes constant moist adiabatic lower atmosphere lapse rate dT/dp=constant=(6.5 K)/(100 mb) = (6.5 K)/(10000 Pa) */ const double Rd_rcp_g0=287.0/9.81; /* [m/K] Geopotential height extrapolation uses hypsometric equation Z2-Z1=(Rd*Tv_avg/g0)*ln(p1/p2)=(Rd*Tv_avg/g0)*(ln(p1)-ln(p2)) */ const double tpt_vrt_avg=288.0; /* [K] Mean virtual temperature assumed for geopotential height extrapolation */ nco_bool FIRST_WARNING_LHS; /* [flg] First warning for LHS extrapolation */ nco_bool FIRST_WARNING_RHS; /* [flg] First warning for RHS extrapolation */ if(tm_idx == 0){ /* Only print extrapolation warnings for first timestep to prevent noisy output NB: Algorithm prevents any warnings for extrapolations that appear after first timestep */ FIRST_WARNING_LHS=True; FIRST_WARNING_RHS=True; } /* !tm_idx */ /* Outer loop over columns */ for(grd_idx=0;grd_idx<grd_nbr;grd_idx++){ /* Initialize pseudo-1D variables with consecutive memory addresses to avoid indirection */ for(lvl_idx_in=0;lvl_idx_in<lvl_nbr_in;lvl_idx_in++){ idx_in=grd_idx+lvl_idx_in*grd_nbr; crd_in[lvl_idx_in]=prs_ntp_in[idx_in]; dat_in[lvl_idx_in]=var_val_dbl_in[idx_in]; } /* !lvl_idx_in */ for(lvl_idx_out=0;lvl_idx_out<lvl_nbr_out;lvl_idx_out++){ idx_out=grd_idx+lvl_idx_out*grd_nbr; crd_out[lvl_idx_out]=prs_ntp_out[idx_out]; } /* !lvl_idx_out */ /* Interpolation code easier to write/debug if crd_in and crd_out both monotonically increase However, monotonically decreasing coordinates useful in many cases, such as depth coordinate, and pressure levels arranged largest to smallest (favored by CMIP) Next code block reverses array(s) if necessary so coordinates monotonically increase Code uses crd_in_mnt, dat_in_mnt, crd_out_mnt where "_mnt" reminds of "monotonically increasing" assumption Following code lifted from CSZ's libcsz.a library source code ~/sw/c++/vec.hh */ if(in_ncr){ crd_in_mnt=crd_in; dat_in_mnt=dat_in; }else{ for(in_idx=0;in_idx<in_nbr;in_idx++){
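/* Copy the column into the _mnt buffers in reverse order so the coordinate increases monotonically, e.g., a pressure column ordered {1000, 850, 500} hPa becomes {500, 850, 1000} hPa with the data values reordered in lockstep */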
crd_in_mnt[in_idx]=crd_in[in_nbr-in_idx-1]; dat_in_mnt[in_idx]=dat_in[in_nbr-in_idx-1]; } /* !in_idx */ } /* !in_ncr */ if(out_ncr){ crd_out_mnt=crd_out; dat_out_mnt=dat_out; }else{ for(out_idx=0;out_idx<out_nbr;out_idx++) crd_out_mnt[out_idx]=crd_out[out_nbr-out_idx-1]; } /* !out_ncr */ // Initialize bracketing index brk_lft_idx=0; // Loop over desired output coordinates for(out_idx=0;out_idx<out_nbr;out_idx++){ // Order of conditions is important since second condition is illegal if brk_lft_idx >= in_nbr while((brk_lft_idx < in_nbr) && (crd_in_mnt[brk_lft_idx] < crd_out_mnt[out_idx])){ brk_lft_idx++; } // !while brk_lft_idx--; // Handle identity interpolation separately to preserve symmetry in extrapolation code if(brk_lft_idx != in_nbr-1){ if(crd_in_mnt[brk_lft_idx+1] == crd_out_mnt[out_idx]){ dat_out_mnt[out_idx]=dat_in_mnt[brk_lft_idx+1]; if(brk_lft_idx == -1) brk_lft_idx=0; // Reset brk_lft_idx to 0 so next while loop works continue; // Jump to next iteration } // !crd_in_mnt } // !brk_lft_idx if(brk_lft_idx == -1){ // LHS Extrapolation required // Degenerate case: crd_out_mnt[out_idx] < crd_in_mnt[0] brk_lft_idx=0; // Reset brk_lft_idx to 0 so next while loop works if(xtr_LHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: WARNING %s reports variable %s column %lu output value dat_out_mnt[%lu] at coordinate crd_out_mnt[%lu] = %g requires LHS extrapolation beyond leftmost valid coordinate at crd_in_mnt[%lu] = %g. Nearest valid datum is dat_in_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,var_nm,grd_idx,out_idx,out_idx,crd_out_mnt[out_idx],brk_lft_idx,crd_in_mnt[brk_lft_idx],brk_lft_idx,dat_in_mnt[brk_lft_idx]); // Extrapolation options are presented in decreasing order of preference if(!xtr_LHS.xtr_fll){ (void)fprintf(fp_stdout,"%s: ERROR %s Full LHS extrapolation required but not permitted\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } /* !xtr_LHS.xtr_fll */ switch(xtr_LHS.typ_fll){ case nco_xtr_fll_nil: dat_out_mnt[out_idx]=0.0; break; case nco_xtr_fll_msv: dat_out_mnt[out_idx]=mss_val_cmp_dbl; break; case nco_xtr_fll_ngh: dat_out_mnt[out_idx]=dat_in_mnt[0]; break; case nco_xtr_fll_lnr: dat_out_mnt[out_idx]=dat_in_mnt[0]- (crd_in_mnt[0]-crd_out_mnt[out_idx])* (dat_in_mnt[1]-dat_in_mnt[0])/(crd_in_mnt[1]-crd_in_mnt[0]); break; case nco_xtr_fll_gph: if(flg_ntp_log) /* Coordinates are already logarithmic in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[0]+ Rd_rcp_g0*tpt_vrt_avg*(crd_in_mnt[0]-crd_out_mnt[out_idx]); else /* Interpolate with logarithm of pressure coordinates */ dat_out_mnt[out_idx]=dat_in_mnt[0]+ Rd_rcp_g0*tpt_vrt_avg*log(crd_in_mnt[0]/crd_out_mnt[out_idx]); if(FIRST_WARNING_LHS) (void)fprintf(fp_stdout,"%s: INFO %s geopotential height extrapolated upward towards space using hypsometric equation with constant global mean virtual temperature = %g for variable %s\n",nco_prg_nm_get(),fnc_nm,tpt_vrt_avg,var_nm); FIRST_WARNING_LHS=False; break; default: (void)fprintf(fp_stdout,"%s: ERROR %s Unknown xtr_LHS.typ_fll\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; break; } // !xtr_LHS.typ_fll if(xtr_LHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: INFO %s LHS extrapolation yields dat_out_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,out_idx,dat_out_mnt[out_idx]); }else if(brk_lft_idx < in_nbr-1){ // Normal case: crd_out_mnt is interpolable brk_rgt_idx=brk_lft_idx+1; // NB: brk_rgt_idx is ALWAYS greater than brk_lft_idx // This simultaneously meets two criteria: // 1. Divide-by-zero errors are impossible in the next step // 2.
The identity interpolation is satisfied since crd_dlt == 0.0: // i.e., If crd_out_mnt[idx] == crd_in_mnt[brk_lft_idx] then dat_out_mnt[out_idx] := dat_in_mnt[brk_lft_idx] // Linearly interpolate dat_out_mnt[out_idx]= dat_in_mnt[brk_lft_idx]+ (crd_out_mnt[out_idx]-crd_in_mnt[brk_lft_idx])* (dat_in_mnt[brk_rgt_idx]-dat_in_mnt[brk_lft_idx])/ (crd_in_mnt[brk_rgt_idx]-crd_in_mnt[brk_lft_idx]); }else if(brk_lft_idx == in_nbr-1){ // RHS Extrapolation required // Degenerate case: brk_lft_idx is last element of crd_in_mnt brk_rgt_idx=brk_lft_idx; if(xtr_RHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: WARNING %s reports variable %s column %lu output value dat_out_mnt[%lu] at coordinate crd_out_mnt[%lu] = %g requires RHS extrapolation beyond rightmost valid coordinate at crd_in_mnt[%lu] = %g. Nearest valid datum is dat_in_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,var_nm,grd_idx,out_idx,out_idx,crd_out_mnt[out_idx],brk_rgt_idx,crd_in_mnt[brk_rgt_idx],brk_rgt_idx,dat_in_mnt[brk_rgt_idx]); // Extrapolation options are presented in decreasing order of preference if(!xtr_RHS.xtr_fll){ (void)fprintf(fp_stdout,"%s: ERROR %s Full RHS extrapolation required but not permitted\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } /* !xtr_RHS.xtr_fll */ switch(xtr_RHS.typ_fll){ case nco_xtr_fll_nil: dat_out_mnt[out_idx]=0.0; break; case nco_xtr_fll_msv: dat_out_mnt[out_idx]=mss_val_cmp_dbl; break; case nco_xtr_fll_ngh: dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]; break; case nco_xtr_fll_lnr: dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1])* (dat_in_mnt[in_nbr-1]-dat_in_mnt[in_nbr-2])/ (crd_in_mnt[in_nbr-1]-crd_in_mnt[in_nbr-2]); break; case nco_xtr_fll_tpt: if(flg_ntp_log) /* Exponentiate so coordinates are linear in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (exp(crd_out_mnt[out_idx])-exp(crd_in_mnt[in_nbr-1]))*gamma_moist; else /* Coordinates are already linear in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1])*gamma_moist; if(FIRST_WARNING_RHS) (void)fprintf(fp_stdout,"%s: INFO %s temperature extrapolated toward/into surface assuming constant moist adiabatic lapse rate = %g K/(100 mb) for variable %s\n",nco_prg_nm_get(),fnc_nm,gamma_moist*10000.0,var_nm); FIRST_WARNING_RHS=False; break; case nco_xtr_fll_gph: if(flg_ntp_log) /* Coordinates are already logarithmic in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]- Rd_rcp_g0*tpt_vrt_avg*(crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1]); else /* Interpolate with logarithm of pressure coordinates */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]- Rd_rcp_g0*tpt_vrt_avg*log(crd_out_mnt[out_idx]/crd_in_mnt[in_nbr-1]); if(FIRST_WARNING_RHS) (void)fprintf(fp_stdout,"%s: INFO %s geopotential height extrapolated toward/into surface using hypsometric equation with constant global mean virtual temperature = %g for variable %s\n",nco_prg_nm_get(),fnc_nm,tpt_vrt_avg,var_nm); FIRST_WARNING_RHS=False; break; default: (void)fprintf(fp_stdout,"%s: ERROR %s Unknown xtr_RHS\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; break; } // !xtr_RHS.typ_fll if(xtr_RHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: INFO %s RHS extrapolation yields dat_out_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,out_idx,dat_out_mnt[out_idx]); }else{ (void)fprintf(fp_stdout,"%s: ERROR %s Unforeseen value of brk_lft_idx\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } // !RHS } // !out_idx /* Un-reverse output data to be on original grid */ if(!out_ncr) for(out_idx=0;out_idx<out_nbr;out_idx++) 
dat_out[out_idx]=dat_out_mnt[out_nbr-out_idx-1]; // End of vec.hh code /* Copy answers into output array */ for(lvl_idx_out=0;lvl_idx_out<lvl_nbr_out;lvl_idx_out++){ idx_out=grd_idx+lvl_idx_out*grd_nbr; var_val_dbl_out[idx_out]=dat_out[lvl_idx_out]; } /* !lvl_idx_out */ if(nco_dbg_lvl_get() >= nco_dbg_io && grd_idx == idx_dbg){ (void)fprintf(fp_stdout,"%s: DEBUG %s variable %s at idx_dbg = %lu\n",nco_prg_nm_get(),fnc_nm,var_nm,idx_dbg); for(out_idx=0;out_idx<out_nbr;out_idx++){ (void)fprintf(fp_stdout,"out_idx = %lu dat_out = %g\n",out_idx,dat_out[out_idx]); } /* !out_idx */ } /* !dbg */ } /* !grd_idx */ if(crd_in) crd_in=(double *)nco_free(crd_in); if(crd_out) crd_out=(double *)nco_free(crd_out); if(dat_in) dat_in=(double *)nco_free(dat_in); if(dat_out) dat_out=(double *)nco_free(dat_out); if(!in_ncr){ if(crd_in_mnt) crd_in_mnt=(double *)nco_free(crd_in_mnt); if(dat_in_mnt) dat_in_mnt=(double *)nco_free(dat_in_mnt); } /* !in_ncr */ if(!out_ncr){ if(crd_out_mnt) crd_out_mnt=(double *)nco_free(crd_out_mnt); if(dat_out_mnt) dat_out_mnt=(double *)nco_free(dat_out_mnt); } /* !out_ncr */ if(nco_typ_ntg(var_typ_out)){ /* 20210407: Round, with rint(), integer fields before sending to netCDF for output Otherwise implicit type conversion will truncate (rather than round) output values This is critical for masks where rounding errors produce near integer values (e.g., 0.999...) that could then be truncated to zero by implicit conversion instead of rounded up to 1. */ for(idx_out=0;idx_out<var_sz_out;idx_out++) if(var_val_dbl_out[idx_out] != mss_val_cmp_dbl) var_val_dbl_out[idx_out]=rint(var_val_dbl_out[idx_out]); } /* !nco_typ_ntg() */ /* begin OpenMP critical */ rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_dbl_out,var_typ_rgr); /* end OpenMP critical */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(var_val_dbl_in) var_val_dbl_in=(double *)nco_free(var_val_dbl_in); if(var_val_dbl_out) var_val_dbl_out=(double *)nco_free(var_val_dbl_out); }else{ /* !trv.flg_rgr */ /* Use standard NCO copy routine for variables that are not regridded 20190511: Copy them only once */ if(tm_idx == 0){ /* begin OpenMP critical */ (void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl); /* end OpenMP critical */ } /* !tm_idx */ } /* !flg_rgr */ } /* !xtr */ } /* end (OpenMP parallel for) loop over idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n"); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables interpolated = %d, copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr); } /* !tm_idx */ if(att_nm_fll_val) att_nm_fll_val=(char *)nco_free(att_nm_fll_val); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in); if(dmn_ids_out) dmn_ids_out=(int *)nco_free(dmn_ids_out); if(ilev_nm_in) ilev_nm_in=(char *)nco_free(ilev_nm_in); if(lev_nm_in) lev_nm_in=(char *)nco_free(lev_nm_in); if(hyai_in) hyai_in=(double *)nco_free(hyai_in); if(hyam_in) hyam_in=(double *)nco_free(hyam_in); if(hybi_in) hybi_in=(double *)nco_free(hybi_in); if(hybm_in) hybm_in=(double *)nco_free(hybm_in); if(ps_in) ps_in=(double *)nco_free(ps_in); if(prs_mdp_in) prs_mdp_in=(double *)nco_free(prs_mdp_in); 
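/* NB: nco_free() free()'s its argument and returns NULL, so the idiom ptr=(type *)nco_free(ptr) releases the buffer and resets the pointer in one step, making any subsequent if(ptr) guard skip a double-free */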
if(prs_ntf_in) prs_ntf_in=(double *)nco_free(prs_ntf_in); if(hyai_out) hyai_out=(double *)nco_free(hyai_out); if(hyam_out) hyam_out=(double *)nco_free(hyam_out); if(hybi_out) hybi_out=(double *)nco_free(hybi_out); if(hybm_out) hybm_out=(double *)nco_free(hybm_out); if(ilev_out) ilev_out=(double *)nco_free(ilev_out); if(lev_in) lev_in=(double *)nco_free(lev_in); if(lev_out) lev_out=(double *)nco_free(lev_out); if(ps_out) ps_out=(double *)nco_free(ps_out); if(prs_mdp_out) prs_mdp_out=(double *)nco_free(prs_mdp_out); if(prs_ntf_out) prs_ntf_out=(double *)nco_free(prs_ntf_out); return rcd; } /* !nco_ntp_vrt() */ int /* O [enm] Return code */ nco_rgr_wgt /* [fnc] Regrid with external weights */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Regrid fields using external weights contained in a mapfile Examine ESMF, SCRIP, Tempest map-files: ncks --cdl -M -m ${DATA}/scrip/rmp_T42_to_POP43_conserv.nc | m ncks --cdl -M -m ${DATA}/maps/map_t42_to_fv129x256_aave.20150621.nc | m ncks --cdl -M -m ${DATA}/maps/map_ne30np4_to_ne120np4_tps.20150618.nc | m Test ESMF, SCRIP, Tempest map-files: ncks -D 5 -O --map=${DATA}/scrip/rmp_T42_to_POP43_conserv.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc ncks -D 5 -O --map=${DATA}/maps/map_t42_to_fv129x256_aave.20150621.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc ncks -D 5 -O --map=${DATA}/maps/map_ne30np4_to_ne120np4_tps.20150618.nc ${DATA}/ne30/rgr/ne30_1D.nc ~/foo.nc Mapfile formats ESMF, GRIDSPEC, SCRIP, and UGRID described here: http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_6_3_0rp1/ESMF_refdoc/node3.html#sec:fileformat:scrip Conventions: grid_size: Number of gridcells (product of lat*lon) address: Source and destination index for each link pair num_links: Number of unique address pairs in remapping, i.e., size of sparse matrix num_wgts: Number of weights per vertice for given remapping (we only handle num_wgts == 1 below) = 1 Bilinear Destination grid value determined by weights times known source grid values at vertices of source quadrilateral that bounds destination point P One weight per vertice guarantees fxm but is not conservative Bilinear requires logically rectangular grid = 1 Distance-based: Distance-weighted uses values at num_neighbors points The weight is inversely proportional to the angular distance from the destination point to each neighbor on the source grid = 3 Second-order conservative: Described in Jones, P. W. (1999), Monthly Weather Review, 127, 2204-2210 First-order conservative schemes assume fluxes are constant within gridcell Destination fluxes are simple summations of source fluxes weighted by overlap areas Old clm and bds remappers use a first-order algorithm Second-order improves this by using a first-order Taylor expansion of flux Source flux is centroid value plus directional offset determined by dot product of directional gradient and vector pointing from vertice to centroid. Three weights per vertice are centroid weight, weight times local theta-gradient from centroid to vertice, and weight times local phi-gradient from centroid to vertice.
= 4 Bicubic: The four weights are gradients in each direction plus a cross-gradient term Same principle as bilinear, but more weights per vertice Bicubic requires logically rectangular grid wgt: Maximum number of source cells contributing to destination cell is not a dimension in SCRIP remapping files because SCRIP stores everything in 1-D sparse matrix arrays Definition of sparse matrix formulations and normalization terminology, SCRIP manual p. 8, 13, 16: for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ // Remap source function f = 1 in all unmasked source gridcells, zero elsewhere, to function F on destination grid // Normalization: fractional area (fracarea) (F = 1 where destination overlaps unmasked source grid) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]; // Normalization: destination area (destarea) (weights in each destination cell sum to its area fraction) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]/dst_area[ddr_dst[lnk_idx]]; // Normalization: none (F = angular area that participates in remapping) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]/(dst_area[ddr_dst[lnk_idx]]*dst_frc[ddr_dst[lnk_idx]]); } // end loop over lnk Documentation: NCL special cases described in popRemap.ncl, e.g., at https://github.com/yyr/ncl/blob/master/ni/src/examples/gsun/popRemap.ncl ESMF Regridding Status: https://www.earthsystemcog.org/projects/esmf Sample regrid T42->POP43, SCRIP: ncks -O --map=${DATA}/scrip/rmp_T42_to_POP43_conserv.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc */ const char fnc_nm[]="nco_rgr_wgt()"; /* [sng] Function name */ char *fl_in; char *fl_pth_lcl=NULL; const double rdn2dgr=180.0/M_PI; const double dgr2rdn=M_PI/180.0; const double eps_rlt=1.0e-14; /* [frc] Round-off error tolerance */ double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */ double area_out_ttl=0.0; /* [frc] Exact sum of area */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int in_id; /* I [id] Input netCDF file ID */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int dmn_idx; /* [idx] Dimension index */ int dst_grid_corners_id; /* [id] Destination grid corners dimension ID */ int dst_grid_rank_id; /* [id] Destination grid rank dimension ID */ int dst_grid_size_id; /* [id] Destination grid size dimension ID */ int num_links_id; /* [id] Number of links dimension ID */ int num_wgts_id=NC_MIN_INT; /* [id] Number of weights dimension ID */ int src_grid_corners_id; /* [id] Source grid corners dimension ID */ int src_grid_rank_id; /* [id] Source grid rank dimension ID */ int src_grid_size_id; /* [id] Source grid size dimension ID */ long int lat_idx; long int lon_idx; short int bnd_idx; nco_bool FL_RTR_RMT_LCN; nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool flg_dgn_area_out=False; /* [flg] Diagnose area_out from grid boundaries */ nco_bool flg_bnd_1D_usable=False; /* [flg] Usable 1D cell vertices exist */ nco_bool flg_stg=rgr->flg_stg; /* [flg] Write staggered grid with FV output */ nco_grd_2D_typ_enm nco_grd_2D_typ=nco_grd_2D_nil; /* [enm] Two-dimensional grid-type enum */ nco_grd_lat_typ_enm
nco_grd_lat_typ=nco_grd_lat_nil; /* [enm] Latitude grid-type enum */ nco_grd_lon_typ_enm nco_grd_lon_typ=nco_grd_lon_nil; /* [enm] Longitude grid-type enum */ nco_mpf_sct mpf; size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s obtaining mapping weights from %s\n",nco_prg_nm_get(),fnc_nm,rgr->fl_map); /* Duplicate (because nco_fl_mk_lcl() free()'s fl_in) */ fl_in=(char *)strdup(rgr->fl_map); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); /* Identify mapping file type using string generated by weight-generator: ESMF: title = "ESMF Offline Regridding Weight Generator" ESMF_weight_only: title = "ESMF Regrid Weight Generator" NCO: Title = "netCDF Operators (NCO) Offline Regridding Weight Generator" MBTR: Title = "MOAB-TempestRemap Online Regridding Weight Generator" SCRIP: conventions = "SCRIP" Tempest: Title = "TempestRemap Offline Regridding Weight Generator" */ char *att_val; char *att_cnv_val=NULL; char *att_gnr_val=NULL; char *att_ttl_val=NULL; char *cnv_sng=NULL; /* netCDF standard is uppercase Conventions, though some models use lowercase */ char att_sng_Cnv[]="Conventions"; /* [sng] Unidata standard string (uppercase) */ char att_sng_cnv[]="conventions"; /* [sng] Unidata non-standard string (lowercase) */ char att_sng_gnr[]="weight_generator"; /* [sng] CMIP6 standard string */ char att_sng_Ttl[]="Title"; /* [sng] MBTR, NCO, and Tempest use "Title" attribute. MBTR and Tempest do not use "Conventions" */ char att_sng_ttl[]="title"; /* [sng] ERWG 7.1 weight_only uses "title" not "Conventions" attribute */ char name0_sng[]="name0"; /* [sng] Attribute where Tempest stores least-rapidly-varying dimension name */ nco_rgr_mpf_typ_enm nco_rgr_mpf_typ=nco_rgr_mpf_nil; /* [enm] Type of remapping file */ nco_rgr_typ_enm nco_rgr_typ=nco_rgr_grd_nil; /* [enm] Type of grid conversion */ /* Look for map-type signature in [cC]onventions or [tT]itle attribute */ att_cnv_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_cnv); if(!att_cnv_val) att_cnv_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_Cnv); att_gnr_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_gnr); att_ttl_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_ttl); if(!att_ttl_val) att_ttl_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_Ttl); /* Either "[cC]onventions" or "[tT]itle" attribute determines map-file type...
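Detection order used below: a SCRIP signature in [cC]onventions is checked first, then the known [tT]itle strings, then the weight_generator attribute as a fallback; anything still unmatched is tagged nco_rgr_mpf_unknown and subsequently handled with ESMF-style dimension and variable names (n_a, n_b, n_s, row, col, S)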
*/ if(att_cnv_val && strstr(att_cnv_val,"SCRIP")) nco_rgr_mpf_typ=nco_rgr_mpf_SCRIP; if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_ttl_val){ if(strstr(att_ttl_val,"ESMF Offline Regridding Weight Generator")) nco_rgr_mpf_typ=nco_rgr_mpf_ESMF; else if(strstr(att_ttl_val,"netCDF Operators")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO; else if(strstr(att_ttl_val,"MOAB-TempestRemap")) nco_rgr_mpf_typ=nco_rgr_mpf_MBTR; else if(strstr(att_ttl_val,"Tempest")) nco_rgr_mpf_typ=nco_rgr_mpf_Tempest; else if(strstr(att_ttl_val,"ESMF Regrid Weight Generator")) nco_rgr_mpf_typ=nco_rgr_mpf_ESMF_weight_only; } /* !att_ttl_val */ if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_cnv_val){ if(strstr(att_cnv_val,"NCO")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO; } /* !att_cnv_val */ if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_gnr_val){ if(strstr(att_gnr_val,"NCO")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO; } /* !att_gnr_val */ if(nco_rgr_mpf_typ == nco_rgr_mpf_nil){ (void)fprintf(stderr,"%s: WARNING %s unable to discern map-file type from global attributes \"[cC]onventions\" = \"%s\" and/or \"[tT]itle\" = \"%s\" and/or \"weight_generator\" = \"%s\"\n",nco_prg_nm_get(),fnc_nm,att_cnv_val ? att_cnv_val : "",att_ttl_val ? att_ttl_val : "",att_gnr_val ? att_gnr_val : ""); nco_rgr_mpf_typ=nco_rgr_mpf_unknown; } /* !nco_rgr_mpf_typ */ if(att_cnv_val) att_cnv_val=(char *)nco_free(att_cnv_val); if(att_gnr_val) att_gnr_val=(char *)nco_free(att_gnr_val); if(att_ttl_val) att_ttl_val=(char *)nco_free(att_ttl_val); switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_SCRIP: rcd+=nco_inq_dimid(in_id,"src_grid_size",&src_grid_size_id); rcd+=nco_inq_dimid(in_id,"dst_grid_size",&dst_grid_size_id); rcd+=nco_inq_dimid(in_id,"src_grid_corners",&src_grid_corners_id); rcd+=nco_inq_dimid(in_id,"dst_grid_corners",&dst_grid_corners_id); rcd+=nco_inq_dimid(in_id,"src_grid_rank",&src_grid_rank_id); rcd+=nco_inq_dimid(in_id,"dst_grid_rank",&dst_grid_rank_id); rcd+=nco_inq_dimid(in_id,"num_links",&num_links_id); rcd+=nco_inq_dimid(in_id,"num_wgts",&num_wgts_id); break; case nco_rgr_mpf_ESMF_weight_only: rcd+=nco_inq_dimid(in_id,"n_s",&num_links_id); break; case nco_rgr_mpf_ESMF: case nco_rgr_mpf_MBTR: case nco_rgr_mpf_NCO: case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: rcd+=nco_inq_dimid(in_id,"n_a",&src_grid_size_id); rcd+=nco_inq_dimid(in_id,"n_b",&dst_grid_size_id); rcd+=nco_inq_dimid(in_id,"nv_a",&src_grid_corners_id); rcd+=nco_inq_dimid(in_id,"nv_b",&dst_grid_corners_id); rcd+=nco_inq_dimid(in_id,"src_grid_rank",&src_grid_rank_id); rcd+=nco_inq_dimid(in_id,"dst_grid_rank",&dst_grid_rank_id); if(nco_rgr_mpf_typ != nco_rgr_mpf_Tempest){ rcd+=nco_inq_dimid_flg(in_id,"num_wgts",&num_wgts_id); if(rcd != NC_NOERR){ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s reports map-file does not contain \"num_wgts\" dimension. ERWG always produces this as an orphan dimension, so post-processing could have removed it without harming other map-file fields.
No harm, no foul.\n",nco_prg_nm_get(),fnc_nm); rcd=NC_NOERR; } /* !rcd */ } /* !nco_rgr_mpf_Tempest */ rcd+=nco_inq_dimid(in_id,"n_s",&num_links_id); break; default: (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map-file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); /* NB: This return never executes because nco_dfl_case_generic_err() calls exit() Return placed here to suppress clang -Wsometimes-uninitialized warnings This is done many other times throughout the code, though explained only once, here */ return NCO_ERR; break; } /* end switch */ /* Use dimension IDs to get dimension sizes */ rcd+=nco_inq_dimlen(in_id,num_links_id,&mpf.num_links); if(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only){ rcd+=nco_inq_dimlen(in_id,src_grid_size_id,&mpf.src_grid_size); rcd+=nco_inq_dimlen(in_id,dst_grid_size_id,&mpf.dst_grid_size); rcd+=nco_inq_dimlen(in_id,src_grid_corners_id,&mpf.src_grid_corners); rcd+=nco_inq_dimlen(in_id,dst_grid_corners_id,&mpf.dst_grid_corners); rcd+=nco_inq_dimlen(in_id,src_grid_rank_id,&mpf.src_grid_rank); rcd+=nco_inq_dimlen(in_id,dst_grid_rank_id,&mpf.dst_grid_rank); /* TempestRemap does not generate num_wgts */ if(nco_rgr_mpf_typ == nco_rgr_mpf_MBTR || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || num_wgts_id == NC_MIN_INT){ mpf.num_wgts=int_CEWI; }else{ rcd+=nco_inq_dimlen(in_id,num_wgts_id,&mpf.num_wgts); } /* !num_wgts_id */ assert(mpf.src_grid_size < INT_MAX && mpf.dst_grid_size < INT_MAX); }else{ mpf.src_grid_size=long_CEWI; mpf.dst_grid_size=long_CEWI; mpf.src_grid_corners=long_CEWI; mpf.dst_grid_corners=long_CEWI; mpf.src_grid_rank=long_CEWI; mpf.dst_grid_rank=long_CEWI; mpf.num_wgts=int_CEWI; } /* !ESMF_weight_only */ cnv_sng=strdup("normalization"); nco_rgr_nrm_typ_enm nco_rgr_nrm_typ=nco_rgr_nrm_nil; att_val=nco_char_att_get(in_id,NC_GLOBAL,cnv_sng); if(att_val){ if(strstr(att_val,"fracarea")) nco_rgr_nrm_typ=nco_rgr_nrm_fracarea; /* 20190912: map_gx1v6T_to_1x1_bilin.nc and map_0.1T_tripole_to_0.1x0.1_bilin.nc store "fracarea" in normalization attribute. I think NCAR created both maps for POP, probably by running ERWG with option --norm_type=fracarea. Hence "fracarea" seems to be the NCAR-way of guaranteeing that ESMF re-normalization is not performed by default. 
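In practice the distinction matters only where frc_b < 1: destarea weights divide by the full destination gridcell area, so values in partially overlapped cells are biased low unless re-normalized by 1/frc_b, whereas fracarea weights yield the correct mean over the overlapped fraction without further re-normalization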
*/ if(strstr(att_val,"destarea")) nco_rgr_nrm_typ=nco_rgr_nrm_destarea; /* ESMF conserve "aave" and bilinear "bilin" generate "destarea" by default */ if(strstr(att_val,"none")) nco_rgr_nrm_typ=nco_rgr_nrm_none; if(att_val) att_val=(char *)nco_free(att_val); }else{ /* 20150712: Tempest does not store a normalization attribute 20170620: ESMF weight_only does not store a normalization attribute 20190312: NCO does not yet store a normalization attribute */ if(nco_rgr_mpf_typ == nco_rgr_mpf_MBTR || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_NCO || nco_rgr_mpf_typ == nco_rgr_mpf_unknown || nco_rgr_mpf_typ == nco_rgr_mpf_ESMF_weight_only) nco_rgr_nrm_typ=nco_rgr_nrm_unknown; } /* endif normalization */ assert(nco_rgr_nrm_typ != nco_rgr_nrm_nil); if(cnv_sng) cnv_sng=(char *)nco_free(cnv_sng); cnv_sng=strdup("map_method"); nco_rgr_mth_typ_enm nco_rgr_mth_typ=nco_rgr_mth_nil; att_val=nco_char_att_get(in_id,NC_GLOBAL,cnv_sng); if(att_val){ if(strcasestr(att_val,"Conservative")) nco_rgr_mth_typ=nco_rgr_mth_conservative; if(strcasestr(att_val,"Bilinear")) nco_rgr_mth_typ=nco_rgr_mth_bilinear; if(strcasestr(att_val,"none")) nco_rgr_mth_typ=nco_rgr_mth_none; if(att_val) att_val=(char *)nco_free(att_val); }else{ /* Tempest does not store a map_method attribute */ if(nco_rgr_mpf_typ == nco_rgr_mpf_MBTR || nco_rgr_mpf_typ == nco_rgr_mpf_NCO || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_unknown) nco_rgr_mth_typ=nco_rgr_mth_unknown; } /* endif */ if(nco_rgr_mth_typ == nco_rgr_mth_nil) (void)fprintf(stdout,"%s: WARNING %s reports map global attribute %s = %s does not match SCRIP/ESMF conventions that support only values of \"Conservative\" and \"Bilinear\" for this attribute. Proceeding anyway...\n",nco_prg_nm_get(),fnc_nm,cnv_sng,att_val); if(cnv_sng) cnv_sng=(char *)nco_free(cnv_sng); if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stderr,"%s: INFO %s regridding input metadata and grid sizes: ",nco_prg_nm_get(),fnc_nm); (void)fprintf(stderr,"mapfile_generator = %s, map_method = %s, normalization = %s, src_grid_size = n_a = %li, dst_grid_size = n_b = %li, src_grid_corners = nv_a = %li, dst_grid_corners = nv_b = %li, src_grid_rank = %li, dst_grid_rank = %li, num_links = n_s = %li, num_wgts = %li\n",nco_rgr_mpf_sng(nco_rgr_mpf_typ),nco_rgr_mth_sng(nco_rgr_mth_typ),nco_rgr_nrm_sng(nco_rgr_nrm_typ),mpf.src_grid_size,mpf.dst_grid_size,mpf.src_grid_corners,mpf.dst_grid_corners,mpf.src_grid_rank,mpf.dst_grid_rank,mpf.num_links,mpf.num_wgts); } /* endif dbg */ /* 20190726: Allow normalization type to be "none" for bilinear regridding which UKMO SCRIP files set to "none"*/ if(nco_rgr_mth_typ == nco_rgr_mth_conservative && nco_rgr_nrm_typ == nco_rgr_nrm_none){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports requested normalization type = %s is not yet supported. Specifically, masks specified by a mask variable (dst_grid_imask,mask_b) are ignored. More specifically, any destination mask information is assumed to be built into the weight array so that no source points will contribute to masked locations. 
Talk to Charlie if you want this changed.\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ)); nco_exit(EXIT_FAILURE); } /* !msk */ /* Got to here in bullet-proofing code for weight-only map-files */ if(nco_rgr_mpf_typ == nco_rgr_mpf_ESMF_weight_only) (void)fprintf(stderr,"%s: WARNING %s reached end of ESMF_weight_only section\n",nco_prg_nm_get(),fnc_nm); assert(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only); /* Set type of grid conversion */ if(mpf.src_grid_rank == 1 && mpf.dst_grid_rank == 1) nco_rgr_typ=nco_rgr_grd_1D_to_1D; if(mpf.src_grid_rank == 1 && mpf.dst_grid_rank == 2) nco_rgr_typ=nco_rgr_grd_1D_to_2D; if(mpf.src_grid_rank == 2 && mpf.dst_grid_rank == 1) nco_rgr_typ=nco_rgr_grd_2D_to_1D; if(mpf.src_grid_rank == 2 && mpf.dst_grid_rank == 2) nco_rgr_typ=nco_rgr_grd_2D_to_2D; assert(nco_rgr_typ != nco_rgr_grd_nil); /* Save typing later */ nco_bool flg_grd_in_1D_dat_in_2D=False; nco_bool flg_grd_in_1D=False; nco_bool flg_grd_in_2D=False; nco_bool flg_grd_out_1D=False; nco_bool flg_grd_out_2D=False; if(nco_rgr_typ == nco_rgr_grd_1D_to_1D || nco_rgr_typ == nco_rgr_grd_1D_to_2D) flg_grd_in_1D=True; if(nco_rgr_typ == nco_rgr_grd_2D_to_1D || nco_rgr_typ == nco_rgr_grd_2D_to_2D) flg_grd_in_2D=True; if(nco_rgr_typ == nco_rgr_grd_1D_to_1D || nco_rgr_typ == nco_rgr_grd_2D_to_1D) flg_grd_out_1D=True; if(nco_rgr_typ == nco_rgr_grd_1D_to_2D || nco_rgr_typ == nco_rgr_grd_2D_to_2D) flg_grd_out_2D=True; int dmn_nbr_hrz_crd; /* [nbr] Number of horizontal dimensions in output grid */ if(flg_grd_out_2D) dmn_nbr_hrz_crd=2; else dmn_nbr_hrz_crd=1; /* Obtain grid values necessary to compute output latitude and longitude coordinates */ int area_dst_id; /* [id] Area variable ID */ int col_src_adr_id; /* [id] Source address (col) variable ID */ int dmn_sz_in_int_id; /* [id] Source grid dimension sizes ID */ int dmn_sz_out_int_id; /* [id] Destination grid dimension sizes ID */ int dst_grd_crn_lat_id; /* [id] Destination grid corner latitudes variable ID */ int dst_grd_crn_lon_id; /* [id] Destination grid corner longitudes variable ID */ int dst_grd_ctr_lat_id; /* [id] Destination grid center latitudes variable ID */ int dst_grd_ctr_lon_id; /* [id] Destination grid center longitudes variable ID */ int frc_dst_id; /* [id] Fraction variable ID */ int msk_dst_id=NC_MIN_INT; /* [id] Mask variable ID */ int row_dst_adr_id; /* [id] Destination address (row) variable ID */ int wgt_raw_id; /* [id] Remap matrix variable ID */ switch(nco_rgr_mpf_typ){ /* Obtain fields whose name depends on mapfile type */ case nco_rgr_mpf_SCRIP: rcd+=nco_inq_varid(in_id,"dst_grid_area",&area_dst_id); /* ESMF: area_b */ rcd+=nco_inq_varid(in_id,"dst_grid_center_lon",&dst_grd_ctr_lon_id); /* ESMF: xc_b */ rcd+=nco_inq_varid(in_id,"dst_grid_center_lat",&dst_grd_ctr_lat_id); /* ESMF: yc_b */ rcd+=nco_inq_varid(in_id,"dst_grid_corner_lon",&dst_grd_crn_lon_id); /* ESMF: xv_b */ rcd+=nco_inq_varid(in_id,"dst_grid_corner_lat",&dst_grd_crn_lat_id); /* ESMF: yv_b */ rcd+=nco_inq_varid(in_id,"dst_grid_frac",&frc_dst_id); /* ESMF: frac_b */ rcd+=nco_inq_varid(in_id,"dst_address",&row_dst_adr_id); /* ESMF: row */ rcd+=nco_inq_varid(in_id,"src_address",&col_src_adr_id); /* ESMF: col */ rcd+=nco_inq_varid(in_id,"remap_matrix",&wgt_raw_id); /* NB: remap_matrix[num_links,num_wgts] != S[n_s] */ break; case nco_rgr_mpf_ESMF: case nco_rgr_mpf_ESMF_weight_only: case nco_rgr_mpf_MBTR: case nco_rgr_mpf_NCO: case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: if(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only){ 
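/* Weight-only map-files carry just the sparse matrix (row, col, S), so the destination-grid geometry fields are read only from full map-files */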
rcd+=nco_inq_varid(in_id,"area_b",&area_dst_id); /* SCRIP: dst_grid_area */ rcd+=nco_inq_varid(in_id,"xc_b",&dst_grd_ctr_lon_id); /* SCRIP: dst_grid_center_lon */ rcd+=nco_inq_varid(in_id,"yc_b",&dst_grd_ctr_lat_id); /* SCRIP: dst_grid_center_lat */ rcd+=nco_inq_varid(in_id,"xv_b",&dst_grd_crn_lon_id); /* SCRIP: dst_grid_corner_lon */ rcd+=nco_inq_varid(in_id,"yv_b",&dst_grd_crn_lat_id); /* SCRIP: dst_grid_corner_lat */ rcd+=nco_inq_varid(in_id,"frac_b",&frc_dst_id); /* SCRIP: dst_grid_frac */ } /* !nco_rgr_mpf_ESMF_weight_only */ rcd+=nco_inq_varid(in_id,"row",&row_dst_adr_id); /* SCRIP: dst_address */ rcd+=nco_inq_varid(in_id,"col",&col_src_adr_id); /* SCRIP: src_address */ rcd+=nco_inq_varid(in_id,"S",&wgt_raw_id); /* NB: remap_matrix[num_links,num_wgts] != S[n_s] */ break; default: (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); /* NB: This return never executes because nco_dfl_case_generic_err() calls exit() Return placed here to suppress clang -Wsometimes-uninitialized warnings This is done many other times throughout the code, though explained only once, here */ return NCO_ERR; break; } /* end switch */ /* Obtain fields whose presence depends on mapfile type */ nco_bool flg_msk_out=rgr->flg_msk_out; /* [flg] Add mask to output */ nco_bool flg_msk_apl=rgr->flg_msk_apl; /* [flg] Apply msk_out to variables after regridding */ msk_dst_id=NC_MIN_INT; if(flg_msk_out || flg_msk_apl){ switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_SCRIP: rcd=nco_inq_varid_flg(in_id,"dst_grid_imask",&msk_dst_id); /* ESMF: mask_b */ break; case nco_rgr_mpf_ESMF: case nco_rgr_mpf_MBTR: case nco_rgr_mpf_NCO: case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: /* 20190315: TempestRemap did not propagate mask_a/b until ~201902 20210519: MBTR did not propagate mask_a/b as of ~202105 */ rcd=nco_inq_varid_flg(in_id,"mask_b",&msk_dst_id); /* SCRIP: dst_grid_imask */ break; default: (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map-file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); } /* !nco_rgr_mpf_typ */ if(rcd == NC_ENOTVAR){ if(flg_msk_apl){ (void)fprintf(stderr,"%s: ERROR %s reports that user requested (with --mask_apply) the regridder to apply the destination mask field to variables after regridding. Unfortunately, the map-file lacks a destination mask of the expected name (usually \"mask_b\").\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* flg_msk_apl */ (void)fprintf(stderr,"%s: INFO %s reports map-file lacks mask_b. %sContinuing anyway without masks...\n",nco_prg_nm_get(),fnc_nm,(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_MBTR) ? 
"Probably this is either a TempestRemap map-file created before ~201902 when TR began to propagate mask_a/b variables, or it is a MOAB-TempestRemap file which has never (as of 202105) propagated mask_a/b variables" : ""); rcd=NC_NOERR; } /* !rcd */ if(msk_dst_id == NC_MIN_INT) flg_msk_out=False; } /* !flg_msk_out */ /* Obtain fields whose names are independent of mapfile type */ rcd+=nco_inq_varid(in_id,"src_grid_dims",&dmn_sz_in_int_id); rcd+=nco_inq_varid(in_id,"dst_grid_dims",&dmn_sz_out_int_id); int lon_psn_src; /* [idx] Ordinal position of longitude in rectangular source grid dimension-size array */ int lat_psn_src; /* [idx] Ordinal position of latitude in rectangular source grid dimension-size array */ int lon_psn_dst=int_CEWI; /* [idx] Ordinal position of longitude in rectangular destination grid dimension-size array */ int lat_psn_dst=int_CEWI; /* [idx] Ordinal position of latitude in rectangular destination grid dimension-size array */ if(flg_grd_in_2D){ lon_psn_src=0; /* SCRIP introduced [lon,lat] convention because more natural for Fortran */ lat_psn_src=1; if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest){ /* Until 20150814, Tempest stored [src/dst]_grid_dims as [lat,lon] unlike SCRIP's [lon,lat] order Newer behavior follows SCRIP [lon,lat] order Challenge: Support both older and newer Tempest mapfiles Tempest (unlike SCRIP and ESMF) annotates mapfile [src/dst]_grid_dims with attributes that identify axis to which each element of [src/dst]_grid_dims refers Solution: Use Tempest mapfile [src/dst]_grid_dims attributes "name0" and/or "name1" to determine if axes' positions follow old order */ att_val=nco_char_att_get(in_id,dmn_sz_in_int_id,name0_sng); if(att_val){ if(strstr(att_val,"lat")){ lon_psn_src=1; lat_psn_src=0; } /* !lat */ if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ } /* !Tempest */ } /* !flg_grd_in_2D */ if(flg_grd_out_2D){ lon_psn_dst=0; lat_psn_dst=1; if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest){ att_val=nco_char_att_get(in_id,dmn_sz_in_int_id,name0_sng); if(att_val){ if(strstr(att_val,"lat")){ lon_psn_dst=1; lat_psn_dst=0; } /* !lat */ if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ } /* !Tempest */ } /* !flg_grd_out_2D */ const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ double *area_out; /* [sr] Area of destination grid */ double *frc_out=NULL; /* [frc] Fraction of destination grid */ double *lat_bnd_out=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular destination grid */ double *lat_crn_out=NULL; /* [dgr] Latitude corners of rectangular destination grid */ double *lat_ctr_out=NULL_CEWI; /* [dgr] Latitude centers of rectangular destination grid */ double *lat_ntf_out=NULL; /* [dgr] Latitude interfaces of rectangular destination grid */ double *lat_wgt_out=NULL; /* [dgr] Latitude weights of rectangular destination grid */ double *lon_bnd_out=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular destination grid */ double *lon_crn_out=NULL; /* [dgr] Longitude corners of rectangular destination grid */ double *lon_ctr_out=NULL_CEWI; /* [dgr] Longitude centers of rectangular destination grid */ double *lon_ntf_out=NULL; /* [dgr] Longitude interfaces of rectangular destination grid */ double *slat_ctr_out=NULL_CEWI; /* [dgr] Latitude centers of staggered FV destination grid */ double 
*slat_wgt_out=NULL_CEWI; /* [frc] Latitude weights of staggered FV destination grid */ double *slon_ctr_out=NULL_CEWI; /* [dgr] Longitude centers of staggered FV destination grid */ double *wgt_raw; /* [frc] Remapping weights */ int *col_src_adr; /* [idx] Source address (col) */ int *row_dst_adr; /* [idx] Destination address (row) */ int *msk_out=NULL; /* [flg] Mask on destination grid */ int *dmn_sz_in_int; /* [nbr] Array of dimension sizes of source grid */ int *dmn_sz_out_int; /* [nbr] Array of dimension sizes of destination grid */ long *dmn_cnt_in=NULL; long *dmn_cnt_out=NULL; long *dmn_cnt=NULL; long *dmn_srt=NULL; long *dmn_srd=NULL; long idx; /* [idx] Counting index for unrolled grids */ /* Allocate space to hold dimension metadata for destination grid */ dmn_srt=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_cnt=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_srd=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_srt[0]=0L; dmn_cnt[0]=mpf.src_grid_rank; dmn_sz_in_int=(int *)nco_malloc(mpf.src_grid_rank*nco_typ_lng((nc_type)NC_INT)); rcd=nco_get_vara(in_id,dmn_sz_in_int_id,dmn_srt,dmn_cnt,dmn_sz_in_int,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=mpf.dst_grid_rank; dmn_sz_out_int=(int *)nco_malloc(mpf.dst_grid_rank*nco_typ_lng((nc_type)NC_INT)); rcd=nco_get_vara(in_id,dmn_sz_out_int_id,dmn_srt,dmn_cnt,dmn_sz_out_int,(nc_type)NC_INT); /* Check-for and workaround faulty Tempest and MPAS-O/I grid sizes */ if(flg_grd_in_1D && (mpf.src_grid_size != dmn_sz_in_int[0])){ (void)fprintf(stdout,"%s: INFO %s reports input grid dimension sizes disagree: mpf.src_grid_size = %ld != %d = dmn_sz_in[0]. Problem may be caused by incorrect src_grid_dims variable. This is a known issue with some TempestRemap mapfiles generated prior to ~20150901, and in some ESMF mapfiles for MPAS-O/I. This problem can be safely ignored if workaround succeeds. Attempting workaround ...\n",nco_prg_nm_get(),fnc_nm,mpf.src_grid_size,dmn_sz_in_int[0]); dmn_sz_in_int[0]=mpf.src_grid_size; } /* !bug */ if(flg_grd_out_1D && (mpf.dst_grid_size != dmn_sz_out_int[0])){ (void)fprintf(stdout,"%s: INFO %s reports output grid dimension sizes disagree: mpf.dst_grid_size = %ld != %d = dmn_sz_out[0]. Problem may be caused by incorrect dst_grid_dims variable. This is a known issue with some TempestRemap mapfiles generated prior to ~20150901, and in some ESMF mapfiles for MPAS-O/I. This problem can be safely ignored if workaround succeeds. 
Attempting workaround ...\n",nco_prg_nm_get(),fnc_nm,mpf.dst_grid_size,dmn_sz_out_int[0]); dmn_sz_out_int[0]=mpf.dst_grid_size; } /* !bug */ long col_nbr_in; /* [idx] Number of columns in source grid */ long lon_nbr_in; /* [idx] Number of longitudes in rectangular source grid */ long lat_nbr_in; /* [idx] Number of latitudes in rectangular source grid */ const size_t grd_sz_in=mpf.src_grid_size; /* [nbr] Number of elements in single layer of input grid */ const size_t grd_sz_out=mpf.dst_grid_size; /* [nbr] Number of elements in single layer of output grid */ if(flg_grd_in_1D){ col_nbr_in=dmn_sz_in_int[0]; lon_nbr_in=dmn_sz_in_int[0]; lat_nbr_in=dmn_sz_in_int[0]; }else if(flg_grd_in_2D){ col_nbr_in=0; lon_nbr_in=dmn_sz_in_int[lon_psn_src]; lat_nbr_in=dmn_sz_in_int[lat_psn_src]; /* Sanity-check */ assert(lat_nbr_in*lon_nbr_in == (long)grd_sz_in); } /* !src_grid_rank */ const int bnd_tm_nbr_out=2; /* [nbr] Number of boundaries for output time */ int bnd_nbr_out=int_CEWI; /* [nbr] Number of boundaries for output time and rectangular grid coordinates, and number of vertices for output non-rectangular grid coordinates */ long col_nbr_out=long_CEWI; /* [nbr] Number of columns in destination grid */ long lon_nbr_out=long_CEWI; /* [nbr] Number of longitudes in rectangular destination grid */ long lat_nbr_out=long_CEWI; /* [nbr] Number of latitudes in rectangular destination grid */ long slat_nbr_out=long_CEWI; /* [nbr] Number of latitudes in staggered FV grid destination grid */ long slon_nbr_out=long_CEWI; /* [nbr] Number of longitudes in staggered FV grid destination grid */ if(flg_grd_out_1D){ bnd_nbr_out=mpf.dst_grid_corners; col_nbr_out=dmn_sz_out_int[0]; lat_nbr_out=dmn_sz_out_int[0]; lon_nbr_out=dmn_sz_out_int[0]; /* Sanity-check */ assert(col_nbr_out == (long)grd_sz_out); }else if(flg_grd_out_2D){ lat_nbr_out=dmn_sz_out_int[lat_psn_dst]; lon_nbr_out=dmn_sz_out_int[lon_psn_dst]; col_nbr_out=lat_nbr_out*lon_nbr_out; /* NB: assign lat/lon sizes before computing their product */ slat_nbr_out=lat_nbr_out-1L; slon_nbr_out=lon_nbr_out; /* Sanity-check */ assert(lat_nbr_out*lon_nbr_out == (long)grd_sz_out); } /* !dst_grid_rank */ /* Ensure coordinates are in degrees not radians for simplicity and CF-compliance NB: ${DATA}/scrip/rmp_T42_to_POP43_conserv.nc has [xy]?_a in degrees and [xy]?_b in radians!
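e.g., when yc_b arrives in radians the code below simply scales in place, lat_ctr_out[idx]*=rdn2dgr, with rdn2dgr = 180/pi ~ 57.29578 defined above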
*/ nco_bool flg_crd_rdn=False; /* [flg] Destination coordinates are in radians not degrees */ char unt_sng[]="units"; /* [sng] netCDF-standard units attribute name */ att_val=nco_char_att_get(in_id,dst_grd_ctr_lat_id,unt_sng); if(att_val){ /* Match "radian" and "radians" */ if(strstr(att_val,"radian")) flg_crd_rdn=True; if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ nco_bool flg_grd_out_crv=False; /* [flg] Curvilinear coordinates */ nco_bool flg_grd_out_rct=False; /* [flg] Rectangular coordinates */ const nc_type crd_typ_out=NC_DOUBLE; if(flg_grd_out_2D){ lon_ctr_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); lon_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*grd_sz_out*nco_typ_lng(crd_typ_out)); lat_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); rcd=nco_get_vara(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,lat_ctr_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_out; dmn_cnt[1]=mpf.dst_grid_corners; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_crn_out,crd_typ_out); rcd=nco_get_vara(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,lat_crn_out,crd_typ_out); /* User may specify curvilinear grid (with --rgr crv). Otherwise, manually test for curvilinear output grid. */ flg_grd_out_crv=rgr->flg_crv; /* [flg] Curvilinear coordinates */ if(flg_grd_out_crv){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Output grid specified to be %s\n",nco_prg_nm_get(),flg_grd_out_crv ? "Curvilinear" : "Rectangular"); }else{ long idx_tst=long_CEWI; /* [idx] Index of first latitude or longitude */ for(idx=0;idx<(long)grd_sz_out;idx++){ if(idx%lon_nbr_out == 0) idx_tst=idx; if(lat_ctr_out[idx] != lat_ctr_out[idx_tst]) break; // (void)fprintf(stdout,"%s: DEBUG lat_ctr_out[%li] = %g, lat_ctr_out[%li] = %g\n",nco_prg_nm_get(),idx,lat_ctr_out[idx],idx_tst,lat_ctr_out[idx_tst]); /* fxm: also test lon */ } /* !rectangular */ if(idx != (long)grd_sz_out) flg_grd_out_crv=True; else flg_grd_out_rct=True; if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO Output grid detected to be %s\n",nco_prg_nm_get(),flg_grd_out_crv ?
"Curvilinear" : "Rectangular"); } /* !flg_grd_out_crv */ if(flg_grd_out_crv) bnd_nbr_out=mpf.dst_grid_corners; if(flg_grd_out_rct) bnd_nbr_out=2; /* NB: Assumes rectangular latitude and longitude and is invalid for other quadrilaterals */ } /* !flg_grd_out_2D */ if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stderr,"%s: INFO %s grid conversion type = %s with expected input and prescribed output grid sizes: ",nco_prg_nm_get(),fnc_nm,nco_rgr_grd_sng(nco_rgr_typ)); (void)fprintf(stderr,"lat_in = %li, lon_in = %li, col_in = %li, lat_out = %li, lon_out = %li, col_out = %li\n",lat_nbr_in,lon_nbr_in,col_nbr_in,lat_nbr_out,lon_nbr_out,col_nbr_out); } /* endif dbg */ /* Allocate space for and obtain coordinates */ if(flg_grd_out_1D){ lon_ctr_out=(double *)nco_malloc(col_nbr_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(col_nbr_out*nco_typ_lng(crd_typ_out)); lon_bnd_out=(double *)nco_malloc(col_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); lat_bnd_out=(double *)nco_malloc(col_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); } /* !flg_grd_out_1D */ if(flg_grd_out_rct){ if(lat_ctr_out) lat_ctr_out=(double *)nco_free(lat_ctr_out); if(lon_ctr_out) lon_ctr_out=(double *)nco_free(lon_ctr_out); if(lat_crn_out) lat_crn_out=(double *)nco_free(lat_crn_out); if(lon_crn_out) lon_crn_out=(double *)nco_free(lon_crn_out); lon_ctr_out=(double *)nco_malloc(lon_nbr_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(lat_nbr_out*nco_typ_lng(crd_typ_out)); lon_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*lon_nbr_out*nco_typ_lng(crd_typ_out)); lat_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*lat_nbr_out*nco_typ_lng(crd_typ_out)); lat_wgt_out=(double *)nco_malloc(lat_nbr_out*nco_typ_lng(crd_typ_out)); lon_ntf_out=(double *)nco_malloc((lon_nbr_out+1L)*nco_typ_lng(crd_typ_out)); lat_ntf_out=(double *)nco_malloc((lat_nbr_out+1L)*nco_typ_lng(crd_typ_out)); lon_bnd_out=(double *)nco_malloc(lon_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); lat_bnd_out=(double *)nco_malloc(lat_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); } /* !flg_grd_out_rct */ /* Arrays unroll into all longitudes for first latitude, then second latitude, ... Obtain longitudes by reading first block contiguously (unstrided) Obtain latitudes by reading unrolled data with stride of lon_nbr */ if(flg_grd_out_1D){ dmn_srt[0]=0L; dmn_cnt[0]=col_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); dmn_srt[0]=0L; dmn_cnt[0]=col_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,lat_ctr_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr_out; dmn_cnt[1]=bnd_nbr_out; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_bnd_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr_out; dmn_cnt[1]=bnd_nbr_out; rcd=nco_get_vara(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,lat_bnd_out,crd_typ_out); if(flg_crd_rdn){ for(idx=0;idx<col_nbr_out;idx++){ lon_ctr_out[idx]*=rdn2dgr; lat_ctr_out[idx]*=rdn2dgr; } /* !idx */ for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++){ lon_bnd_out[idx]*=rdn2dgr; lat_bnd_out[idx]*=rdn2dgr; } /* !idx */ } /* !rdn */ /* Is 1D interface information usable? 
Yes, unless all interfaces are zero NB: fxm Better algorithm for "usable" is that not all interfaces in any cell are equal */ flg_bnd_1D_usable=True; for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++) if(lon_bnd_out[idx] != 0.0) break; if(idx == col_nbr_out*bnd_nbr_out){ flg_bnd_1D_usable=False; }else{ for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++) if(lat_bnd_out[idx] != 0.0) break; if(idx == col_nbr_out*bnd_nbr_out) flg_bnd_1D_usable=False; } /* !usable */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0;idx<lat_nbr_out;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr_out[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr_out;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd_out[bnd_nbr_out*idx+bnd_idx],bnd_idx == bnd_nbr_out-1 ? "]\n" : ", "); } /* end loop over lat */ for(idx=0;idx<lon_nbr_out;idx++){ (void)fprintf(stdout,"lon[%li] = %g, vertices = ",idx,lon_ctr_out[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr_out;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lon_bnd_out[bnd_nbr_out*idx+bnd_idx],bnd_idx == bnd_nbr_out-1 ? "]\n" : ", "); } /* end loop over lon */ } /* endif dbg */ } /* !flg_grd_out_1D */ if(flg_grd_out_rct){ /* fxm: sub-sample these from the already-read ctr/crn arrays */ dmn_srt[0L]=0L; dmn_cnt[0L]=lon_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr_out; dmn_srd[0L]=lon_nbr_out; rcd=nco_get_vars(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,dmn_srd,lat_ctr_out,crd_typ_out); dmn_srt[0L]=dmn_srt[1]=0L; dmn_cnt[0L]=lon_nbr_out; dmn_cnt[1]=mpf.dst_grid_corners; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_crn_out,crd_typ_out); dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr_out; dmn_srd[0L]=lon_nbr_out; dmn_srt[1]=0L; dmn_cnt[1]=mpf.dst_grid_corners; dmn_srd[1]=1L; rcd=nco_get_vars(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,dmn_srd,lat_crn_out,crd_typ_out); if(flg_crd_rdn){ for(idx=0L;idx<lon_nbr_out;idx++) lon_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<lat_nbr_out;idx++) lat_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<lon_nbr_out*mpf.dst_grid_corners;idx++) lon_crn_out[idx]*=rdn2dgr; for(idx=0L;idx<lat_nbr_out*mpf.dst_grid_corners;idx++) lat_crn_out[idx]*=rdn2dgr; } /* !rdn */ } /* !flg_grd_out_rct */ if(flg_grd_out_crv){ if(flg_crd_rdn){ for(idx=0L;idx<(long)grd_sz_out;idx++) lon_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out;idx++) lat_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out*mpf.dst_grid_corners;idx++) lon_crn_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out*mpf.dst_grid_corners;idx++) lat_crn_out[idx]*=rdn2dgr; } /* !rdn */ } /* !flg_grd_out_crv */ /* Allocate space for and obtain area, fraction, and mask, which are needed for both 1D and 2D grids */ area_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,area_dst_id,dmn_srt,dmn_cnt,area_out,crd_typ_out); frc_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,frc_dst_id,dmn_srt,dmn_cnt,frc_out,crd_typ_out); if(msk_dst_id != NC_MIN_INT){ msk_out=(int *)nco_malloc(grd_sz_out*nco_typ_lng(NC_INT)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,msk_dst_id,dmn_srt,dmn_cnt,msk_out,(nc_type)NC_INT); } /* !msk */ /* Derive 2D interface boundaries from lat and lon grid-center values NB: Procedures to derive interfaces from midpoints on rectangular grids are theoretically possible However, ESMF often outputs interface values (e.g., yv_b)
for midpoint coordinates (e.g., yc_b) For example, ACME standard map from ne120np4 to 181x360 has yc_b[0] = yv_b[0] = -90.0 Latitude = -90 is, by definition, not a midpoint coordinate This appears to be an artifact of the non-physical representation of the FV grid, i.e., a grid center located at the pole where longitudes collapse in the model, but cannot be represented as collapsed on a rectangular 2D grid with non-zero areas. Unfortunately, ESMF supports this nonsense by labeling the grid center as at the pole so that applications can easily diagnose an FV grid when they read in datasets. A superior application could diagnose FV just fine from actual non-polar gridcell centers Maybe ESMF could introduce a flag or something to indicate/avoid this special case? Safer to read boundary interfaces directly from grid corner/vertex arrays in map file Derivation of boundaries xv_b, yv_b from _correct_ xc_b, yc_b is as follows Do not implement this procedure until resolving midpoint/center issue described above: lon_ntf_out[0L]=0.5*(lon_ctr_out[0L]+lon_ctr_out[lon_nbr_out-1L])-180.0; // Extrapolation lat_ntf_out[0L]=lat_ctr_out[0L]-0.5*(lat_ctr_out[1L]-lat_ctr_out[0L]); // Extrapolation for(idx=1L;idx<lon_nbr_out;idx++) lon_ntf_out[idx]=0.5*(lon_ctr_out[idx-1L]+lon_ctr_out[idx]); for(idx=1L;idx<lat_nbr_out;idx++) lat_ntf_out[idx]=0.5*(lat_ctr_out[idx-1L]+lat_ctr_out[idx]); lon_ntf_out[lon_nbr_out]=lon_ntf_out[0L]+360.0; lat_ntf_out[lat_nbr_out]=lat_ctr_out[lat_nbr_out-1L]+0.5*(lat_ctr_out[lat_nbr_out-1L]-lat_ctr_out[lat_nbr_out-2L]); */ if(flg_grd_out_rct){ double lon_spn; /* [dgr] Longitude span */ double lat_spn; /* [dgr] Latitude span */ nco_bool flg_s2n=True; /* [flg] Latitude grid-direction is South-to-North */ if(lat_ctr_out[1L] < lat_ctr_out[0L]) flg_s2n=False; /* Obtain 1-D rectangular interfaces from unrolled 1-D vertex arrays */ for(idx=0L;idx<lon_nbr_out;idx++) lon_ntf_out[idx]=lon_crn_out[mpf.dst_grid_corners*idx]; /* 20201009 The four possible CCW RLL orderings start with the ul, ll, lr, or ur vertex NCO grid generators store vertices in order (0,1,2,3)=(ul,ll,lr,ur) NCO final latitude is in upper vertices (0,3) for S2N grids, lower vertices (1,2) for N2S grids NCO final longitude is in RHS vertices (2,3) for S2N and N2S grids Need generic algorithm to pick easternmost longitude for any of the four CCW orderings What is ESMF vertex ordering? or does ESMF always copy from input grid? Most grid generators probably start with ul or ll so vertex 2 is a good choice for easternmost */ // lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-(mpf.dst_grid_corners-1L)]; // ESMF?
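/* Illustration (commentary only; indices follow directly from the array layout above): with mpf.dst_grid_corners=4 and the NCO CCW ordering (0,1,2,3)=(ul,ll,lr,ur), the unrolled corners of the final longitude cell occupy indices [4*lon_nbr_out-4L .. 4*lon_nbr_out-1L]:
   lon_crn_out[4*lon_nbr_out-4L] // ul
   lon_crn_out[4*lon_nbr_out-3L] // ll
   lon_crn_out[4*lon_nbr_out-2L] // lr = first candidate for easternmost interface
   lon_crn_out[4*lon_nbr_out-1L] // ur = first fallback when lr duplicates the previous interface
   The statements below implement exactly this fallback chain (lr, then ur, then ll) */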
lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-2L]; // NCO lr if(lon_ntf_out[lon_nbr_out-1] == lon_ntf_out[lon_nbr_out]) lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-1L]; // NCO ur if(lon_ntf_out[lon_nbr_out-1] == lon_ntf_out[lon_nbr_out]) lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-3L]; // NCO ll assert(lon_ntf_out[lon_nbr_out-1] != lon_ntf_out[lon_nbr_out]); lon_spn=lon_ntf_out[lon_nbr_out]-lon_ntf_out[0L]; for(idx=0L;idx<lat_nbr_out;idx++) lat_ntf_out[idx]=lat_crn_out[mpf.dst_grid_corners*idx]; if(flg_s2n) lat_ntf_out[lat_nbr_out]=max_dbl(lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-1L],lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-2L]); else lat_ntf_out[lat_nbr_out]=min_dbl(lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-1L],lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-2L]); assert(lat_ntf_out[lat_nbr_out] != lat_ntf_out[lat_nbr_out-1]); lat_spn=fabs(lat_ntf_out[lat_nbr_out]-lat_ntf_out[0L]); /* Place 1-D rectangular interfaces into 2-D coordinate boundaries */ for(idx=0L;idx<lon_nbr_out;idx++){ lon_bnd_out[2L*idx]=lon_ntf_out[idx]; lon_bnd_out[2L*idx+1L]=lon_ntf_out[idx+1L]; } /* !lon_nbr_out */ for(idx=0L;idx<lat_nbr_out;idx++){ lat_bnd_out[2L*idx]=lat_ntf_out[idx]; lat_bnd_out[2L*idx+1L]=lat_ntf_out[idx+1L]; } /* !lat_nbr_out */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0L;idx<lon_nbr_out;idx++) (void)fprintf(stdout,"lon[%li] = [%g, %g, %g]\n",idx,lon_bnd_out[2L*idx],lon_ctr_out[idx],lon_bnd_out[2L*idx+1L]); for(idx=0L;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li] = [%g, %g, %g]\n",idx,lat_bnd_out[2L*idx],lat_ctr_out[idx],lat_bnd_out[2L*idx+1L]); } /* endif dbg */ /* Global or regional grid? */ nco_grd_xtn_enm nco_grd_xtn; /* [enm] Extent of grid */ if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; /* Diagnose type of latitude output grid by testing second latitude center against formulae */ double lat_ctr_tst_eqa; double lat_ctr_tst_fv; if(flg_s2n) lat_ctr_tst_eqa=lat_ntf_out[0L]+lat_spn*1.5/lat_nbr_out; else lat_ctr_tst_eqa=lat_ntf_out[0L]-lat_spn*1.5/lat_nbr_out; if(flg_s2n) lat_ctr_tst_fv=lat_ntf_out[0L]+lat_spn/(lat_nbr_out-1L); else lat_ctr_tst_fv=lat_ntf_out[0L]-lat_spn/(lat_nbr_out-1L); double lat_ctr_tst_gss; /* In diagnosing grids, agreement to slightly worse than single-precision is "good enough for government work" Hence some comparisons cast from double to float before comparison 20150526: T42 grid from SCRIP and related maps, and NCL-generated Gaussian grids for CESM, are accurate to at most ~eight digits 20150611: map_ne120np4_to_fv801x1600_bilin.150418.nc has yc_b[1600]=-89.775000006 not expected exact value lat_ctr[1]=-89.775000000000006 20170521: T62 grid from NCEP-NCAR Reanalysis 1 is worse than single precision, has yc_[192]=-86.6531 not expected exact value lat_ctr[1]=-86.6532 */ if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_eqa) nco_grd_lat_typ=nco_grd_lat_eqa; if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_fv) nco_grd_lat_typ=nco_grd_lat_fv; double *wgt_Gss_out=NULL; // [frc] Gaussian weights double precision if(nco_grd_lat_typ == nco_grd_lat_nil){ /* Check for Gaussian grid */ double *lat_sin_out; // [frc] Sine of Gaussian latitudes double precision lat_sin_out=(double *)nco_malloc(lat_nbr_out*sizeof(double)); wgt_Gss_out=(double *)nco_malloc(lat_nbr_out*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr_out,flg_s2n,lat_sin_out,wgt_Gss_out); lat_ctr_tst_gss=rdn2dgr*asin(lat_sin_out[1L]); /* Gaussian weights on 
output grid will be double-precision accurate Grid itself is kept as user-specified so area diagnosed by ESMF_RegridWeightGen may be slightly inconsistent with weights */ if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stderr,"%s: INFO %s reports lat_ctr_out[1] = %g, lat_ctr_tst_gss = %g\n",nco_prg_nm_get(),fnc_nm,lat_ctr_out[1L],lat_ctr_tst_gss); if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_gss) nco_grd_lat_typ=nco_grd_lat_gss; if(lat_sin_out) lat_sin_out=(double *)nco_free(lat_sin_out); } /* !Gaussian */ if(nco_grd_lat_typ == nco_grd_lat_nil){ /* If still of unknown type, this 2D grid may be weird This occurs, e.g., with POP3 destination grid Change gridtype from nil (which means not-yet-set) to unknown (which means none of the others matched) */ nco_grd_lat_typ=nco_grd_lat_unk; } /* !nil */ /* Currently grd_lat_typ and grd_2D_typ are equivalent, though that may be relaxed in future */ if(nco_grd_lat_typ == nco_grd_lat_unk) nco_grd_2D_typ=nco_grd_2D_unk; else if(nco_grd_lat_typ == nco_grd_lat_gss) nco_grd_2D_typ=nco_grd_2D_gss; else if(nco_grd_lat_typ == nco_grd_lat_fv) nco_grd_2D_typ=nco_grd_2D_fv; else if(nco_grd_lat_typ == nco_grd_lat_eqa) nco_grd_2D_typ=nco_grd_2D_eqa; else assert(False); if(nco_grd_lon_typ == nco_grd_lon_nil){ /* NB: Longitude grid diagnosis is susceptible to mistakes when input mapfile embeds common faulty grids, e.g., ACME *150418* FV maps map_ne30np4_to_fv129x256_aave.150418.nc is diagnosed as regional grid of unknown type because of input grid flaws map_ne30np4_to_fv129x256_aave.20150901.nc is (correctly) diagnosed as global grid with lon_Grn_ctr */ if( (float)lon_ctr_out[0L] == 0.0f && (float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_Grn_ctr; else if((float)lon_ctr_out[0L] == -180.0f && (float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_180_ctr; else if((float)lon_ntf_out[0L] == 0.0f && (float)lon_ntf_out[1L] == (float)(lon_ntf_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_Grn_wst; else if((float)lon_ntf_out[0L] == -180.0f && (float)lon_ntf_out[1L] == (float)(lon_ntf_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_180_wst; else if((float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_bb; else nco_grd_lon_typ=nco_grd_lon_unk; } /* !nco_grd_lon_typ */ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output latitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lat_sng(nco_grd_lat_typ)); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output longitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lon_sng(nco_grd_lon_typ)); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output grid-extent: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_xtn_sng(nco_grd_xtn)); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ slat_ctr_out=(double *)nco_malloc(slat_nbr_out*nco_typ_lng(crd_typ_out)); slat_wgt_out=(double *)nco_malloc(slat_nbr_out*nco_typ_lng(crd_typ_out)); slon_ctr_out=(double *)nco_malloc(slon_nbr_out*nco_typ_lng(crd_typ_out)); for(idx=0L;idx<slat_nbr_out;idx++){ slat_ctr_out[idx]=lat_ntf_out[idx+1L]; slat_wgt_out[idx]=fabs(sin(dgr2rdn*lat_ctr_out[idx+1L])-sin(dgr2rdn*lat_ctr_out[idx])); /* fabs() ensures positive area in n2s grids */ } /* !slat_nbr_out */ for(idx=0L;idx<slon_nbr_out;idx++){ slon_ctr_out[idx]=lon_ntf_out[idx]; } /* !slon_nbr_out */ } /* !nco_grd_lat_fv */ switch(nco_grd_lat_typ){ case
nco_grd_lat_eqa: case nco_grd_lat_fv: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=fabs(sin(dgr2rdn*lat_bnd_out[2*idx+1L])-sin(dgr2rdn*lat_bnd_out[2*idx])); /* fabs() ensures positive area in n2s grids */ break; case nco_grd_lat_gss: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=wgt_Gss_out[idx]; if(wgt_Gss_out) wgt_Gss_out=(double *)nco_free(wgt_Gss_out); break; case nco_grd_lat_unk: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=0.0; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING %s reports unknown output latitude grid-type. Unable to guess what latitude weights should be.\n",nco_prg_nm_get(),fnc_nm); break; default: nco_dfl_case_generic_err(); break; } /* end nco_grd_lat_typ switch */ /* Fuzzy test of latitude weight normalization */ lat_wgt_ttl=0.0; for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_ttl+=lat_wgt_out[idx]; if(nco_grd_lat_typ == nco_grd_lat_eqa || nco_grd_lat_typ == nco_grd_lat_fv){ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd_out[2L*(lat_nbr_out-1L)+1L])-sin(dgr2rdn*lat_bnd_out[0L])); /* fabs() ensures positive area in n2s grids */ assert(fabs(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc) < eps_rlt); if(lat_wgt_ttl_xpc < 0.0) abort(); /* CEWI Use lat_wgt_ttl_xpc at least once outside of assert() to avoid gcc 4.8.2 set-but-not-used warning */ } /* !nco_grd_lat_eqa, !nco_grd_lat_fv */ } /* !flg_grd_out_rct */ /* When possible, ensure area_out is non-zero 20150722: ESMF documentation says "The grid area array is only output when the conservative remapping option is used" Actually, ESMF does (always?) output area, but area == 0.0 unless conservative remapping is used 20150721: ESMF bilinear interpolation map ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.150418.nc has area == 0.0 20150710: Tempest regionally refined grids like bilinearly interpolated CONUS for ACME RRM has area_out == 0 20150821: ESMF always outputs area_out == 0.0 for bilinear interpolation Check whether NCO must diagnose and provide its own area_out */ /* If area_out contains any zero... */ for(idx=0;idx<(long)grd_sz_out;idx++) if(area_out[idx] == 0.0) break; if(idx != (long)grd_sz_out){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Output grid detected with zero-valued output area(s) at idx = %ld (and likely others, too).\n",nco_prg_nm_get(),idx); } /* !zero */ for(idx=0;idx<(long)grd_sz_out;idx++) if(area_out[idx] != 0.0) break; if(idx == (long)grd_sz_out){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports area_out from mapfile is everywhere zero. This is expected for bilinearly interpolated output maps produced by ESMF_RegridWeightGen. ",nco_prg_nm_get(),fnc_nm); if(flg_grd_out_2D && flg_grd_out_rct && (bnd_nbr_out == 2 || bnd_nbr_out == 4)){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses quadrilateral area for rectangular output grids from a formula that assumes that cell boundaries follow arcs of constant latitude and longitude. This differs from the area of cells with boundaries that follow great circle arcs (used by, e.g., ESMF_RegridWeightGen and TempestRemap). Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. 
If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else if(flg_grd_out_2D && flg_grd_out_crv && (bnd_nbr_out == 2 || bnd_nbr_out == 4)){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses quadrilateral area for curvilinear output grids from formulae that assume that cell boundaries follow great circle arcs (as do, e.g., ESMF_RegridWeightGen and TempestRemap). This differs from the area of cells with boundaries that follow lines of constant latitude or longitude. Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else if(flg_grd_out_1D && flg_bnd_1D_usable){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses spherical polygon area for unstructured output grids from formulae that assume that cell boundaries follow great circle arcs (as do, e.g., ESMF_RegridWeightGen and TempestRemap). This differs from the area of cells with boundaries that follow lines of constant latitude or longitude. Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else{ /* !1D */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"However, NCO cannot find enough boundary information, or it is too stupid about spherical trigonometry, to diagnose area_out. NCO will output an area variable (named \"%s\") copied from the input mapfile. This area will be everywhere zero.\n",rgr->area_nm); } /* !2D */ } /* !area */ if(flg_dgn_area_out){ if(flg_grd_out_1D && flg_bnd_1D_usable){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"INFO: Diagnosing area_out for 1D grid\n"); /* Area of unstructured grids requires spherical trigonometry */ nco_sph_plg_area(rgr,lat_bnd_out,lon_bnd_out,col_nbr_out,bnd_nbr_out,area_out); } /* !1D */ if(flg_grd_out_crv){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"INFO: Diagnosing area_out for curvilinear grid\n"); /* Area of curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,lat_crn_out,lon_crn_out,grd_sz_out,bnd_nbr_out,area_out); } /* !flg_grd_out_crv */ if(flg_grd_out_rct && nco_grd_2D_typ != nco_grd_2D_unk){ /* Mr. Enenstein and George O.
Abell taught me the area of spherical zones Spherical zone area is exact and faithful to underlying rectangular equi-angular grid However, ESMF and Tempest approximate spherical polygons as connected by great circle arcs fxm: Distinguish spherical zone shapes (e.g., equi-angular) from great circle arcs (e.g., unstructured polygons) */ for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) area_out[lat_idx*lon_nbr_out+lon_idx]=fabs(dgr2rdn*(lon_bnd_out[2*lon_idx+1]-lon_bnd_out[2*lon_idx])*(sin(dgr2rdn*lat_bnd_out[2*lat_idx+1])-sin(dgr2rdn*lat_bnd_out[2*lat_idx]))); /* fabs() ensures positive area in n2s grids */ } /* !spherical zones */ } /* !flg_dgn_area_out */ if(rgr->tst == -1){ /* Passing --rgr tst=-1 causes regridder to fail here This failure should cause host climo script to abort */ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports regridder instructed to fail here. This tests failure mode in climo scripts...\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !tst */ /* Verify that frc_out is sometimes non-zero ESMF: "The grid frac arrays (frac_a and frac_b) are calculated by ESMF_RegridWeightGen. For conservative remapping, the grid frac array returns the area fraction of the grid cell which participates in the remapping. For bilinear and patch remapping, the destination grid frac array [frac_b] is one where the grid point participates in the remapping and zero otherwise. For bilinear and patch remapping, the source grid frac array is always set to zero." SCRIP: Similar to ESMF For both ESMF+SCRIP frac_[ab] are computed by the weight-generation algorithm and are not specified as part of the input grids How does an input ocean grid indicate that, say, half the gridcell is land and half ocean? Does it use the area variable to tell the weight generation algorithm that a gridcell is fractional? In other words does it use grid_imask=1 and grid_area=0.5*full_gridcell_area and, e.g., T=273.0? */ for(idx=0;idx<(long)grd_sz_out;idx++) if(frc_out[idx] != 0.0) break; if(idx == (long)grd_sz_out){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports frc_out == frac_b contains all zeros\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !always zero */ /* Test whether frc_out is ever zero... */ for(idx=0;idx<(long)grd_sz_out;idx++) if(frc_out[idx] == 0.0) break; if(nco_dbg_lvl_get() >= nco_dbg_std) if(idx != (long)grd_sz_out) (void)fprintf(stdout,"%s: INFO %s reports frc_out == frac_b contains zero-elements (e.g., at 1D idx = %ld)\n",nco_prg_nm_get(),fnc_nm,idx); /* Normalizing by frc_out is redundant iff frc_out == 1.0, so we can save time without sacrificing accuracy However, frc_out is often (e.g., for CS <-> RLL maps) close but not equal to unity (ESMF_RegridWeightGen issue?) 
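A minimal numeric sketch (hypothetical values, for illustration only): suppose a coastal destination gridcell has frc_out = 0.5 because only half its area overlaps valid source cells, and the destarea-normalized matrix product yields 10.0 for that cell. Renormalizing as 10.0/frc_out = 10.0/0.5 = 20.0 recovers the mean over the valid half-cell (usually wanted for state variables like temperature), whereas leaving the value at 10.0 preserves the area-integral (usually wanted for fluxes).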
Hence, decide whether to normalize by frc_out by diagnosing the furthest excursion of frc_out from unity */ nco_bool flg_frc_out_one=True; /* [flg] Destination gridcell fraction frc_out == frac_b is in [1-epsilon,frc_out,1+epsilon] */ nco_bool flg_frc_out_wrt=False; /* [flg] Write destination gridcell fraction frc_out == frac_b to regridded files */ double frc_out_dff_one; /* [frc] Deviation of frc_out from 1.0 */ double frc_out_dff_one_max=0.0; /* [frc] Maximum deviation of frc_out from 1.0 */ long idx_max_dvn=long_CEWI; /* [idx] Index of maximum deviation from 1.0 */ for(idx=0;idx<(long)grd_sz_out;idx++){ frc_out_dff_one=fabs(frc_out[idx]-1.0); if(frc_out_dff_one > frc_out_dff_one_max){ frc_out_dff_one_max=frc_out_dff_one; idx_max_dvn=idx; } /* !max */ } /* !idx */ if(frc_out_dff_one_max > eps_rlt) flg_frc_out_one=False; nco_bool flg_frc_nrm=False; /* [flg] Must normalize by frc_out == frac_b because frc_out is not always unity and specified normalization is destarea or none */ if(!flg_frc_out_one && /* If fraction is sometimes "far" from 1.0 and ... */ ((nco_rgr_mpf_typ == nco_rgr_mpf_ESMF && nco_rgr_mth_typ == nco_rgr_mth_conservative && (nco_rgr_nrm_typ == nco_rgr_nrm_destarea || nco_rgr_nrm_typ == nco_rgr_nrm_none)) || /* ESMF map-file specifies conservative regridding with "destarea" or "none" or ... */ (nco_rgr_mpf_typ != nco_rgr_mpf_ESMF)) /* 20191003: Weight-generator does not adhere to ESMF "normalization type" convention */ && True){ flg_frc_nrm=True; /* Avoid writing frc_out unless discrepancies are particularly egregious Otherwise would write frc_out for standard remaps like ne30->fv129x256 for which eps=2.46e-13 */ double eps_rlt_wrt_thr=3.0e-13; /* 20181104: Never write frac_b for CMIP6! */ /* if(frc_out_dff_one_max > eps_rlt_wrt_thr) flg_frc_out_wrt=True; */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s reports global metadata specifies conservative remapping with normalization of type = %s. Furthermore, destination fractions frc_dst = dst_frac = frac_b = frc_out contain non-unity elements (maximum deviation from unity of %g exceeds hard-coded (in variable eps_rlt) relative-epsilon threshold of %g for frc_out[%ld] = %g). Thus normalization issues will be explicitly treated.
Will apply \'destarea\' normalization (i.e., divide by non-zero frc_out[dst_idx]) to all regridded arrays.\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ),frc_out_dff_one_max,eps_rlt,idx_max_dvn,frc_out[idx_max_dvn]); if(nco_dbg_lvl_get() >= nco_dbg_std && flg_frc_out_wrt) (void)fprintf(stdout,"%s: INFO %s Maximum deviation %g exceeds threshold of %g that triggers automatic writing of fractional destination area as variable named frac_b in regridded output.\n",nco_prg_nm_get(),fnc_nm,frc_out_dff_one_max,eps_rlt_wrt_thr); } /* !sometimes non-unity */ if(nco_dbg_lvl_get() >= nco_dbg_std && flg_frc_nrm && rgr->flg_rnr){ // 20190918: Weaken from WARNING to INFO because NCO no longer renormalizes when using "destarea" maps unless specifically requested to with --rnr_thr (void)fprintf(stdout,"%s: INFO %s reports manual request to renormalize partially overlapped destination gridcells (i.e., gridcells with non-unity frc_dst = dst_frac = frac_b) to preserve mean-value of valid fraction of source gridcells (usually most useful for state variables), rather than dilute valid-fraction mean over total destination gridcell area to preserve area-integral of source data (the default, often most useful for ensuring global conservation of fluxes).\n",nco_prg_nm_get(),fnc_nm); //(void)fprintf(stdout,"%s: INFO %s reports manual request (with --rnr) to renormalize fields with non-unity frc_dst = dst_frac = frac_b at same time global metadata specifies normalization type = %s. Normalizing twice can be an error, depending on intent of each. Charlie is all ears on how NCO should handle this :)\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ)); //nco_exit(EXIT_FAILURE); } /* !flg_rnr */ /* Detailed summary of 2D grids now available including quality-checked coordinates and area */ if(flg_grd_out_2D && nco_dbg_lvl_get() >= nco_dbg_sbr){ lat_wgt_ttl=0.0; area_out_ttl=0.0; if(flg_grd_out_rct){ (void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm); for(idx=0;idx<lat_nbr_out;idx++) lat_wgt_ttl+=lat_wgt_out[idx]; } /* !flg_grd_out_rct */ for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) area_out_ttl+=area_out[lat_idx*lon_nbr_out+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_out_ttl,area_out_ttl/(4.0*M_PI)); if(flg_grd_out_rct){ for(idx=0;idx<lon_nbr_out;idx++) (void)fprintf(stdout,"lon[%li] = [%g, %g, %g]\n",idx,lon_bnd_out[2*idx],lon_ctr_out[idx],lon_bnd_out[2*idx+1]); for(idx=0;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li] = [%g, %g, %g]\n",idx,lat_bnd_out[2*idx],lat_ctr_out[idx],lat_bnd_out[2*idx+1]); for(idx=0;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li], wgt[%li] = %20.15f, %20.15f\n",idx,idx,lat_ctr_out[idx],lat_wgt_out[idx]); } /* !flg_grd_out_rct */ if(nco_dbg_lvl_get() > nco_dbg_crr) for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) (void)fprintf(stdout,"lat[%li] = %g, lon[%li] = %g, area[%li,%li] = %g\n",lat_idx,lat_ctr_out[lat_idx],lon_idx,lon_ctr_out[lon_idx],lat_idx,lon_idx,area_out[lat_idx*lon_nbr_out+lon_idx]); assert(area_out_ttl > 0.0); assert(area_out_ttl <= 4.0*M_PI + 5.0e-15); } /* !flg_grd_out_2D && !dbg */ /* Allocate space for and obtain weights and addresses */ wgt_raw=(double *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_DOUBLE),fnc_nm,"Unable to malloc() value buffer for remapping weights"); col_src_adr=(int 
*)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() value buffer for remapping addresses"); row_dst_adr=(int *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() value buffer for remapping addresses"); /* Obtain remap matrix addresses and weights from map file */ dmn_srt[0]=0L; dmn_cnt[0]=mpf.num_links; rcd=nco_get_vara(in_id,col_src_adr_id,dmn_srt,dmn_cnt,col_src_adr,NC_INT); rcd=nco_get_vara(in_id,row_dst_adr_id,dmn_srt,dmn_cnt,row_dst_adr,NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=mpf.num_links; if(nco_rgr_mpf_typ != nco_rgr_mpf_SCRIP){ rcd=nco_get_vara(in_id,wgt_raw_id,dmn_srt,dmn_cnt,wgt_raw,NC_DOUBLE); }else{ /* SCRIP mapfiles store 2D weight array remap_matrix[num_links,num_wgts] Apply only first weight for first-order conservative accuracy (i.e., area overlap) Apply all three weights for second-order conservative accuracy (by including gradients from centroid to vertices) */ dmn_srd[0]=1L; dmn_srt[1]=0L; dmn_cnt[1]=1L; dmn_srd[1]=mpf.num_wgts; rcd=nco_get_vars(in_id,wgt_raw_id,dmn_srt,dmn_cnt,dmn_srd,wgt_raw,NC_DOUBLE); } /* !SCRIP */ /* Pre-subtract one from row/column addresses (stored, by convention, as Fortran indices) to optimize later access with C indices */ size_t lnk_nbr; /* [nbr] Number of links */ size_t lnk_idx; /* [idx] Link index */ lnk_nbr=mpf.num_links; for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) row_dst_adr[lnk_idx]--; for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) col_src_adr[lnk_idx]--; if(nco_dbg_lvl_get() >= nco_dbg_io){ (void)fprintf(stdout,"idx row_dst col_src wgt_raw\n"); for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) (void)fprintf(stdout,"%li %d %d %g\n",lnk_idx,row_dst_adr[lnk_idx],col_src_adr[lnk_idx],wgt_raw[lnk_idx]); } /* endif dbg */ /* Free memory associated with input file */ if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt) dmn_cnt=(long *)nco_free(dmn_cnt); if(dmn_srd) dmn_srd=(long *)nco_free(dmn_srd); /* Close input netCDF file */ nco_close(in_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); /* Above this line, fl_in and in_id refer to map file Below this line, fl_in and in_id refer to input file to be regridded */ /* Initialize */ in_id=rgr->in_id; out_id=rgr->out_id; /* Sanity check that input data file matches expectations from mapfile */ char *col_nm_in=rgr->col_nm_in; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ char *lat_nm_in=rgr->lat_nm_in; /* [sng] Name of input dimension to recognize as latitude */ char *lon_nm_in=rgr->lon_nm_in; /* [sng] Name of input dimension to recognize as longitude */ int dmn_id_col=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lat; /* [id] Dimension ID */ int dmn_id_lon; /* [id] Dimension ID */ /* 20160503 Discover coordinates via CF Convention if indicated This copies method used in nco_grd_nfr() */ /* Begin CF-coordinates block */ cf_crd_sct *cf=NULL; char *rgr_var; /* [sng] Variable for special regridding treatment */ nco_bool flg_cf=False; /* [flg] Follow CF Coordinates convention to find and infer grid */ rgr_var=rgr->var_nm; if(rgr_var){ /* Infer grid from special variable Intended to be variable that has both horizontal dimensions and "coordinates" attribute, e.g., ncks --cdl -m ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc | grep coordinates 4LFTX_221_SPDY_S113:coordinates = "gridlat_221 gridlon_221" ; Usage: ncks -O -D 3 --rgr infer --rgr_var=ALBDO_221_SFC_S113 --rgr grid=${HOME}/grd_narr.nc ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc ~/foo.nc */ char 
crd_sng[]="coordinates"; /* CF-standard coordinates attribute name */ cf=(cf_crd_sct *)nco_malloc(sizeof(cf_crd_sct)); cf->crd=False; /* [flg] CF coordinates information is complete */ cf->crd_id[0]=NC_MIN_INT; /* [id] Coordinate ID, first */ cf->crd_id[1]=NC_MIN_INT; /* [id] Coordinate ID, second */ cf->crd_nm[0]=NULL; /* [sng] Coordinate name, first */ cf->crd_nm[1]=NULL; /* [sng] Coordinate name, second */ cf->crd_sng=NULL; /* [sng] Coordinates attribute value */ cf->dmn_id[0]=NC_MIN_INT; /* [id] Dimension ID, first */ cf->dmn_id[1]=NC_MIN_INT; /* [id] Dimension ID, second */ cf->dmn_nm[0]=NULL; /* [sng] Dimension name, first */ cf->dmn_nm[1]=NULL; /* [sng] Dimension name, second */ cf->unt_sng[0]=NULL; /* [sng] Units string, first coordinate */ cf->unt_sng[1]=NULL; /* [sng] Units string, second coordinate */ cf->var_id=NC_MIN_INT; /* [id] Coordinate variable ID */ cf->var_nm=NULL; /* [sng] Coordinates variable name */ cf->var_type=NC_NAT; /* [enm] Coordinates variable type */ if((rcd=nco_inq_varid_flg(in_id,rgr_var,&cf->var_id)) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports special \"coordinates\" variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd */ cf->crd_sng=nco_char_att_get(in_id,cf->var_id,crd_sng); if(cf->crd_sng){ cf->crd=True; }else{ /* !rcd && att_typ */ (void)fprintf(stderr,"%s: WARNING %s reports coordinates variable %s does not have character-valued \"coordinates\" attribute. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd && att_typ */ /* Valid coordinates attribute requires two coordinate names separated by space character */ char *crd_nm[NCO_MAX_CRD_PER_VAR]; /* [sng] Coordinate name start position */ char *crd_dpl; /* [sng] Modifiable duplicate of coordinates string */ char *spc_ptr; /* [sng] Pointer to space character (' ') */ int crd_nbr=0; /* [nbr] Number of names in coordinates attribute */ int crd_spt=0; /* [nbr] Number of "spatial-like" (that include "degree" in units) coordinates */ int crd_idx=0; /* [idx] Counter for coordinate names */ for(crd_idx=0;crd_idx<NCO_MAX_CRD_PER_VAR;crd_idx++) crd_nm[crd_idx]=NULL; crd_dpl=(char *)strdup(cf->crd_sng); /* Search for spaces starting from end of string */ while((spc_ptr=strrchr(crd_dpl,' '))){ crd_nm[crd_nbr]=spc_ptr+1L; crd_nbr++; /* NUL-terminate so next search ends here */ *spc_ptr='\0'; } /* !spc_ptr */ /* Final coordinate name begins where coordinate string starts */ crd_nm[crd_nbr]=crd_dpl; /* Change crd_nbr from 0-based index to actual coordinate number */ crd_nbr++; if(crd_nbr < 2){ (void)fprintf(stderr,"%s: WARNING %s found only %d coordinate(s) in \"coordinates\" attribute \"%s\", at least two are required. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,crd_nbr,cf->crd_sng); goto skp_cf; } /* !crd_nbr */ /* If more than two coordinate names are present, choose first two (searching backwards from end) with "degree" in units attributes, otherwise just choose first two */ crd_idx=crd_spt=0; while(crd_spt < 2 && crd_idx < crd_nbr){ cf->crd_nm[crd_spt]=crd_nm[crd_idx]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[crd_spt],&cf->crd_id[crd_spt])) == NC_NOERR){ cf->unt_sng[crd_spt]=nco_char_att_get(in_id,cf->crd_id[crd_spt],unt_sng); if(cf->unt_sng[crd_spt]){ if(strcasestr(cf->unt_sng[crd_spt],"degree")){ /* Increment count of spatial-like coordinates...
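Worked example (names from the NARR usage note above, shown for illustration): parsing coordinates = "gridlat_221 gridlon_221" with the strrchr() loop fills names back-to-front, so crd_nm[0]="gridlon_221", crd_nm[1]="gridlat_221", and crd_nbr=2; each candidate then survives this filter only when its units attribute contains "degree".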
*/ crd_spt++; }else{ /* ...or free() memory allocated during search */ cf->unt_sng[crd_spt]=(char *)nco_free(cf->unt_sng[crd_spt]); } /* !strcasestr() */ } /* !rcd && att_typ */ } /* !rcd */ crd_idx++; /* Advance to next candidate name unconditionally lest a missing variable or units attribute stall the loop */ } /* !crd_spt */ /* If while()-loop above was successful, our search is over Otherwise, use first two coordinate names regardless of units, and print more diagnostics */ if(crd_spt < 2){ cf->crd_nm[0]=crd_nm[0]; cf->crd_nm[1]=crd_nm[1]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[0],&cf->crd_id[0])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s not found. Turning-off CF coordinates search for this file.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0]); goto skp_cf; } /* !rcd */ if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[1],&cf->crd_id[1])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s not found. Turning-off CF coordinates search for this file.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1]); goto skp_cf; } /* !rcd */ cf->unt_sng[0]=nco_char_att_get(in_id,cf->crd_id[0],unt_sng); if(cf->unt_sng[0]){ if(!strcasestr(cf->unt_sng[0],"degrees_")) (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->unt_sng[0]); } /* !rcd && att_typ */ cf->unt_sng[1]=nco_char_att_get(in_id,cf->crd_id[1],unt_sng); if(cf->unt_sng[1]){ if(!strcasestr(cf->unt_sng[1],"degrees_")) (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1],cf->unt_sng[1]); } /* !rcd && att_typ */ } /* !crd_spt */ int crd_rnk; /* [nbr] Coordinate rank */ rcd=nco_inq_varndims(in_id,cf->crd_id[0],&crd_rnk); if(crd_rnk != 2){ (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s has %i dimension(s). Skipping CF coordinates method.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],crd_rnk); goto skp_cf; } /* !crd_rnk */ rcd=nco_inq_vardimid(in_id,cf->crd_id[0],cf->dmn_id); cf->dmn_nm[0]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); cf->dmn_nm[1]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); rcd=nco_inq_dimname(in_id,cf->dmn_id[0],cf->dmn_nm[0]); rcd=nco_inq_dimname(in_id,cf->dmn_id[1],cf->dmn_nm[1]); /* "coordinates" convention does not guarantee lat, lon are specified in that order Use "units" values, if any, to determine order In absence of "units", assume order is lat, lon */ nco_bool crd0_is_lat=False; /* [flg] First coordinate is latitude */ nco_bool crd0_is_lon=False; /* [flg] First coordinate is longitude */ nco_bool crd1_is_lat=False; /* [flg] Second coordinate is latitude */ nco_bool crd1_is_lon=False; /* [flg] Second coordinate is longitude */ if(cf->unt_sng[0]){ if(!strcasecmp(cf->unt_sng[0],"degrees_north")) crd0_is_lat=True; if(!strcasecmp(cf->unt_sng[0],"degrees_east")) crd0_is_lon=True; } /* endif */ if(cf->unt_sng[1]){ if(!strcasecmp(cf->unt_sng[1],"degrees_north")) crd1_is_lat=True; if(!strcasecmp(cf->unt_sng[1],"degrees_east")) crd1_is_lon=True; } /* endif */ assert((crd0_is_lat && crd1_is_lon) || (crd0_is_lon && crd1_is_lat)); int idx_lat; int idx_lon; if(crd0_is_lat && crd1_is_lon){ idx_lat=0; idx_lon=1; }else{ idx_lat=1; idx_lon=0; } /* endif */ /* Dimensions and coordinates have been vetted. Store as primary lookup names.
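Concrete instance of the ordering logic just above (illustration only): with unt_sng[0]="degrees_east" and unt_sng[1]="degrees_north", crd0_is_lon and crd1_is_lat are both True, so idx_lat=1 and idx_lon=0.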
Dimensions are always returned in order [LRV,MRV]=[0,1] LRV is along-track direction, and MRV is across-track (at least in NASA data) Internally we label LRV as "lat" and MRV as "lon" so that code looks similar for curvilinear and rectangular grids */ dmn_id_lat=cf->dmn_id[0]; dmn_id_lon=cf->dmn_id[1]; /* Subtlety: lat_nm_in is coordinate (variable+dimension) name when specified from command-line (as in nco_grd_nfr()), dimension name when found through CF-method (as in nco_rgr_wgt()). This confusing distinction could be avoided by passing command-line dimension names through to nco_rgr_wgt(). However, that route would require complex priorities for what to do when passing command-line coordinate names, not dimension names, and vice versa. */ lat_nm_in=strdup(cf->dmn_nm[0]); lon_nm_in=strdup(cf->dmn_nm[1]); //lat_nm_in=strdup(cf->crd_nm[idx_lat]); //lon_nm_in=strdup(cf->crd_nm[idx_lon]); /* Next four lines unnecessary in nco_rgr_wgt() which only needs dimension names (it reads input coordinates from map-file not data-file) */ //lat_ctr_id=cf->crd_id[idx_lat]; //lon_ctr_id=cf->crd_id[idx_lon]; //lat_dmn_nm=strdup(cf->dmn_nm[0]); //lon_dmn_nm=strdup(cf->dmn_nm[1]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s \"coordinates\" attribute \"%s\" points to coordinates %s and %s. Latitude coordinate \"%s\" has dimensions \"%s\" and \"%s\". Longitude coordinate \"%s\" has dimensions \"%s\" and \"%s\".\n",nco_prg_nm_get(),fnc_nm,rgr_var,cf->crd_sng,cf->crd_nm[0],cf->crd_nm[1],cf->crd_nm[idx_lat],cf->dmn_nm[idx_lat],cf->dmn_nm[idx_lon],cf->crd_nm[idx_lon],cf->dmn_nm[idx_lat],cf->dmn_nm[idx_lon]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s Coordinates %s and %s \"units\" values are \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->crd_nm[1],cf->unt_sng[0] ? cf->unt_sng[0] : "(non-existent)",cf->unt_sng[1] ? cf->unt_sng[1] : "(non-existent)"); /* Clean-up CF coordinates memory */ if(crd_dpl) crd_dpl=(char *)nco_free(crd_dpl); if(cf->crd_sng) cf->crd_sng=(char *)nco_free(cf->crd_sng); if(cf->dmn_nm[0]) cf->dmn_nm[0]=(char *)nco_free(cf->dmn_nm[0]); if(cf->dmn_nm[1]) cf->dmn_nm[1]=(char *)nco_free(cf->dmn_nm[1]); if(cf->unt_sng[0]) cf->unt_sng[0]=(char *)nco_free(cf->unt_sng[0]); if(cf->unt_sng[1]) cf->unt_sng[1]=(char *)nco_free(cf->unt_sng[1]); // if(foo) foo=(char *)nco_free(foo); } /* !rgr_var */ /* goto skp_cf */ skp_cf: /* free() any abandoned cf structure now */ if(!flg_cf) if(cf) cf=(cf_crd_sct *)nco_free(cf); rcd=NC_NOERR; /* End CF-coordinates block */ if(flg_grd_in_1D){ long col_nbr_in_dat; /* [nbr] Number of columns in input datafile */ /* Check default or command-line option first, then search usual suspects, and if that fails then guess unstructured dimension is dimension in input file with size n_a expected by input map file, suggested by PJCS Using internal database names first ensures users can pick between multiple dimensions of size n_a 20180313: fxm New PJCS algorithm is superior, should eliminate internal database for unstructured grids?
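Illustration of the search priority implemented below (commentary only): a user-supplied "--rgr col_nm=foo" wins outright; otherwise a file containing both lndgrid and nCells dimensions matches lndgrid first because the database branches are tested in fixed order, and any match must still pass the subsequent size check against the mapfile n_a before it is accepted.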
Database is necessary for 2D grids because otherwise no good way to disambiguate latitude from longitude */ if(col_nm_in && (rcd=nco_inq_dimid_flg(in_id,col_nm_in,&dmn_id_col)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"lndgrid",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */ else if((rcd=nco_inq_dimid_flg(in_id,"nCells",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("nCells"); /* MPAS-O/I */ else if((rcd=nco_inq_dimid_flg(in_id,"nEdges",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("nEdges"); /* MPAS-O/I */ else if((rcd=nco_inq_dimid_flg(in_id,"ncol_d",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("ncol_d"); /* EAM dynamics grid */ else if((rcd=nco_inq_dimid_flg(in_id,"ncol_p",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("ncol_p"); /* EAM physics grid */ else if((rcd=nco_inq_dimid_flg(in_id,"sounding_id",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("sounding_id"); /* OCO2 */ /* 20180605: Database matches to above names may be false-positives ALM/CLM/CTSM/ELM store all possible dimension names that archived variables could use NCO only prints dimensions used in variables, while ncdump prints all dimensions From ncdump we find usually unused ALM/CLM/CTSM/ELM dimensions: gridcell, lndunit, column, pft, levurb, numrad, levsno Check that matched dimension has expected size: */ if(dmn_id_col != NC_MIN_INT){ rcd=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr_in_dat); if(col_nbr_in != col_nbr_in_dat){ dmn_id_col=NC_MIN_INT; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s database-prioritized unstructured dimension candidate \"%s\" has size not expected by supplied map-file: mapfile col_nbr_in = %ld != %ld = col_nbr_in from datafile. HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,col_nm_in,col_nbr_in,col_nbr_in_dat); } /* !col_nbr_in */ }else{ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s expects data on an unstructured grid yet cannot find a dimension name that matches the usual suspects for unstructured dimensions (ncol, gridcell, lndgrid, nCells, nEdges, sounding_id).
Consider specifying horizontal dimension name to ncks with \"--rgr col_nm=foo\" or to ncremap with \"ncremap -R '--rgr col_nm=foo'\", and consider requesting the NCO project to add this horizontal dimension name to its internal database.\n",nco_prg_nm_get(),fnc_nm); } /* !dmn_id_col */ if(dmn_id_col == NC_MIN_INT){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s Proceeding with fallback algorithm to guess unstructured dimension as first dimension in data file of equal size to that expected by supplied map-file...\n",nco_prg_nm_get(),fnc_nm); /* 20180312: Unstructured dimension must have same size as input map file, suggested by PJCS */ int *dmn_ids_in; /* [nbr] Input file dimension IDs */ int dmn_nbr_in; /* [nbr] Number of dimensions in input file */ const int flg_prn=0; /* [enm] Parent flag */ rcd=nco_inq_dimids(in_id,&dmn_nbr_in,NULL,flg_prn); dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); rcd=nco_inq_dimids(in_id,NULL,dmn_ids_in,flg_prn); /* Find dimension, if any, with same size as map "a" src_grid_dims[0] = n_a dimension */ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ dmn_id_col=dmn_ids_in[dmn_idx]; rcd=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr_in_dat); if(col_nbr_in == col_nbr_in_dat){ rcd=nco_inq_dimname(in_id,dmn_id_col,col_nm_in); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s found that dimension %s in datafile has same size (n_a = %ld) expected by map-file. Assuming %s is the unstructured dimension.\n",nco_prg_nm_get(),fnc_nm,col_nm_in,col_nbr_in,col_nm_in); break; } /* !col_nbr_in */ } /* !dmn_idx */ if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in); if(dmn_idx == dmn_nbr_in){ dmn_id_col=NC_MIN_INT; (void)fprintf(stdout,"%s: WARNING received a map-file constructed to process data on an unstructured (one-dimensional) grid, but %s (aka \"the regridder\") cannot find a dimension in the input data file (or, with ncremap, a possibly already subsetted intermediate file) that matches the size of the unstructured dimension in the supplied map-file = src_grd_dims[0] = n_a = %ld.\nHINT: Ensure at least one member of the variable extraction list has a spatial dimension of size = %ld\n",nco_prg_nm_get(),fnc_nm,col_nbr_in,col_nbr_in); (void)fprintf(stdout,"%s: INFO %s reports a third, last-ditch (aka \"Hail Mary\") workaround may work. 
The Hail-Mary allows logically 1D map-files to regrid logically 2D datasets, so long as the product of the horizontal dimension sizes in the 2D input data file equals the map-file 1D dimension size.\n",nco_prg_nm_get(),fnc_nm); /* Hail Mary algorithm: Use following 2D input grid block to identify horizontal coordinates and dimensions */ flg_grd_in_1D_dat_in_2D=True; flg_grd_in_2D=True; //nco_exit(EXIT_FAILURE); } /* !dmn_idx */ } /* !col_nm_in */ } /* !1D */ if(flg_grd_in_2D){ long lat_nbr_in_dat; /* [nbr] Number of latitudes in input datafile */ if(lat_nm_in && (rcd=nco_inq_dimid_flg(in_id,lat_nm_in,&dmn_id_lat)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"lat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("lat"); else if((rcd=nco_inq_dimid_flg(in_id,"Latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"Lat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Lat"); else if((rcd=nco_inq_dimid_flg(in_id,"south_north",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("south_north"); /* WRF */ else if((rcd=nco_inq_dimid_flg(in_id,"south_north_stag",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("south_north_stag"); else if((rcd=nco_inq_dimid_flg(in_id,"YDim:location",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("YDim:location"); /* AIRS L3 */ else if((rcd=nco_inq_dimid_flg(in_id,"YDim:MOD_Grid_monthly_CMG_VI",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("YDim:MOD_Grid_monthly_CMG_VI"); /* MODIS MOD13C2 */ else if((rcd=nco_inq_dimid_flg(in_id,"natrack",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("natrack"); /* MODIS DeepBlue SeaWiFS L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"nj",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nj"); /* CICE RTM */ else if((rcd=nco_inq_dimid_flg(in_id,"lsmlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("lsmlat"); /* CISM/CLM/ELM */ else if((rcd=nco_inq_dimid_flg(in_id,"nlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nlat"); /* POP */ else if((rcd=nco_inq_dimid_flg(in_id,"rlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("rlat"); /* RACMO */ else if((rcd=nco_inq_dimid_flg(in_id,"nscan",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nscan"); /* AMSR, TRMM */ else if((rcd=nco_inq_dimid_flg(in_id,"nTimes",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nTimes"); /* OMI L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"number_of_lines",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("number_of_lines"); /* DSCOVR L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoTrack",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("GeoTrack"); /* AIRS L2 DAP NC */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoTrack:L2_Standard_atmospheric&surface_product",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("GeoTrack:L2_Standard_atmospheric&surface_product"); /* AIRS L2 HDF */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Along_Swath:mod04",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Cell_Along_Swath:mod04"); /* MODIS MOD04 L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Along_Swath_mod04",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Cell_Along_Swath_mod04"); /* MODIS MOD04 L2 (ncl_convert2nc changes colon to underscore) */ else if((rcd=nco_inq_dimid_flg(in_id,"CO_Latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("CO_Latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"j",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("j"); /* CMIP5 NorESM1 ocean */ else if((rcd=nco_inq_dimid_flg(in_id,"latitude0",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("latitude0"); /* 
Oxford */ else if((rcd=nco_inq_dimid_flg(in_id,"y",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("y"); /* NEMO */ else if((rcd=nco_inq_dimid_flg(in_id,"x",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("x"); /* NSIDC polar stereographic (NB: unfortunate incompatible conflict between NEMO & NSIDC names) */ else if((rcd=nco_inq_dimid_flg(in_id,"y1",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("y1"); /* NSIDC EASE */ else if((rcd=nco_inq_dimid_flg(in_id,"ygrid",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("ygrid"); /* SSM/I */ else if((rcd=nco_inq_dimid_flg(in_id,"ygrid_0",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("ygrid_0"); /* NWS HRRR */ else{ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports unable to find latitude dimension in input file. Tried the usual suspects. HINT: Inform regridder of input latitude dimension name with \"ncks --rgr lat_nm_in=name\" or \"ncremap -R '--rgr lat_nm_in=name'\"\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lat */ rcd=nco_inq_dimlen(in_id,dmn_id_lat,&lat_nbr_in_dat); if(lat_nbr_in != lat_nbr_in_dat && !flg_grd_in_1D_dat_in_2D){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports mapfile and data file dimension sizes disagree: mapfile lat_nbr_in = %ld != %ld = lat_nbr_in from datafile. HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,lat_nbr_in,lat_nbr_in_dat); nco_exit(EXIT_FAILURE); } /* !err */ long lon_nbr_in_dat; /* [nbr] Number of longitudes in input datafile */ if(lon_nm_in && (rcd=nco_inq_dimid_flg(in_id,lon_nm_in,&dmn_id_lon)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"longitude",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("longitude"); else if((rcd=nco_inq_dimid_flg(in_id,"lon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("lon"); else if((rcd=nco_inq_dimid_flg(in_id,"Longitude",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Longitude"); else if((rcd=nco_inq_dimid_flg(in_id,"Lon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Lon"); else if((rcd=nco_inq_dimid_flg(in_id,"west_east",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("west_east"); /* WRF */ else if((rcd=nco_inq_dimid_flg(in_id,"west_east_stag",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("west_east_stag"); else if((rcd=nco_inq_dimid_flg(in_id,"XDim:location",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("XDim:location"); /* AIRS L3 */ else if((rcd=nco_inq_dimid_flg(in_id,"XDim:MOD_Grid_monthly_CMG_VI",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("XDim:MOD_Grid_monthly_CMG_VI"); /* MODIS MOD13C2 */ else if((rcd=nco_inq_dimid_flg(in_id,"ni",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("ni"); /* CICE RTM */ else if((rcd=nco_inq_dimid_flg(in_id,"lsmlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("lsmlon"); /* CISM/CLM/ELM */ else if((rcd=nco_inq_dimid_flg(in_id,"nlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nlon"); /* POP */ else if((rcd=nco_inq_dimid_flg(in_id,"rlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("rlon"); /* RACMO */ else if((rcd=nco_inq_dimid_flg(in_id,"npix",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("npix"); /* AMSR */ else if((rcd=nco_inq_dimid_flg(in_id,"npixel",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("npixel"); /* TRMM */ else if((rcd=nco_inq_dimid_flg(in_id,"nxtrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nxtrack"); /* MODIS DeepBlue SeaWiFS L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"nXtrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nXtrack"); /* OMI L2 */ else
if((rcd=nco_inq_dimid_flg(in_id,"number_of_pixels",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("number_of_pixels"); /* DSCOVR L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoXTrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("GeoXTrack"); /* AIRS L2 DAP NC */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoXTrack:L2_Standard_atmospheric&surface_product",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("GeoXTrack:L2_Standard_atmospheric&surface_product"); /* AIRS L2 HDF */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Across_Swath:mod04",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Cell_Across_Swath:mod04"); /* MODIS MOD04 L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Across_Swath_mod04",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Cell_Across_Swath_mod04"); /* MODIS MOD04 L2 (ncl_convert2nc changes colon to underscore) */ else if((rcd=nco_inq_dimid_flg(in_id,"i",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("i"); /* CMIP5 NorESM1 ocean */ else if((rcd=nco_inq_dimid_flg(in_id,"longitude0",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("longitude0"); /* Oxford */ else if((rcd=nco_inq_dimid_flg(in_id,"x",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("x"); /* NEMO */ else if((rcd=nco_inq_dimid_flg(in_id,"y",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("y"); /* NSIDC polar stereographic (NB: unfortunate incompatible conflict between NEMO & NSIDC names) */ else if((rcd=nco_inq_dimid_flg(in_id,"x1",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("x1"); /* NSIDC EASE */ else if((rcd=nco_inq_dimid_flg(in_id,"xgrid",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("xgrid"); /* SSM/I */ else if((rcd=nco_inq_dimid_flg(in_id,"xgrid_0",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("xgrid_0"); /* NWS HRRR */ else{ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports unable to find longitude dimension in input file. Tried the usual suspects. HINT: Inform regridder of input longitude dimension name with \"ncks --rgr lon_nm_in=name\" or \"ncremap -R '--rgr lon_nm_in=name'\"\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lon */ rcd=nco_inq_dimlen(in_id,dmn_id_lon,&lon_nbr_in_dat); if(lon_nbr_in != lon_nbr_in_dat && !flg_grd_in_1D_dat_in_2D){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports mapfile and data file dimension sizes disagree: mapfile lon_nbr_in = %ld != %ld = lon_nbr_in from datafile.
HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,lon_nbr_in,lon_nbr_in_dat); nco_exit(EXIT_FAILURE); } /* !err */ if(flg_grd_in_1D_dat_in_2D){ if(lon_nbr_in_dat*lat_nbr_in_dat == col_nbr_in){ (void)fprintf(stdout,"%s: INFO %s Hail Mary algorithm reports tentative success in that product of identified horizontal dimension sizes in the 2D input data file equals the map-file 1D dimension size = %ld.\n",nco_prg_nm_get(),fnc_nm,col_nbr_in); lat_nbr_in=lat_nbr_in_dat; lon_nbr_in=lon_nbr_in_dat; }else{ /* !col_nbr_in */ (void)fprintf(stdout,"%s: ERROR %s Hail Mary algorithm reports final failure since product of identified horizontal dimension sizes in the 2D input data file does not equal the map-file 1D dimension size = %ld.\n",nco_prg_nm_get(),fnc_nm,col_nbr_in); nco_exit(EXIT_FAILURE); } /* !col_nbr_in */ } /* !flg_grd_in_1D_dat_in_2D */ } /* !2D */ /* Do not extract grid variables (that are also extensive variables) like lon, lat, area, and masks If necessary, use remap data to diagnose them from scratch Other extensive variables (like counts, population) will be extracted and summed not averaged */ /* Exception list source: ALM/CLM: landmask (20170504: Debatable, including erroneous mask may be better than completely excluding an expected mask) (20170504: must keep landfrac since regridded by ncremap for SGS option) AMSR: Latitude, Longitude CAM, CERES, CMIP5: lat, lon CAM, CMIP5: gw, lat_bnds, lon_bnds CAM-FV: slon, slat, w_stag (w_stag is weights for slat grid, analogous to gw for lat grid) CAM-SE, EAM, MOSART: area CICE: latt_bounds, lont_bounds, latu_bounds, lonu_bounds, TLAT, TLON, ULAT, ULON (NB: CICE uses ?LON and POP uses ?LONG) (aice is ice area, tmask is state-variable mask, both not currently excluded, although all binary masks like tmask should be recomputed on new grid) CISM/CLM/ELM: LATIXY, LONGXY (glacier mask files) DSCOVR L2: latitude, longitude ESMF: gridcell_area GPM: S1_Latitude, S1_Longitude HIRDLS: Latitude MAR/RACMO: LAT, LON MLS: CO_Latitude MPAS-O/I/LI: areaCell, latCell, lonCell and others that are all handled by separate MPAS convention implementation below NCO: lat_vertices, lon_vertices NEMO: nav_lat, nav_lon NWS HRRR: gridlat_0, gridlon_0 OCO2: latitude_bnds, longitude_bnds OMI DOMINO: Latitude, LatitudeCornerpoints, Longitude, LongitudeCornerpoints Oxford: global_latitude0, global_longitude0, latitude0, longitude0 POP: TLAT, TLONG, ULAT, ULONG (NB: CICE uses ?LON and POP uses ?LONG) (POP does not archive spatial bounds) RACMO: rlat, rlon TRMM: Latitude, Longitude UV-CDAT regridder: bounds_lat, bounds_lon Unknown: XLAT_M, XLONG_M WRF: XLAT, XLONG */ const int var_xcl_lst_nbr=54; /* [nbr] Number of objects on exclusion list */ const char *var_xcl_lst[]={"/area","/gridcell_area","/gw","/LAT","/lat","/Latitude","/latitude","/nav_lat","/global_latitude0","/gridlat_0","/latitude0","/rlat","/slat","/LATIXY","/LONGXY","/TLAT","/ULAT","/XLAT","/XLAT_M","/CO_Latitude","/S1_Latitude","/lat_bnds","/lat_vertices","/latt_bounds","/latu_bounds","/latitude_bnds","/LatitudeCornerpoints","/bounds_lat","/LON","/lon","/Longitude","/longitude","/nav_lon","/global_longitude0","/gridlon_0","/longitude0","/rlon","/slon","/TLON","/TLONG","/ULON","/ULONG","/XLONG","/XLONG_M","/CO_Longitude","/S1_Longitude","/lon_bnds","/lon_vertices","/lont_bounds","/lonu_bounds","/longitude_bnds","/LongitudeCornerpoints","/bounds_lon","/w_stag"}; int var_cpy_nbr=0; /* [nbr] Number of copied variables
*/ int var_rgr_nbr=0; /* [nbr] Number of regridded variables */ int var_xcl_nbr=0; /* [nbr] Number of deleted variables */ int var_crt_nbr=0; /* [nbr] Number of created variables */ int var_xtn_nbr=0; /* [nbr] Number of extensive variables */ unsigned int idx_tbl; /* [idx] Counter for traversal table */ const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */ for(idx=0;idx<var_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,var_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* endif */ } /* !idx */ cnv_sct *cnv; /* [sct] Convention structure */ /* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */ cnv=nco_cnv_ini(in_id); if(cnv->MPAS){ /* 20160228: MPAS has a host of mysterious grid and extensive variables that should probably not be regridded 20180206: Add from MPAS-LI xCell, yCell, zCell, and [xyz]Edge, and [xyz]Vertex 20180917: Restrict exclusion list to a subset of variables with nCells-dimension Six nCells-variables may be valuable when regridded to lat/lon mpas_xcl_lst in nco_rgr_wgt() and MPAS var_xcl_lst in nco_var_is_fix() differ by these six variables: areaCell for comparison to area(lat,lon) cellMask for area-weighted mask maxLevelCell for area-weighted underwater topographic mask xCell, yCell, zCell for area-weighted cartesian coordinates 20180918: Regridder currently only works on cell-based coordinates Decided regridder will omit not copy fields on vertex- or edge-based coordinates until it can regrid them Regridding vertex- or edge-based fields would require new sparse matrix for vertices or edges How would ERWG or TempestRemap handle that? 
MPAS geophysical variables on vertex-based (not cell-based) coordinates include: avg_airStressVertexUGeo_1, avg_airStressVertexVGeo_1, uOceanVelocityVertexGeo_1, uVelocityGeo_1, vOceanVelocityVertexGeo_1, vVelocityGeo_1 MPAS geophysical variables on edge-based (not cell-based) coordinates include: principalStress1Var_1, principalStress2Var_1 */ const int mpas_xcl_lst_nbr=35; const char *mpas_xcl_lst[]={"/angleEdge","/areaTriangle","/cellsOnCell","/cellsOnEdge","/cellsOnVertex","/dcEdge","/dvEdge","/edgeMask","/edgesOnCell","/edgesOnEdge","/edgesOnVertex","/indexToCellID","/indexToEdgeID","/indexToVertexID","/kiteAreasOnVertex","/latCell","/latEdge","/latVertex","/lonCell","/lonEdge","/lonVertex","/maxLevelEdgeTop","/meshDensity","/nEdgesOnCell","/nEdgesOnEdge","/vertexMask","/verticesOnCell","/verticesOnEdge","/weightsOnEdge","/xEdge","/yEdge","/zEdge","/xVertex","/yVertex","/zVertex"}; for(idx=0;idx<mpas_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,mpas_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined MPAS exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* endif */ } /* !idx */ } /* !MPAS */ char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */ int dmn_nbr_in; /* [nbr] Number of dimensions in input variable */ int dmn_nbr_out; /* [nbr] Number of dimensions in output variable */ nco_bool has_lon; /* [flg] Contains longitude dimension */ nco_bool has_lat; /* [flg] Contains latitude dimension */ trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */ /* Define regridding flag for each variable */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ has_lon=False; has_lat=False; if(flg_grd_in_2D){ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ /* Pre-determine flags necessary during next loop */ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* fxm: Generalize to include any variable containing two coordinates with "standard_name" = "latitude" and "longitude" */ if(!has_lon) has_lon=!strcmp(dmn_nm_cp,lon_nm_in); if(!has_lat) has_lat=!strcmp(dmn_nm_cp,lat_nm_in); } /* end loop over dimensions */ } /* !flg_grd_in_2D */ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* Regrid variables containing the horizontal spatial dimension on 1D grids, and both latitude and longitude on 2D grids */ if(!strcmp(dmn_nm_cp,col_nm_in) || (has_lon && has_lat)){ trv_tbl->lst[idx_tbl].flg_rgr=True; var_rgr_nbr++; break; } /* endif */ } /* end loop over dimensions */ if(dmn_idx == dmn_nbr_in){ /* Not regridded, so must be omitted or copied... 
*/ if(flg_grd_in_2D && (has_lon || has_lat)){ /* Single spatial dimensional variables on 2D input grids are likely extensive (e.g., grd_mrd_lng from bds) These could be salvaged with explicit rules or implicit assumptions */ trv_tbl->lst[idx_tbl].flg_xtr=False; var_xcl_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) extensive-seeming (e.g., 1D spatial variable in 2D input grid, or 2D spatial variable without primary grid dimensions from multi-grid file (e.g., west_east_stag or south_north_stag instead of west_east or south_north)) variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); }else{ /* !omitted */ /* Copy all variables that are not regridded or omitted */ var_cpy_nbr++; } /* !omitted */ } /* endif not regridded */ } /* end nco_obj_typ_var */ } /* end idx_tbl */ if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit regridding criteria. The regridder expects something to regrid, and variables not regridded are copied straight to output. HINT: If the name(s) of the input horizontal spatial dimensions to be regridded (e.g., latitude and longitude or column) do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"latitude\", \"longitude\", and \"ncol\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#regrid, e.g., \"ncks --rgr col=lndgrid --rgr lat=north\" or \"ncremap -R '--rgr col=lndgrid --rgr lat=north'\".\n",nco_prg_nm_get(),fnc_nm); for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.flg_rgr){ for(int xtn_idx=0;xtn_idx<rgr->xtn_nbr;xtn_idx++){ /* 20150927: Extensive variable treatments are still in alpha-development Currently testing on AIRS TSurfStd_ct (by summing not averaging) In future may consider variables that need more complex (non-summing) extensive treatment MPAS-O/I has a zillion of these [xyz]Cell, cellsOnCell, fCell, indexToCellID, maxLevelCell, meshDensity Not to mention the variables that depend on nEdges and nVertices... */ if(!strcmp(trv.nm,rgr->xtn_var[xtn_idx])){ trv_tbl->lst[idx_tbl].flg_xtn=True; var_xtn_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO Variable %s will be treated as extensive (summed not averaged)\n",nco_prg_nm_get(),trv.nm_fll); } /* !strcmp */ } /* !xtn_idx */ } /* !flg_rgr */ } /* !idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_sbr){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Regrid %s? %s\n",trv.nm,trv.flg_rgr ? 
"Yes" : "No"); } /* end idx_tbl */ } /* end dbg */ /* Lay-out regridded file */ aed_sct aed_mtd; char *area_nm_out; char *att_nm; char *bnd_nm_out; char *bnd_tm_nm_out; char *col_nm_out; char *frc_nm_out; char *lat_bnd_nm_out; char *lat_dmn_nm_out; char *lat_nm_out; char *lat_wgt_nm; char *lon_bnd_nm_out; char *lon_dmn_nm_out; char *lon_nm_out; char *msk_nm_out; char *slat_nm_out=NULL; char *slat_wgt_nm_out=NULL; char *slon_nm_out=NULL; int dmn_id_bnd; /* [id] Dimension ID */ int dmn_id_bnd_tm; /* [id] Dimension ID */ int dmn_id_slat; /* [id] Dimension ID */ int dmn_id_slon; /* [id] Dimension ID */ int area_out_id; /* [id] Variable ID for area */ int frc_out_id; /* [id] Variable ID for fraction */ int lon_out_id; /* [id] Variable ID for longitude */ int lat_out_id; /* [id] Variable ID for latitude */ int lat_wgt_id; /* [id] Variable ID for latitude weight */ int lon_bnd_id; /* [id] Variable ID for lon_bnds/lon_vertices */ int lat_bnd_id; /* [id] Variable ID for lat_bnds/lat_vertices */ int msk_out_id; /* [id] Variable ID for mask */ int slat_out_id; /* [id] Variable ID for staggered latitude */ int slat_wgt_id; /* [id] Variable ID for staggered latitude weight */ int slon_out_id; /* [id] Variable ID for staggered longitude */ int dmn_ids_out[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */ long dmn_srt_out[dmn_nbr_grd_max]; long dmn_cnt_tuo[dmn_nbr_grd_max]; /* Name output dimensions/variables */ area_nm_out=rgr->area_nm; bnd_tm_nm_out=rgr->bnd_tm_nm; frc_nm_out=rgr->frc_nm; lat_bnd_nm_out=rgr->lat_bnd_nm; lat_wgt_nm=rgr->lat_wgt_nm; lon_bnd_nm_out=rgr->lon_bnd_nm; msk_nm_out=rgr->msk_nm; /* Use explicitly specified output names, if any, otherwise use input names (either explicitly specified or discovered by fuzzing) */ if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=col_nm_in; if(rgr->lat_dmn_nm) lat_dmn_nm_out=rgr->lat_dmn_nm; else lat_dmn_nm_out=lat_nm_in; if(rgr->lon_dmn_nm) lon_dmn_nm_out=rgr->lon_dmn_nm; else lon_dmn_nm_out=lon_nm_in; if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=lat_nm_in; if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=lon_nm_in; if(flg_grd_out_1D){ bnd_nm_out=rgr->vrt_nm; lat_bnd_nm_out=rgr->lat_vrt_nm; lon_bnd_nm_out=rgr->lon_vrt_nm; } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ bnd_nm_out=rgr->bnd_nm; } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ bnd_nm_out=rgr->bnd_tm_nm; /* NB: default to bnd_tm_nm for spatial bounds */ } /* !flg_grd_out_rct */ if(flg_grd_out_2D){ lat_bnd_nm_out=rgr->lat_bnd_nm; lon_bnd_nm_out=rgr->lon_bnd_nm; } /* !flg_grd_out_2D */ if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ slat_nm_out=strdup("slat"); slat_wgt_nm_out=strdup("w_stag"); slon_nm_out=strdup("slon"); } /* !nco_grd_lat_fv */ /* Ensure temporal bounds dimension name is distinct from spatial bounds when their sizes differ */ if(bnd_nbr_out != bnd_tm_nbr_out){ if(!strcmp(bnd_nm_out,bnd_tm_nm_out)){ (void)fprintf(stdout,"%s: INFO %s reports spatial and temporal output bounds dimensions are identical (and named \"%s\") by default for rectangular output grids because both can be stored as 2D arrays. That cannot work for this mapping because temporal and spatial bounds dimensions sizes differ (bnd_nbr_out = %d, bnd_tm_nbr_out = %d). Using fall-back spatial bounds name \"%s\" instead. 
HINT: You may change one or both manually with \"ncks --rgr bnd_nm=name\" or \"ncks --rgr bnd_tm_nm=name\", or, using ncremap, with \"ncremap -R '--rgr bnd_nm=name'\" or \"ncremap -R '--rgr bnd_tm_nm=name'\"\n",nco_prg_nm_get(),fnc_nm,bnd_tm_nm_out,bnd_nbr_out,bnd_tm_nbr_out,bnd_nm_out); } /* !strcmp() */ } /* !bnd_nbr_out */ /* Persistent metadata */ aed_sct aed_mtd_crd; char *att_val_crd=NULL; char *att_nm_crd=NULL; att_nm_crd=strdup("coordinates"); aed_mtd_crd.att_nm=att_nm_crd; if(flg_grd_out_1D || flg_grd_out_crv) aed_mtd_crd.mode=aed_overwrite; else aed_mtd_crd.mode=aed_delete; aed_mtd_crd.type=NC_CHAR; aed_mtd_crd.sz=strlen(lat_nm_out)+strlen(lon_nm_out)+1L; att_val_crd=(char *)nco_malloc((aed_mtd_crd.sz+1L)*nco_typ_lng(aed_mtd_crd.type)); (void)sprintf(att_val_crd,"%s %s",lat_nm_out,lon_nm_out); aed_mtd_crd.val.cp=att_val_crd; /* Reminder: Regridder area_out options, e.g., --rgr area_out, set flg_area_out to control adding "area" variable to regridded output Regridder cll_msr options, --rgr cll_msr, set flg_cll_msr to control adding "cell_measures" attribute to regridded output ncks & ncra cll_msr options, --cll_msr, set EXTRACT_CLL_MSR to control adding "cell_measures" variables (e.g., area) to extraction list of input file EXTRACT_CLL_MSR supersedes --rgr area_out in determining whether to add "area" to regridded output */ nco_bool flg_area_out=rgr->flg_area_out; /* [flg] Add area to output */ nco_bool flg_cll_msr=rgr->flg_cll_msr; /* [flg] Add cell_measures attribute */ aed_sct aed_mtd_cll_msr; char *att_nm_cll_msr=NULL; char *att_val_cll_msr=NULL; if(flg_cll_msr){ att_nm_cll_msr=strdup("cell_measures"); aed_mtd_cll_msr.att_nm=att_nm_cll_msr; aed_mtd_cll_msr.mode=aed_overwrite; aed_mtd_cll_msr.type=NC_CHAR; att_val_cll_msr=(char *)nco_malloc((strlen(area_nm_out)+6L+1L)*nco_typ_lng(aed_mtd_cll_msr.type)); (void)sprintf(att_val_cll_msr,"area: %s",area_nm_out); aed_mtd_cll_msr.sz=strlen(att_val_cll_msr); aed_mtd_cll_msr.val.cp=att_val_cll_msr; } /* !flg_cll_msr */ /* Define new horizontal dimensions before all else */ if(flg_grd_out_1D){ rcd+=nco_def_dim(out_id,col_nm_out,col_nbr_out,&dmn_id_col); } /* !flg_grd_out_1D */ if(flg_grd_out_2D){ rcd+=nco_def_dim(out_id,lat_dmn_nm_out,lat_nbr_out,&dmn_id_lat); rcd+=nco_def_dim(out_id,lon_dmn_nm_out,lon_nbr_out,&dmn_id_lon); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd+=nco_def_dim(out_id,slat_nm_out,slat_nbr_out,&dmn_id_slat); rcd+=nco_def_dim(out_id,slon_nm_out,slon_nbr_out,&dmn_id_slon); } /* !nco_grd_lat_fv */ } /* !flg_grd_out_2D */ /* If dimension has not been defined, define it */ rcd=nco_inq_dimid_flg(out_id,bnd_tm_nm_out,&dmn_id_bnd_tm); if(rcd != NC_NOERR) rcd=nco_def_dim(out_id,bnd_tm_nm_out,bnd_tm_nbr_out,&dmn_id_bnd_tm); /* If dimension has not been defined, define it */ rcd=nco_inq_dimid_flg(out_id,bnd_nm_out,&dmn_id_bnd); if(rcd != NC_NOERR) rcd=nco_def_dim(out_id,bnd_nm_out,bnd_nbr_out,&dmn_id_bnd); char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */ char *var_nm; /* [sng] Variable name */ int *dmn_id_in=NULL; /* [id] Dimension IDs */ int *dmn_id_out=NULL; /* [id] Dimension IDs */ int var_id_in; /* [id] Variable ID */ int var_id_out; /* [id] Variable ID */ nc_type var_typ_out; /* [enm] Variable type to write to disk */ nc_type var_typ_rgr; /* [enm] Variable type used during regridding */ nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */ int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE;
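/* Sketch (not verbatim NCO source) of the define-if-absent idiom used just above, generalized to an arbitrary dimension; the names dmn_nm_xmp, dmn_sz_xmp, and dmn_id_xmp are hypothetical placeholders:
     int dmn_id_xmp; // [id] Dimension ID
     rcd=nco_inq_dimid_flg(out_id,dmn_nm_xmp,&dmn_id_xmp); // Tolerant inquiry, no error on absence
     if(rcd != NC_NOERR) rcd=nco_def_dim(out_id,dmn_nm_xmp,dmn_sz_xmp,&dmn_id_xmp); // Define only when absent
   Inquiring first avoids the netCDF name-collision error that a second nco_def_dim() on an already-defined name would trigger */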
dfl_lvl=rgr->dfl_lvl; fl_out_fmt=rgr->fl_out_fmt; /* Define new coordinates and grid variables in regridded file */ if(flg_grd_out_1D){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_col; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_col; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_col,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids_out,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; dmn_ids_out[2]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_3D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_3D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lat,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; 
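/* The define-then-deflate pair above recurs for every coordinate and grid variable in this section. A minimal sketch of an equivalent helper (hypothetical, not part of libnco):
     static void
     nco_def_var_dfl_xmp(const int nc_id,const char *var_nm,const nc_type typ,const int dmn_nbr,const int *dmn_id,const int dfl_lvl,int *var_id)
     {
       (void)nco_def_var(nc_id,var_nm,typ,dmn_nbr,dmn_id,var_id); // Define variable
       if(dfl_lvl > 0) (void)nco_def_var_deflate(nc_id,*var_id,NC_SHUFFLE,(int)True,dfl_lvl); // Optionally compress
     }
   Each call-site is then equivalent to nco_def_var_dfl_xmp(out_id,...,dfl_lvl,&var_id) followed by var_crt_nbr++ */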
rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lon,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd+=nco_def_var(out_id,slat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slat,&slat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,slat_wgt_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slat,&slat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slat_wgt_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,slon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slon,&slon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !nco_grd_lat_fv */ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_lon; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lat_wgt_nm,crd_typ_out,dmn_nbr_1D,&dmn_id_lat,&lat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_wgt_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids_out,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ } /* !flg_grd_out_rct */ /* Add _FillValue to empty destination cells, if requested */ nco_bool flg_add_fll=rgr->flg_add_fll; /* [flg] Add _FillValue to fields with empty destination cells */ nco_bool flg_dst_mpt=False; /* [flg] At least one destination cell is empty */ size_t dst_idx; /* [idx] Index on destination grid */ /* Determine whether any destination cells are, in fact, empty Logic here could be replaced by examining frac_b variable, if we trust input frac_b... ...and we do trust input frac_b since it is already used for renormalization */ if(flg_add_fll){ if(flg_msk_apl){ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(msk_out[dst_idx] == 0) break; if(dst_idx < grd_sz_out) flg_dst_mpt=True; if(flg_dst_mpt && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports at least one destination cell, Fortran (1-based) row index %lu, is empty. User requested (with --msk_apl) that masked cells receive _FillValue, so regridder will ensure that all regridded fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm,dst_idx+1L); }else{ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ /* For each destination cell... */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ /* ...does any weight... */ if(row_dst_adr[lnk_idx] == dst_idx){ /* ...contribute to that cell? 
*/ /* If so, break lnk_idx loop and continue to next iteration of dst_idx loop */ break; } /* !row_dst_adr */ } /* !lnk_idx */ /* If weight loop reached end without a match, then this destination cell is empty */ if(lnk_idx == lnk_nbr){ flg_dst_mpt=True; break; } /* !lnk_idx */ } /* !dst_idx */ if(flg_dst_mpt && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports at least one destination cell, Fortran (1-based) row index %lu, is empty. User requested (with --add_fll) that empty cells receive _FillValue, so regridder will ensure that all regridded fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm,dst_idx+1L); } /* !flg_msk_apl */ } /* !flg_add_fll */ /* Pre-allocate dimension ID and cnt/srt space */ int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */ int dmn_in_fst; /* [idx] Offset of input- relative to output-dimension due to non-MRV dimension insertion */ int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */ int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */ rcd+=nco_inq_ndims(in_id,&dmn_nbr_max); dmn_nbr_max++; /* Safety in case regridding adds dimension */ dmn_id_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* Identify all record-dimensions in input file */ rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ int flg_pck; /* [flg] Variable is packed on disk */ nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */ double mss_val_dbl; double mss_val_cmp_dbl; /* Missing value for comparison to double precision values */ /* Define regridded and copied variables in output file */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv_tbl->lst[idx_tbl].flg_mrv=True; trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ var_nm=trv.nm; /* Preserve input type in output type */ var_typ_out=trv.var_typ; /* Demote DP to SP to save space. fxm: missing value type will then be inconsistent if copied without demotion */ /* if(trv.var_typ == NC_DOUBLE) var_typ_out=NC_FLOAT; else var_typ_out=trv.var_typ; */ dmn_nbr_in=trv.nbr_dmn; dmn_nbr_out=trv.nbr_dmn; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out); /* If variable has not been defined, define it */ if(rcd != NC_NOERR){ if(trv.flg_rgr){ /* Regrid */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); dmn_in_fst=0; rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports variable \"%s\" is packed so results are unpredictable. HINT: If regridded values seem weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); /* Is horizontal dimension last, i.e., most-rapidly-varying? */ if(flg_grd_in_1D && !strcmp(dmn_nm,col_nm_in)){ if(dmn_idx != dmn_nbr_in-1){ /* Unstructured input grid has col in non-MRV location (expect this with, e.g., MPAS-O/I native grid dimension-ordering) */ (void)fprintf(stdout,"%s: WARNING %s reports unstructured grid spatial coordinate %s is (zero-based) dimension %d of input variable to be regridded %s which has %d dimensions.
The NCO regridder does not support unstructured spatial dimensions that are not the last (i.e., most rapidly varying) dimension of an input variable, so results are likely garbage.\nHINT: Re-arrange input file dimensions to place horizontal dimension(s) last with, e.g., \'ncpdq -a time,lev,%s in.nc out.nc\' prior to calling the regridder. E3SM users: If this is an MPAS dataset with a new (unknown to ncremap) dimension, please ask Charlie to add the dimension to the ncremap dimension permutation list.\n",nco_prg_nm_get(),fnc_nm,dmn_nm,dmn_idx,var_nm,dmn_nbr_in,dmn_nm); trv_tbl->lst[idx_tbl].flg_mrv=False; } /* !dmn_idx */ } /* !flg_grd_in_1D */ if(flg_grd_in_2D && (!strcmp(dmn_nm,lat_nm_in) || !strcmp(dmn_nm,lon_nm_in))){ /* Are horizontal dimensions most-rapidly-varying? */ if(dmn_idx != dmn_nbr_in-1 && dmn_idx != dmn_nbr_in-2){ /* NB: Lat/lon input grid has lat/lon in non-MRV location (expect this with, e.g., AIRS L2 grid dimension-ordering) */ (void)fprintf(stdout,"%s: WARNING %s reports lat-lon grid spatial coordinate %s is (zero-based) dimension %d of input variable to be regridded %s which has %d dimensions. The NCO regridder does not support rectangular lat-lon dimension(s) that are not the last two (i.e., most rapidly varying) dimensions of an input variable, so results are likely garbage.\nHINT: Re-arrange input file dimensions to place horizontal dimensions last with, e.g., \'ncpdq -a time,lev,lat,lon in.nc out.nc\' prior to calling the regridder.\n",nco_prg_nm_get(),fnc_nm,dmn_nm,dmn_idx,var_nm,dmn_nbr_in); trv_tbl->lst[idx_tbl].flg_mrv=False; } /* !dmn_idx */ } /* !flg_grd_in_2D */ if(flg_grd_out_1D){ if((nco_rgr_typ == nco_rgr_grd_2D_to_1D) && (!strcmp(dmn_nm,lat_nm_in) || !strcmp(dmn_nm,lon_nm_in))){ /* Replace orthogonal horizontal dimensions by unstructured horizontal dimension already defined */ if(!strcmp(dmn_nm,lat_nm_in)){ /* Replace lat with col */ dmn_id_out[dmn_idx]=dmn_id_col; dmn_cnt[dmn_idx]=col_nbr_out; } /* endif lat */ if(!strcmp(dmn_nm,lon_nm_in)){ /* Assume non-MRV dimensions are ordered lat/lon. Replace lat with col. Shift MRV dimensions to left after deleting lon.
*/ dmn_id_out[dmn_idx]=NC_MIN_INT; dmn_cnt[dmn_idx]=NC_MIN_INT; dmn_nbr_out--; /* Reduce output dimension position of all subsequent input dimensions by one */ if(!trv_tbl->lst[idx_tbl].flg_mrv) dmn_in_fst=-1; } /* endif lon */ }else{ /* Dimension col_nm_in has already been defined as col_nm_out, replicate all other dimensions */ if(!strcmp(dmn_nm,col_nm_in)) rcd=nco_inq_dimid_flg(out_id,col_nm_out,dmn_id_out+dmn_idx); else rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx+dmn_in_fst); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx+dmn_in_fst); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx+dmn_in_fst]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx+dmn_in_fst],dmn_id_out+dmn_idx+dmn_in_fst); } /* !rcd */ } /* !lat && !lon */ } /* !flg_grd_out_1D */ if(flg_grd_out_2D){ if(nco_rgr_typ == nco_rgr_grd_1D_to_2D && !strcmp(dmn_nm,col_nm_in)){ /* Replace unstructured horizontal dimension by orthogonal horizontal dimensions already defined */ dmn_id_out[dmn_idx]=dmn_id_lat; dmn_id_out[dmn_idx+1]=dmn_id_lon; dmn_cnt[dmn_idx]=lat_nbr_out; dmn_cnt[dmn_idx+1]=lon_nbr_out; dmn_nbr_out++; /* Increase output dimension position of all subsequent input dimensions by one */ if(!trv_tbl->lst[idx_tbl].flg_mrv) dmn_in_fst=1; }else{ /* Dimensions lat/lon_nm_in have already been defined as lat/lon_nm_out, replicate all other dimensions */ if(!strcmp(dmn_nm,lat_nm_in)) rcd=nco_inq_dimid_flg(out_id,lat_dmn_nm_out,dmn_id_out+dmn_idx); else if(!strcmp(dmn_nm,lon_nm_in)) rcd=nco_inq_dimid_flg(out_id,lon_dmn_nm_out,dmn_id_out+dmn_idx); else rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx+dmn_in_fst); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx+dmn_in_fst); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx+dmn_in_fst]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx+dmn_in_fst],dmn_id_out+dmn_idx+dmn_in_fst); } /* !rcd */ } /* !col */ } /* !1D_to_2D */ } /* !dmn_idx */ }else{ /* !flg_rgr */ /* Replicate non-regridded variables */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ } /* !flg_rgr */ rcd=nco_def_var(out_id,var_nm,var_typ_out,dmn_nbr_out,dmn_id_out,&var_id_out); /* Duplicate netCDF4 settings when possible */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){ /* Deflation */ if(dmn_nbr_out > 0){ int dfl_lvl_in; /* [enm] Deflate level [0..9] */ rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in); /* Copy original deflation settings */ if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in); /* Overwrite HDF Lempel-Ziv compression level, if requested */ if(dfl_lvl == 0) deflate=(int)False; else 
deflate=(int)True; /* Turn-off shuffle when uncompressing, otherwise chunking requests may fail */ if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE; /* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */ if(dfl_lvl >= 0) shuffle=NC_SHUFFLE; if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl); } /* !dmn_nbr_out */ } /* !NC_FORMAT_NETCDF4 */ (void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY); if(trv.flg_rgr){ aed_mtd_crd.var_nm=var_nm; aed_mtd_crd.id=var_id_out; (void)nco_aed_prc(out_id,var_id_out,aed_mtd_crd); if(flg_cll_msr){ aed_mtd_cll_msr.var_nm=var_nm; aed_mtd_cll_msr.id=var_id_out; (void)nco_aed_prc(out_id,var_id_out,aed_mtd_cll_msr); } /* !flg_cll_msr */ /* 20210602: Ensure all regridded variables have _FillValue if user requested _FillValue in empty cells and there are empty cells */ if(flg_add_fll && flg_dst_mpt){ /* Check for _FillValue here iff user requests non-default behavior */ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,(double *)NULL); if(!has_mss_val){ val_unn mss_val_dfl; /* [] Default _FillValue */ mss_val_dfl=nco_mss_val_dfl_get(var_typ_out); rcd=nco_put_att(out_id,var_id_out,"_FillValue",var_typ_out,1L,(void *)(&mss_val_dfl)); } /* !has_mss_val */ } /* !flg_add_fll */ } /* !flg_rgr */ } /* !rcd */ } /* !var */ } /* !idx_tbl */ /* Free pre-allocated array space */ /* col_nm_in will not otherwise be free'd if it was guessed as usual suspect */ if(col_nm_in != rgr->col_nm_in) col_nm_in=(char *)nco_free(col_nm_in); if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt) dmn_cnt=(long *)nco_free(dmn_cnt); if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); /* Define new metadata in regridded file */ if(flg_area_out){ rcd=nco_char_att_put(out_id,area_nm_out,"long_name","Solid angle subtended by gridcell"); rcd=nco_char_att_put(out_id,area_nm_out,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm_out,"units","steradian"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd); att_val=(char *)nco_calloc((strlen(lat_dmn_nm_out)+strlen(lon_dmn_nm_out)+8L),sizeof(char)); (void)sprintf(att_val,"%s, %s: sum",lat_dmn_nm_out,lon_dmn_nm_out); rcd=nco_char_att_put(out_id,area_nm_out,"cell_methods",att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd=nco_char_att_put(out_id,frc_nm_out,"long_name","Fraction of gridcell valid on destination grid"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,frc_nm_out,att_nm_crd,att_val_crd); att_val=(char *)nco_calloc((strlen(lat_dmn_nm_out)+strlen(lon_dmn_nm_out)+8L),sizeof(char)); (void)sprintf(att_val,"%s, %s: sum",lat_dmn_nm_out,lon_dmn_nm_out); rcd=nco_char_att_put(out_id,frc_nm_out,"cell_methods",att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd=nco_char_att_put(out_id,msk_nm_out,"long_name","Mask (0 = invalid destination, 1 = valid destination)"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,msk_nm_out,att_nm_crd,att_val_crd); } /* !flg_msk_out */ rcd=nco_char_att_put(out_id,lat_nm_out,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lat_nm_out,"standard_name","latitude"); rcd=nco_char_att_put(out_id,lat_nm_out,"units","degrees_north"); /* 20200205: Attach "axis" attribute to single-dimensional geospatial coordinates, not to two-dimensional coordinate variables, per CF Conventions section 5.2 */
if(!flg_grd_out_crv) rcd=nco_char_att_put(out_id,lat_nm_out,"axis","Y"); double vld_min; vld_min=-90.0; att_nm=strdup("valid_min"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lat_nm_out; aed_mtd.id=lat_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_min; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lat_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); double vld_max; vld_max=90.0; att_nm=strdup("valid_max"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lat_nm_out; aed_mtd.id=lat_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_max; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lat_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lat_nm_out,"bounds",lat_bnd_nm_out); if(flg_grd_out_rct) att_val=strdup("Gridcell latitude interfaces"); else att_val=strdup("Gridcell latitude vertices"); rcd=nco_char_att_put(out_id,lat_bnd_nm_out,"long_name",att_val); rcd=nco_char_att_put(out_id,lon_nm_out,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lon_nm_out,"standard_name","longitude"); rcd=nco_char_att_put(out_id,lon_nm_out,"units","degrees_east"); /* 20200205: Attach "axis" attribute to single-dimensional geospatial coordinates, not to two-dimensional coordinate variables, per CF Conventions section 5.2 */ if(!flg_grd_out_crv) rcd=nco_char_att_put(out_id,lon_nm_out,"axis","X"); /* UGRID Conventions define "topology" and "modulo" attributes https://github.com/ugrid-conventions/ugrid-conventions My understanding is these should only be utilized for global grids */ if(nco_rgr_typ == nco_rgr_grd_2D_to_2D){ /* fxm: change this to check whether lon_spn >= 360 or nco_grd_xtn == global */ att_nm=strdup("modulo"); double modulo=360.0; aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&modulo; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lon_nm_out,"topology","circular"); } /* !nco_rgr_grd_2D_to_2D */ if(lon_ctr_out[0] >= 0.0) vld_min=0.0; else vld_min=-180.0; att_nm=strdup("valid_min"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_min; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); if(lon_ctr_out[0] >= 0.0) vld_max=360.0; else vld_max=180.0; att_nm=strdup("valid_max"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_max; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lon_nm_out,"bounds",lon_bnd_nm_out); if(flg_grd_out_rct) att_val=strdup("Gridcell longitude interfaces"); else att_val=strdup("Gridcell longitude vertices"); rcd=nco_char_att_put(out_id,lon_bnd_nm_out,"long_name",att_val); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd=nco_char_att_put(out_id,slat_nm_out,"long_name","Latitude for staggered FV grid");
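/* Illustrative CDL sketch of the coordinate metadata written above, assuming default rectangular-grid names lat and lat_bnds (actual names come from lat_nm_out and lat_bnd_nm_out):
     double lat(lat) ;
       lat:long_name = "Latitude of Grid Cell Centers" ;
       lat:standard_name = "latitude" ;
       lat:units = "degrees_north" ;
       lat:axis = "Y" ;
       lat:bounds = "lat_bnds" ;
   Curvilinear output omits "axis" because CF section 5.2 reserves that attribute for one-dimensional coordinate variables */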
rcd=nco_char_att_put(out_id,slat_nm_out,"units","degrees_north"); rcd=nco_char_att_put(out_id,slat_wgt_nm_out,"long_name","Latitude weights for staggered FV grid"); rcd=nco_char_att_put(out_id,slon_nm_out,"long_name","Longitude for staggered FV grid"); rcd=nco_char_att_put(out_id,slon_nm_out,"units","degrees_east"); } /* !nco_grd_lat_fv */ if(flg_grd_out_rct) rcd=nco_char_att_put(out_id,lat_wgt_nm,"long_name","Latitude quadrature weights (normalized to sum to 2.0 on global grids)"); rcd=nco_char_att_put(out_id,NULL,"map_file",fl_in); rcd=nco_char_att_put(out_id,NULL,"input_file",rgr->fl_in); /* Annotate persistent metadata that should appear last in attribute list */ if(flg_grd_out_1D){ if(flg_area_out) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd); if(flg_frc_out_wrt) rcd=nco_char_att_put(out_id,frc_nm_out,att_nm_crd,att_val_crd); if(flg_msk_out) rcd=nco_char_att_put(out_id,msk_nm_out,att_nm_crd,att_val_crd); } /* !flg_grd_out_1D */ /* Persistent metadata */ if(att_nm_crd) att_nm_crd=(char *)nco_free(att_nm_crd); if(att_val_crd) att_val_crd=(char *)nco_free(att_val_crd); if(flg_cll_msr){ if(att_nm_cll_msr) att_nm_cll_msr=(char *)nco_free(att_nm_cll_msr); if(att_val_cll_msr) att_val_cll_msr=(char *)nco_free(att_val_cll_msr); } /* !flg_cll_msr */ if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ if(slat_nm_out) slat_nm_out=(char *)nco_free(slat_nm_out); if(slat_wgt_nm_out) slat_wgt_nm_out=(char *)nco_free(slat_wgt_nm_out); if(slon_nm_out) slon_nm_out=(char *)nco_free(slon_nm_out); } /* !nco_grd_lat_fv */ /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Begin data mode */ (void)nco_enddef(out_id); /* Write new coordinates and variables to regridded file */ if(flg_grd_out_1D){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=col_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=col_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_bnd_out,crd_typ_out); if(flg_area_out){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_msk_out){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,(nc_type)NC_INT); } /* !flg_msk_out */ } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); if(flg_area_out){ (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_frc_out_wrt){ (void)nco_put_vara(out_id,frc_out_id,dmn_srt_out,dmn_cnt_tuo,frc_out,crd_typ_out); } /* !flg_frc_out_wrt */ if(flg_msk_out){ (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,(nc_type)NC_INT); } /* !flg_msk_out */ dmn_srt_out[0]=dmn_srt_out[1]=dmn_srt_out[2]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; 
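/* Layout sketch for the srt/cnt hyperslab arguments used by the nco_put_vara() calls in this curvilinear block (standard C row-major order assumed): a [lat_nbr_out][lon_nbr_out][bnd_nbr_out] buffer stores element (lat_idx,lon_idx,bnd_idx) at 1-D offset (lat_idx*lon_nbr_out + lon_idx)*bnd_nbr_out + bnd_idx, so writing the entire array takes dmn_srt_out = {0,0,0} and dmn_cnt_tuo = {lat_nbr_out,lon_nbr_out,bnd_nbr_out} */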
dmn_cnt_tuo[2]=bnd_nbr_out; /* NB: 20160803 Semantically confusing---curvilinear grids must write *_crn_out data into *_bnd_out arrays */ (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_crn_out,crd_typ_out); (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_crn_out,crd_typ_out); } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lat_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lon_nbr_out; (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=slat_nbr_out; (void)nco_put_vara(out_id,slat_out_id,dmn_srt_out,dmn_cnt_tuo,slat_ctr_out,crd_typ_out); (void)nco_put_vara(out_id,slat_wgt_id,dmn_srt_out,dmn_cnt_tuo,slat_wgt_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=slon_nbr_out; (void)nco_put_vara(out_id,slon_out_id,dmn_srt_out,dmn_cnt_tuo,slon_ctr_out,crd_typ_out); if(slat_ctr_out) slat_ctr_out=(double *)nco_free(slat_ctr_out); if(slat_wgt_out) slat_wgt_out=(double *)nco_free(slat_wgt_out); if(slon_ctr_out) slon_ctr_out=(double *)nco_free(slon_ctr_out); } /* !nco_grd_lat_fv */ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lat_nbr_out; (void)nco_put_vara(out_id,lat_wgt_id,dmn_srt_out,dmn_cnt_tuo,lat_wgt_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lon_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; if(flg_area_out){ (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_frc_out_wrt){ (void)nco_put_vara(out_id,frc_out_id,dmn_srt_out,dmn_cnt_tuo,frc_out,crd_typ_out); } /* !flg_frc_out_wrt */ if(flg_msk_out){ (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,(nc_type)NC_INT); } /* !flg_msk_out */ } /* !flg_grd_out_rct */ /* Regrid or copy variable values */ const double wgt_vld_thr=rgr->wgt_vld_thr; /* [frc] Weight threshold for valid destination value */ const nco_bool flg_rnr=rgr->flg_rnr; /* [flg] Renormalize destination values by valid area */ char *sgs_frc_nm=NULL; char *sgs_msk_nm=NULL; double *sgs_frc_in=NULL; double *sgs_frc_out=NULL; double *var_val_dbl_in=NULL; double *var_val_dbl_out=NULL; double *wgt_vld_out=NULL; double var_val_crr; int *tally=NULL; /* [nbr] Number of valid (non-missing) values */ int lvl_idx; /* [idx] Level index */ int lvl_nbr; /* [nbr] Number of levels */ int thr_idx; /* [idx] Thread index */ size_t idx_in; /* [idx] Input grid index */ size_t idx_out; /* [idx] Output grid index */ size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t val_in_fst; /* [nbr] Number of elements by which current N-D slab input values are offset from origin */ size_t val_out_fst; /* [nbr] Number of elements by which current N-D slab output values are offset from origin */ /* 20190322: Prior to entering OpenMP loop, collect specified SGS information */ const double sgs_nrm=rgr->sgs_nrm; /* [frc] Sub-gridscale normalization */ if(rgr->sgs_frc_nm){ /* Normalization test: 
fl_in=20181217.CNTL_CNPCTC1850_OIBGC.ne30_oECv3.edison.clm2.h0.2000-12.nc /bin/cp -f ${DATA}/hdf/${fl_in} ~/elm_raw.nc ncremap -P sgs -v FSDS,TBOT,GPP -a aave -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/cmip6_180x360_scrip.20181001.nc ~/elm_raw.nc ~/elm_sgs.nc # Original SGS method ncks -A -v grid_area ${DATA}/grids/ne30np4_pentagons.091226.nc ~/elm_sgs.nc ncremap -P gsg -v FSDS,TBOT,GPP -m ${DATA}/maps/map_ne30np4_to_cmip6_180x360_aave.20181001.nc ~/elm_raw.nc ~/elm_gsg.nc # New SGS method */ if(rgr->sgs_msk_nm) sgs_msk_nm=(char *)strdup(rgr->sgs_msk_nm); sgs_frc_nm=(char *)strdup(rgr->sgs_frc_nm); var_nm=sgs_frc_nm; var_typ_rgr=NC_DOUBLE; /* NB: Regrid in double precision */ var_typ_out=NC_DOUBLE; /* NB: sgs_frc_out must be double precision */ var_sz_in=1L; /* Compute from scratch to be sure it matches grd_sz_in */ var_sz_out=grd_sz_out; /* Assume this holds */ char *fl_sgs=NULL; /* [sng] External sub-gridscale file name */ int sgs_id; /* [id] netCDF file ID for external sub-gridscale file */ sgs_id=in_id; if((rcd=nco_inq_varid_flg(sgs_id,var_nm,&var_id_in)) != NC_NOERR){ /* If sgs_frc_nm is not in input file then search for it in external area file */ #ifdef WIN32 const char sls_chr='\\'; /* [chr] Slash character */ #else /* !WIN32 */ const char sls_chr='/'; /* [chr] Slash character */ #endif /* !WIN32 */ char *sls_ptr; /* [sng] Pointer to last slash character (sls_chr) */ sls_ptr=strrchr(var_nm,sls_chr); if(!sls_ptr){ (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unable to find sgs_frc_nm = %s in current input file, and unable to identify filename (ending with slash '/' or backslash '\\', as appropriate) portion of that string to serve as local external file for sgs_frc input, exiting\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm); nco_exit(EXIT_FAILURE); } /* !sls_ptr */ sgs_frc_nm=(char *)strdup(sls_ptr+1L); /* Copy variable-name portion of string */ *sls_ptr='\0'; /* NULL-terminate filename */ fl_sgs=(char *)strdup(var_nm); var_nm=sgs_frc_nm; /* NB: too tricky? */ rcd=nco_open(fl_sgs,NC_NOWRITE,&sgs_id); if((rcd=nco_inq_varid_flg(sgs_id,var_nm,&var_id_in)) != NC_NOERR){ (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unable to find sgs_frc_nm = \"%s\" in local external file %s, exiting\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm,fl_sgs); nco_exit(EXIT_FAILURE); } /* !rcd */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s obtaining sgs_frc = %s from file %s\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm,fl_sgs); } /* !rcd */ rcd=nco_inq_varndims(sgs_id,var_id_in,&dmn_nbr_in); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ?
dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(sgs_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(sgs_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); var_sz_in*=dmn_cnt_in[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ if(var_sz_in != grd_sz_in){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") requires that sgs_frc = %s be same size as spatial grid but var_sz_in = %lu != %lu = grd_sz_in\n",nco_prg_nm_get(),fnc_nm,var_nm,var_sz_in,grd_sz_in); nco_exit(EXIT_FAILURE); } /* !var_sz_in */ /* Missing value setup (NB: ELM landfrac has _FillValue and is _FillValue where masked) */ has_mss_val=nco_mss_val_get_dbl(sgs_id,var_id_in,&mss_val_dbl); if(has_mss_val) mss_val_cmp_dbl=mss_val_dbl; else mss_val_cmp_dbl=NC_FILL_DOUBLE; sgs_frc_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() sgs_frc_in value buffer"); rcd=nco_get_vara(sgs_id,var_id_in,dmn_srt,dmn_cnt_in,sgs_frc_in,var_typ_rgr); /* If sgs_frc comes from external local file, close it now */ if(fl_sgs){ rcd=nco_close(sgs_id); fl_sgs=(char *)nco_free(fl_sgs); } /* !fl_sgs */ /* Initialize output */ sgs_frc_out=(double *)nco_malloc_dbg(grd_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() sgs_frc_out value buffer"); /* Initialize and regrid sgs_frc_out 20190907: sgs_frc_in (landfrac) is _FillValue (1.0e36) for ELM datasets in all masked gridcells, and is always positive definite (never zero) in all unmasked gridcells because it is a true area. ELM sgs_frc_out is always positive definite gridcell area everywhere, with no missing values and no zero values. 20190910: MPAS-Seaice datasets have no mask, and sgs_frc_in (timeMonthly_avg_iceAreaCell) is never (ncatted-appended) _FillValue (-9.99999979021477e+33) and is usually zero because it is time-mean area-fraction of sea ice which only exists in polar regions. MPAS-Seaice sgs_frc_out is zero in all gridcells without sea-ice.
Regardless of input source, following blocks guarantee that sgs_frc_out is defined everywhere, is never a missing value (sgs_frc_out is zero where sgs_frc_in may have been _FillValue), and is always safe to multiply and normalize by sgs_frc_out in main regridding loop */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) sgs_frc_out[dst_idx]=0.0; for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) if((var_val_crr=sgs_frc_in[col_src_adr[lnk_idx]]) != mss_val_cmp_dbl) sgs_frc_out[row_dst_adr[lnk_idx]]+=var_val_crr*wgt_raw[lnk_idx]; /* Sanity check sgs_frc_out */ if(nco_dbg_lvl_get() >= nco_dbg_fl){ /* 20190326: sgs_frc expressed as a fraction must never exceed sgs_nrm CICE expresses sgs_frc (aice) in percent, i.e., sgs_nrm=100.0 Sum total value of sgs_frc (as opposed to gridcell_area) depends on grid resolution */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ /* 20190907: Approximate comparison because rounding causes frequent exceedances of sgs_nrm by epsilon ~ 1.0e-15 */ if((float)sgs_frc_out[dst_idx] > sgs_nrm) (void)fprintf(stdout,"%s: INFO %s reports sgs_frc_out[%lu] = %19.15f > %g = sgs_nrm\n",nco_prg_nm_get(),fnc_nm,dst_idx,sgs_frc_out[dst_idx],sgs_nrm); } /* !dst_idx */ } /* !dbg */ // for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ // (void)fprintf(stdout,"%s: INFO %s reports sgs_frc_out[%lu] = %19.15f\n",nco_prg_nm_get(),fnc_nm,dst_idx,sgs_frc_out[dst_idx]); // } /* !dst_idx */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); } /* !sgs_frc_nm */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"Regridding progress: # means regridded, ~ means copied\n"); /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped as shared in parallel clause */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ /* OpenMP notes: default(none): GCC9.x does not accept this (https://github.com/nco/nco/issues/114) perhaps because of fp_stdout/stderr? Intel accepts it. firstprivate(): Pointers that could be inadvertently free()'d if they lost their NULL-initialization private(): Almost everything else shared(): uggh...shared clause depends on both compiler and compiler-version 1. Const variables (e.g., flg_rnr,fnc_nm,wgt_vld_thr) are default shared for gcc >= 4.9.2, 2. fnc_nm (only!) must be explicit shared for g++ 4.6.3 (travis) 3. flg_rnr,fnc_nm,wgt_vld_thr must be explicit shared for icc 13.1.3 (rhea) 4. assert() cannot be used in OpenMP blocks 5. 
Good discussion of "const" variables in shared() clause here http://jakascorner.com/blog/2016/07/omp-default-none-and-const.html 20200221: fxm Revisit default(none) in light of above article */ #ifdef __GNUG__ # define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ ) # if GCC_LIB_VERSION < 490 # define GXX_OLD_OPENMP_SHARED_TREATMENT 1 # endif /* 480 */ # if GCC_LIB_VERSION >= 900 # define GXX_WITH_OPENMP5_GPU_SUPPORT 1 # endif /* 900 */ #endif /* !__GNUC__ */ #if defined( __INTEL_COMPILER) #else /* !__INTEL_COMPILER */ # ifdef GXX_OLD_OPENMP_SHARED_TREATMENT # else /* !old g++ */ # if defined(GXX_WITH_OPENMP5_GPU_SUPPORT) && 0 # else # endif /* !GCC >= 9.0 */ # endif /* !GCC < 4.9 */ #endif /* !__INTEL_COMPILER */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; thr_idx=omp_get_thread_num(); in_id=trv_tbl->in_id_arr[thr_idx]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm); if(trv.flg_rgr){ /* Regrid variable */ var_nm=trv.nm; var_typ_rgr=NC_DOUBLE; /* NB: Perform regridding in double precision */ var_typ_out=trv.var_typ; /* NB: Output type in file is same as input type */ var_sz_in=1L; var_sz_out=1L; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid(out_id,var_nm,&var_id_out); rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in); rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_vardimid(out_id,var_id_out,dmn_id_out); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); var_sz_in*=dmn_cnt_in[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ rcd=nco_inq_dimlen(out_id,dmn_id_out[dmn_idx],dmn_cnt_out+dmn_idx); if(dmn_cnt_out[dmn_idx] == 0L){ /* No records have been written, so overwrite zero output record size with input record size */ char dmn_rec_nm[NC_MAX_NAME]; /* [sng] Record dimension name */ int dmn_rec_id_in; rcd=nco_inq_dimname(out_id,dmn_id_out[dmn_idx],dmn_rec_nm); rcd=nco_inq_dimid(in_id,dmn_rec_nm,&dmn_rec_id_in); rcd=nco_inq_dimlen(in_id,dmn_rec_id_in,dmn_cnt_out+dmn_idx); } /* !dmn_cnt_out */ var_sz_out*=dmn_cnt_out[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ /* Compute number and size of non-lat/lon or non-col dimensions (e.g., level, time, species, wavelength) Denote their convolution by level or 'lvl' for shorthand There are lvl_nbr elements for each lat/lon or col position 20151011: Until today assume lat/lon and col are most-rapidly varying dimensions 20151011: Until today lvl_nbr missed last non-spatial dimension for 1D output */ lvl_nbr=1; /* Simple prescription of lvl_nbr works when horizontal dimension(s) is/are MRV */ for(dmn_idx=0;dmn_idx<dmn_nbr_out-dmn_nbr_hrz_crd;dmn_idx++) lvl_nbr*=dmn_cnt_out[dmn_idx]; /* Determining whether an individual field _uses_ missing values is important because memory requirements of next four malloc's (i.e., exclusive of wgt_raw) can sum to ~7*sizeof(uncompressed var) for NC_FLOAT and ~3.5*sizeof(uncompressed var) for 
NC_DOUBLE. Traditionally has_mss_val answers "does this variable _have_ an explicit missing value?" As of 20210909, we expand the meaning of has_mss_val, though only in nco_rgr_wgt(). Now has_mss_val means does the variable use the explicitly defined missing value, or, failing that, does it use the implicitly defined missing value? Only variables that _use_ a missing value need tally and wgt_vld_out arrays. mss_val_dbl is what nco_mss_val_get_dbl() returns---its meaning has not changed. However, it is no longer intended to be used. Instead we create mss_val_cmp_dbl, a more general value for comparison and assignment */ var_val_dbl_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() input value buffer"); var_val_dbl_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output value buffer"); /* Obtain input variable */ rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_dbl_in,var_typ_rgr); /* 20210909: Begin new missing value treatment */ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); /* NB: mss_val_cmp_dbl must be defined since it is now always used by regridder (even when has_mss_val is False) For instance flg_msk_apl block, below, uses mss_val_cmp_dbl for masked fields And test for _usage_ of missing values, below, necessarily compares to mss_val_cmp_dbl If missing value is not explicitly declared, use default missing value */ if(has_mss_val) mss_val_cmp_dbl=mss_val_dbl; else mss_val_cmp_dbl=NC_FILL_DOUBLE; /* Override float/double value with appropriate default missing value for integers */ if(!has_mss_val){ switch(var_typ_out){ case NC_BYTE: mss_val_cmp_dbl=NC_FILL_BYTE; break; case NC_CHAR: mss_val_cmp_dbl=NC_FILL_CHAR; break; case NC_SHORT: mss_val_cmp_dbl=NC_FILL_SHORT; break; case NC_INT: mss_val_cmp_dbl=NC_FILL_INT; break; case NC_FLOAT: mss_val_cmp_dbl=NC_FILL_FLOAT; break; case NC_DOUBLE: mss_val_cmp_dbl=NC_FILL_DOUBLE; break; case NC_UBYTE: mss_val_cmp_dbl=NC_FILL_UBYTE; break; case NC_USHORT: mss_val_cmp_dbl=NC_FILL_USHORT; break; case NC_UINT: mss_val_cmp_dbl=NC_FILL_UINT; break; /* 20210909: Implicit type conversion generates warnings: 'long long' to 'double' changes value from -9223372036854775806 to -9223372036854775808 'unsigned long long' to 'double' changes value from 18446744073709551614 to 18446744073709551616 Warnings can be fixed with -Wimplicit-const-int-float-conversion */ case NC_INT64: mss_val_cmp_dbl=NC_FILL_INT64; break; case NC_UINT64: mss_val_cmp_dbl=NC_FILL_UINT64; break; case NC_STRING: default: nco_dfl_case_nc_type_err(); break; } /* !var_typ_in */ } /* !has_mss_val */ /* Re-initialize Boolean to True and override with False if variable _uses_ missing values */ has_mss_val=True; for(idx_in=0;idx_in<var_sz_in;idx_in++){ if(var_val_dbl_in[idx_in] == mss_val_cmp_dbl) break; } /* !idx_in */ /* If neither implicit nor explicit missing value is present, treat all values as valid */ if(idx_in == var_sz_in) has_mss_val=False; /* 20210909: End new missing value treatment */ /* Memory allocation that depends on _FillValue and input variable contents */ if(has_mss_val) tally=(int *)nco_malloc_dbg(var_sz_out*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() tally buffer"); if(has_mss_val && flg_rnr) wgt_vld_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output renormalization weight buffer"); /* Initialize output */ (void)memset(var_val_dbl_out,0,var_sz_out*nco_typ_lng(var_typ_rgr)); if(has_mss_val)
(void)memset(tally,0,var_sz_out*nco_typ_lng(NC_INT)); if(wgt_vld_out) (void)memset(wgt_vld_out,0,var_sz_out*nco_typ_lng(var_typ_rgr)); /* 20150914: Intensive variables require normalization, extensive do not Intensive variables (temperature, wind speed, mixing ratio) do not depend on gridcell boundaries Extensive variables (population, counts, numbers of things) depend on gridcell boundaries Extensive variables are the exception in models, yet are commonly used for sampling information, e.g., number of photons, number of overpasses Pass extensive variable list to NCO with, e.g., --xtn=TSurfStd_ct,... 20190420: Remove languishing, unfinished intensive variable code */ clock_t tm_srt; /* [us] Microseconds at start */ clock_t tm_end; /* [us] Microseconds at end */ float tm_drn; /* [s] Seconds elapsed */ if(nco_dbg_lvl_get() >= nco_dbg_var) tm_srt=clock(); /* This first block is for "normal" variables without sub-gridscale fractions */ if(!sgs_frc_out){ /* Apply weights */ if(!has_mss_val){ if(lvl_nbr == 1){ /* Weight single-level fields without missing values */ #ifdef ENABLE_GPU #else /* !ENABLE_GPU */ # if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 ) # endif /* !__GNUC__ */ #endif /* !ENABLE_GPU */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]]+=var_val_dbl_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx]; }else{ val_in_fst=0L; val_out_fst=0L; /* Weight multi-level fields without missing values */ #ifdef ENABLE_GPU #endif /* !ENABLE_GPU */ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ //if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(fp_stdout,"%s lvl_idx = %d val_in_fst = %li, val_out_fst = %li\n",trv.nm,lvl_idx,val_in_fst,val_out_fst); #ifdef ENABLE_GPU #else /* !ENABLE_GPU */ # if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 ) # endif /* !__GNUC__ */ #endif /* !ENABLE_GPU */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]+val_out_fst]+=var_val_dbl_in[col_src_adr[lnk_idx]+val_in_fst]*wgt_raw[lnk_idx]; val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ }else{ /* has_mss_val */ if(lvl_nbr == 1){ /* Weight single-level fields with missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]; if(wgt_vld_out) wgt_vld_out[idx_out]+=wgt_raw[lnk_idx]; tally[idx_out]++; } /* !mss_val_cmp_dbl */ } /* !lnk_idx */ }else{ /* lvl_nbr > 1 */ val_in_fst=0L; val_out_fst=0L; /* Weight multi-level fields with missing values */ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]+val_in_fst; idx_out=row_dst_adr[lnk_idx]+val_out_fst; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]; if(wgt_vld_out) wgt_vld_out[idx_out]+=wgt_raw[lnk_idx]; tally[idx_out]++; } /* !mss_val_cmp_dbl */ } /* !lnk_idx */ val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ } /* !has_mss_val */ if(!has_mss_val){ /* frc_dst = frc_out = dst_frac = frac_b contains non-unity elements and normalization type is "destarea" or "dstarea" or "none" When this occurs for conservative remapping, follow "destarea" normalization procedure See SCRIP manual p. 
11 and http://www.earthsystemmodeling.org/esmf_releases/public/last, specifically http://www.earthsystemmodeling.org/esmf_releases/public/last/ESMF_refdoc/node3.html#SECTION03029000000000000000
"frac_a: When a conservative regridding method is used, this contains the fraction of each source cell that participated in the regridding. When a non-conservative regridding method is used, this array is set to 0.0.
frac_b: When a conservative regridding method is used, this contains the fraction of each destination cell that participated in the regridding. When a non-conservative regridding method is used, this array is set to 1.0 where the point participated in the regridding (i.e. was within the unmasked source grid), and 0.0 otherwise.
If the first-order conservative interpolation method is specified ("-m conserve") then the destination field may need to be adjusted by the destination fraction (frac_b). This should be done if the normalization type is ``dstarea'' (sic, really "destarea") and if the destination grid extends outside the unmasked source grid. If it isn't known if the destination extends outside the source, then it doesn't hurt to apply the destination fraction. (If it doesn't extend outside, then the fraction will be 1.0 everywhere anyway.)
The following code shows how to adjust an already interpolated destination field (dst_field) by the destination fraction. The variables n_b, and frac_b are from the weight file:
! Adjust destination field by fraction
do i=1, n_b
if (frac_b(i) .ne. 0.0) then
dst_field(i)=dst_field(i)/frac_b(i)
endif
enddo"
NB: Non-conservative interpolation methods (e.g., bilinear) should NOT apply this normalization (theoretically there is no danger in doing so because frc_out == 1 always for all gridcells that participate in bilinear remapping and frc_out == 0 otherwise)
NCO's renormalization procedure below is similar to the ESMF-recommended procedure above. However, users can control NCO renormalization with, e.g., --rnr_thr=0.1, or override it completely with --rnr_thr=none. Moreover, frac_b == frc_dst is determined solely by gridcell binary mask overlaps during weight generation. It is time-invariant and 2D. Missing values (e.g., AOD) can vary in time and can be 3D (or N-D) and so can wgt_vld_out. Hence NCO renormalization is more flexible.
flg_frc_nrm (i.e., ESMF-recommended) normalization makes fields pretty for graphics, yet is non-conservative because e.g., MPAS Ocean gridcells projected onto global uniform grids would have their SSTs normalized for prettiness on coastal gridpoints, which is inherently non-conservative.
20190912: Make "ESMF renormalization" of fields without missing values (i.e., "destarea") opt-in rather than default
"destarea" and frac_b = frc_dst together set flg_frc_nrm
Formerly flg_frc_nrm triggered ESMF renormalization by default
Now flg_frc_nrm and user-explicitly-set --rnr_thr to [0.0,1.0] must both be true to trigger it
This keeps conservative maps conservative by default
NB: This "ESMF renormalization" normalizes by frac_b == frc_dst (not by wgt_vld_out) regardless of rnr_thr
20151018: Avoid double-normalizing by only executing fractional normalization (flg_frc_nrm) block when !has_mss_val, and valid area normalization when has_mss_val */
if(flg_frc_nrm){
/* Only renormalize when frac_b < 1.0 (because frac_b == 1.0 does nothing) */
if(flg_rnr){
/* 20190912: Only renormalize when user explicitly requests it (because renormalization is non-conservative).
Prior to today, renormalization was by default, henceforth it is opt-in. */ if(lvl_nbr == 1){ /* Fractionally renormalize single-level fields without missing values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=frc_out[dst_idx]; }else{ /* Fractionally renormalize multi-level fields without missing values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ if(frc_out[dst_idx] != 0.0){ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]/=frc_out[dst_idx]; } /* !lvl_idx */ } /* !frc_out */ } /* !dst_idx */ } /* lvl_nbr > 1 */ } /* !flg_rnr */ } /* !flg_frc_nrm */ } /* !has_mss_val */ if(has_mss_val){ /* NCL and ESMF treatment of weights and missing values described at https://www.ncl.ucar.edu/Applications/ESMF.shtml#WeightsAndMasking http://earthsystemmodeling.org/esmf_releases/non_public/ESMF_6_1_1/ESMF_refdoc/node5.html#SECTION05012600000000000000 NCO implements one of two procedures: "conservative" or "renormalized" The "conservative" algorithm uses all valid data from the input grid on the output grid Destination cells receive the weighted valid values of the source cells This is conservative because the global integrals of the source and destination fields are equal The "renormalized" algorithm divides the destination value by the sum of the valid weights This returns "reasonable" values, i.e., the mean of the valid input values However, renormalization is equivalent to extrapolating valid data to missing regions Hence the input and output integrals are unequal and the regridding is not conservative */ /* In fields with missing values, destination cells with no accumulated weight are missing value */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(!tally[dst_idx]) var_val_dbl_out[dst_idx]=mss_val_cmp_dbl; if(flg_rnr){ // if(nco_dbg_lvl_get() >= nco_dbg_quiet) (void)fprintf(fp_stdout,"%s: DEBUG renormalization for %s uses flg_rnr block\n",nco_prg_nm_get(),var_nm); if(wgt_vld_thr == 0.0){ /* Renormalize cells with no threshold by valid accumulated weight */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(tally[dst_idx]) var_val_dbl_out[dst_idx]/=wgt_vld_out[dst_idx]; }else{ /* Renormalize cells with threshold by valid accumulated weight if weight exceeds threshold */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(wgt_vld_out[dst_idx] >= wgt_vld_thr){var_val_dbl_out[dst_idx]/=wgt_vld_out[dst_idx];}else{var_val_dbl_out[dst_idx]=mss_val_cmp_dbl;} } /* !wgt_vld_thr */ } /* !flg_rnr */ } /* !has_mss_val */ } /* !sgs_frc_out */ /* Variables with sub-gridscale fractions require "double-weighting" and normalization */ if(sgs_frc_out){ if(!strcmp(var_nm,sgs_frc_nm)){ /* Copy shared variable sgs_frc_out that was regridded before OpenMP loop 20190911: Reasons to copy sgs_frc_out into sgs_frc_nm data include speed, consistency, and well-definedness of sgs_frc_out. One reason to regrid sgs_frc_nm here is consistency with original, raw dataset: ELM landfrac is masked so regridding it here (rather than using sgs_frc_out) would produce a regridded dataset more identical to raw ELM output. The same can be said for CICE (I think). MPAS cellMask and timeMonthly_avg_iceAreaCell are not masked, and so should produce the same values as sgs_frc_out if regridded here. 
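In schematic form (notation ours, not NCO's), the "double-weighting" applied below to every other SGS variable is
  val_out[dst] = Sum over lnk with row(lnk)==dst of wgt_raw[lnk]*sgs_frc_in[col(lnk)]*val_in[col(lnk)], divided by sgs_frc_out[dst]
i.e., each source value is weighted both by its overlap weight and by its source gridcell fraction, and the accumulated sum is then normalized by the regridded destination fraction.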
*/ memcpy(var_val_dbl_out,sgs_frc_out,grd_sz_out*nco_typ_lng(var_typ_rgr)); }else if(sgs_msk_nm && !strcmp(var_nm,sgs_msk_nm)){ /* Compute binary mask directly from shared sgs_frc_out (guaranteed to be all valid values) */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]=1.0; }else{ /* !sgs_msk_nm */ /* "Double-weight" all other sub-gridscale input values by sgs_frc_in and overlap weight, normalize by sgs_frc_out */ if(!has_mss_val){ if(lvl_nbr == 1){ /* SGS-regrid single-level fields without missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]]+=var_val_dbl_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx]*sgs_frc_in[col_src_adr[lnk_idx]]; /* NB: MPAS-Seaice dataset sgs_frc_out is usually zero in non-polar regions */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=sgs_frc_out[dst_idx]; }else{ /* lvl_nbr > 1 */ /* SGS-regrid multi-level fields without missing values */ val_in_fst=0L; val_out_fst=0L; for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; var_val_dbl_out[idx_out+val_out_fst]+=var_val_dbl_in[idx_in+val_in_fst]*wgt_raw[lnk_idx]*sgs_frc_in[idx_in]; } /* !lnk_idx */ /* Normalize current level values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx+val_out_fst]/=sgs_frc_out[dst_idx]; val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ }else{ /* !has_mss_val */ if(lvl_nbr == 1){ /* SGS-regrid single-level fields with missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]*sgs_frc_in[idx_in]; tally[idx_out]++; } /* !mss_val_cmp_dbl */ } /* !lnk_idx */ /* NB: Normalization clause is complex to support sgs_frc_out from both ELM and MPAS-Seaice */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(!tally[dst_idx]){var_val_dbl_out[dst_idx]=mss_val_cmp_dbl;}else{if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=sgs_frc_out[dst_idx];} }else{ /* lvl_nbr > 1 */ /* SGS-regrid multi-level fields with missing values */ val_in_fst=0L; val_out_fst=0L; for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]+val_in_fst; idx_out=row_dst_adr[lnk_idx]+val_out_fst; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]*sgs_frc_in[col_src_adr[lnk_idx]]; tally[idx_out]++; } /* !mss_val_cmp_dbl */ } /* !lnk_idx */ /* Normalize current level values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ idx_out=dst_idx+val_out_fst; if(!tally[idx_out]){var_val_dbl_out[idx_out]=mss_val_cmp_dbl;}else{if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[idx_out]/=sgs_frc_out[dst_idx];} } /* dst_idx */ val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ } /* !has_mss_val */ } /* !sgs_msk_nm */ } /* !sgs_frc_out */ if(nco_typ_ntg(var_typ_out)){ /* 20210407: Round, with rint(), integer fields before sending to netCDF for output Otherwise implicit type conversion will truncate (rather than round) output values This is critical for masks where rounding errors produce near integer values (e.g., 0.999...) that could then be truncated to zero by implicit conversion instead of rounded up to 1. 
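For example (illustrative values, not from any dataset): a regridded mask value of 0.9999999997 becomes (int)0.9999999997 == 0 under implicit conversion, whereas rint(0.9999999997) == 1.0 survives the conversion to NC_INT intact.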
*/
if(has_mss_val){
for(dst_idx=0;dst_idx<var_sz_out;dst_idx++)
if(var_val_dbl_out[dst_idx] != mss_val_cmp_dbl) var_val_dbl_out[dst_idx]=rint(var_val_dbl_out[dst_idx]);
}else{
for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) var_val_dbl_out[dst_idx]=rint(var_val_dbl_out[dst_idx]);
} /* !has_mss_val */
} /* !nco_typ_ntg() */
if(flg_add_fll && !has_mss_val){
/* 20210604: Initialize fields without _FillValue in input file to default missing value in unmapped destination cells
Otherwise empty destination cells will be zero (not _FillValue) in output file
Fields with input _FillValue are already _FillValue in output where tally is zero */
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){
if(frc_out[dst_idx] == 0.0){
for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){
var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]=NC_FILL_DOUBLE;
} /* !lvl_idx */
} /* !frc_out */
} /* !dst_idx */
} /* !flg_add_fll */
if(flg_msk_apl){
/* 20210607: Overwrite output values with _FillValue where destination cell is masked
Same procedure regardless of whether input variables already have _FillValue
NB: This is separate, and presumably independent, from above flg_add_fll loop
Fields with flg_msk_apl will (harmlessly?) go through both loops */
double mss_val_msk; /* [frc] Missing value to apply where mask is false */
//if(has_mss_val) mss_val_msk=mss_val_dbl; else mss_val_msk=NC_FILL_DOUBLE;
mss_val_msk=mss_val_cmp_dbl; /* [frc] Missing value to apply where mask is false */
for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){
if(msk_out[dst_idx] == 0){
for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){
var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]=mss_val_msk;
} /* !lvl_idx */
} /* !msk_out */
} /* !dst_idx */
} /* !flg_msk_apl */
if(nco_dbg_lvl_get() >= nco_dbg_var){
tm_end=clock();
tm_drn=(float)(tm_end-tm_srt)/CLOCKS_PER_SEC;
(void)fprintf(fp_stdout,"%s: INFO Compute time for %s (thread %d/%d): %g s\n",nco_prg_nm_get(),trv.nm,thr_idx,omp_get_num_threads(),tm_drn);
} /* !dbg */
/* begin OpenMP critical */
// rcd=nco_put_var(out_id,var_id_out,var_val_dbl_out,var_typ_rgr);
rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_dbl_out,var_typ_rgr);
/* end OpenMP critical */
if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in);
if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out);
if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt);
if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in);
if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out);
if(tally) tally=(int *)nco_free(tally);
if(var_val_dbl_out) var_val_dbl_out=(double *)nco_free(var_val_dbl_out);
if(var_val_dbl_in) var_val_dbl_in=(double *)nco_free(var_val_dbl_in);
if(wgt_vld_out) wgt_vld_out=(double *)nco_free(wgt_vld_out);
}else{ /* !trv.flg_rgr */
/* Use standard NCO copy routine for variables that are not regridded */
/* begin OpenMP critical */
(void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl);
/* end OpenMP critical */
} /* !flg_rgr */
} /* !xtr */
} /* end (OpenMP parallel for) loop over idx_tbl */
if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n");
if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables regridded = %d (%d extensive), copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_xtn_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr);
/* Free memory allocated for grid reading/writing */
if(area_out) area_out=(double *)nco_free(area_out);
if(col_src_adr) col_src_adr=(int *)nco_free(col_src_adr);
if(dmn_sz_in_int) dmn_sz_in_int=(int *)nco_free(dmn_sz_in_int);
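/* NB: The ptr=(type *)nco_free(ptr) idiom used above and below relies on nco_free() returning NULL after freeing its argument, so each pointer is freed and nullified in one statement and the if(ptr) guards stay safe against double-frees */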
if(dmn_sz_out_int) dmn_sz_out_int=(int *)nco_free(dmn_sz_out_int);
if(frc_out) frc_out=(double *)nco_free(frc_out);
if(lat_bnd_out) lat_bnd_out=(double *)nco_free(lat_bnd_out);
if(lat_crn_out) lat_crn_out=(double *)nco_free(lat_crn_out);
if(lat_ctr_out) lat_ctr_out=(double *)nco_free(lat_ctr_out);
if(lat_ntf_out) lat_ntf_out=(double *)nco_free(lat_ntf_out);
if(lat_wgt_out) lat_wgt_out=(double *)nco_free(lat_wgt_out);
if(lon_bnd_out) lon_bnd_out=(double *)nco_free(lon_bnd_out);
if(lon_crn_out) lon_crn_out=(double *)nco_free(lon_crn_out);
if(lon_ctr_out) lon_ctr_out=(double *)nco_free(lon_ctr_out);
if(lon_ntf_out) lon_ntf_out=(double *)nco_free(lon_ntf_out);
if(msk_out) msk_out=(int *)nco_free(msk_out);
if(row_dst_adr) row_dst_adr=(int *)nco_free(row_dst_adr);
if(sgs_frc_nm) sgs_frc_nm=(char *)nco_free(sgs_frc_nm);
if(sgs_frc_in) sgs_frc_in=(double *)nco_free(sgs_frc_in);
if(sgs_frc_out) sgs_frc_out=(double *)nco_free(sgs_frc_out);
if(sgs_msk_nm) sgs_msk_nm=(char *)nco_free(sgs_msk_nm);
if(wgt_raw) wgt_raw=(double *)nco_free(wgt_raw);
return rcd;
} /* end nco_rgr_wgt() */

void nco_bsl_zro /* Return Bessel function zeros */
(const int bsl_zro_nbr, /* I [nbr] Number of Bessel zeros to return */
double * const bsl_zro) /* O [frc] Bessel zero */
{
/* Purpose: Return Bessel function zeros
Source: CCM code /fs/cgd/csm/models/atm/ccm3.5.8/src/ccmlsm_share/bsslzr.F
Return bsl_zro_nbr zeros (or if bsl_zro_nbr > 50, approximate zeros) of the Bessel function j0
First 50 zeros are given exactly, and remaining zeros are computed by extrapolation, and therefore are not exact
Original version: CCM1
Standardized: J. Rosinski, June 1992
Reviewed: J. Hack, D. Williamson, August 1992
Reviewed: J. Hack, D. Williamson, April 1996
Modified 19970123 by Jim Rosinski to use double precision arithmetic
~2000: Converted to Fortran9X by C. Zender, changed all real*16 statements to double precision (real*8)
20150530: Converted to C99 by C.
Zender */ const char fnc_nm[]="nco_bsl_zro()"; /* [sng] Function name */ const double pi=M_PI; // [frc] 3 const double bsl_zro_tbl[]={ // Zeros of Bessel functions of order 1 to 50 -1.e36, 2.4048255577, 5.5200781103, 8.6537279129, 11.7915344391, 14.9309177086, 18.0710639679, 21.2116366299, 24.3524715308, 27.4934791320, 30.6346064684, 33.7758202136, 36.9170983537, 40.0584257646, 43.1997917132, 46.3411883717, 49.4826098974, 52.6240518411, 55.7655107550, 58.9069839261, 62.0484691902, 65.1899648002, 68.3314693299, 71.4729816036, 74.6145006437, 77.7560256304, 80.8975558711, 84.0390907769, 87.1806298436, 90.3221726372, 93.4637187819, 96.6052679510, 99.7468198587, 102.8883742542, 106.0299309165, 109.1714896498, 112.3130502805, 115.4546126537, 118.5961766309, 121.7377420880, 124.8793089132, 128.0208770059, 131.1624462752, 134.3040166383, 137.4455880203, 140.5871603528, 143.7287335737, 146.8703076258, 150.0118824570, 153.1534580192, 156.2950342685}; const int bsl_zro_tbl_nbr_max=50; /* [nbr] */ int bsl_idx; /* [idx] Counting index */ /* Main Code */ if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stdout,"%s: DEBUG Entering %s\n",nco_prg_nm_get(),fnc_nm); /* NB: Initialize bsl_zro[0] but (in C) never use it Initialization prevents uninitialized memory warnings */ for(bsl_idx=0;bsl_idx<=bsl_zro_nbr;bsl_idx++) if(bsl_idx <= bsl_zro_tbl_nbr_max) bsl_zro[bsl_idx]=bsl_zro_tbl[bsl_idx]; if(bsl_zro_nbr > bsl_zro_tbl_nbr_max) for(bsl_idx=bsl_zro_tbl_nbr_max+1;bsl_idx<=bsl_zro_nbr;bsl_idx++) bsl_zro[bsl_idx]=bsl_zro[bsl_idx-1]+pi; if(nco_dbg_lvl_get() == nco_dbg_old){ (void)fprintf(stdout,"%s: DEBUG %s reports bsl_zro_nbr = %d\n",nco_prg_nm_get(),fnc_nm,bsl_zro_nbr); (void)fprintf(stdout,"idx\tbsl_zro\n"); for(bsl_idx=1;bsl_idx<=bsl_zro_nbr;bsl_idx++) (void)fprintf(stdout,"%d\t%g\n",bsl_idx,bsl_zro[bsl_idx]); } /* endif dbg */ return; } /* end nco_bsl_zro() */ void nco_lat_wgt_gss /* [fnc] Compute and return sine of Gaussian latitudes and their weights */ (const int lat_nbr, /* I [nbr] Latitude number */ const nco_bool flg_s2n, /* I [enm] Latitude grid-direction is South-to-North */ double * const lat_sin, /* O [frc] Sine of latitudes */ double * const wgt_Gss) /* O [frc] Gaussian weights */ { /* Purpose: Compute and return sine of Gaussian latitudes and their weights Returned arrays are ordered south-to-north (S->N), not (N->S) Source: CCM /fs/cgd/csm/models/atm/ccm3.5.8/src/ccmlsm_share/gauaw.F Calculate sine of latitudes lat_sin(lat_nbr) and weights wgt_Gss(lat_nbr) for Gaussian quadrature Algorithm described in Davis and Rabinowitz, Journal of Research of the NBS, V 56, Jan 1956 Zeros of Bessel function j0, obtained from nco_bsl_zro(), are first guess for abscissae Original version: CCM1 Standardized: L. Bath, Jun 1992 L. Buja, Feb 1996 Reviewed: D. Williamson, J. Hack, Aug 1992 D. Williamson, J. Hack, Feb 1996 19970123 Modified by Jim Rosinski to use real*16 arithmetic in order to achieve (nearly) identical weights and latitudes on all machines. ~2000: Converted to Fortran9X by C. Zender, changed all real*16 statements to double precision (real*8) 20150530: Converted to C99 by C. 
Zender 20150725: Verified against tabulation at http://pomax.github.io/bezierinfo/legendre-gauss.html#n64 */ const char fnc_nm[]="nco_lat_wgt_gss()"; /* [sng] Function name */ const double eps_rlt=1.0e-16; // Convergence criterion (NB: Threshold was 1.0d-27 in real*16, 1.0e-15 fine for real*8, 1.0e-16 pushes double precision to the brink) const double pi=M_PI; // [frc] 3 const int itr_nbr_max=20; // [nbr] Maximum number of iterations double c_cff; // Constant combination coefficient double lat_idx_dbl; // Latitude index, double precision double lat_nnr_idx_dbl; // Inner latitude index, double precision double lat_nbr_dbl; // [nbr] Number of latitudes, double precision double pk=double_CEWI; // Polynomial double pkm1; // Polynomial double pkm2; // Polynomial double pkmrk; // Polynomial double sp; // Current iteration latitude increment double xz; // Abscissa estimate double cos_arg; // Intermediate parameter introduced while attempting to eliminate valgrind "uninitialised value" warnings int itr_cnt; // Iteration counter int lat_idx; // [idx] Counting index (latitude) int lat_sym_idx; // [idx] Counting index (symmetric latitude) int lat_nnr_idx; // [idx] Counting index (inner latitude loop) int lat_nbr_rcp2; // lat_nbr/2 (number of latitudes in hemisphere) double *lat_sin_p1; // Sine of Gaussian latitudes double precision double *wgt_Gss_p1; // Gaussian weights double precision /* Main Code */ if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stdout,"%s: DEBUG Entering %s\n",nco_prg_nm_get(),fnc_nm); /* Arrays with Fortran indexing (indicated by "plus one" = "_p1") keep numerical algorithm in C identical to Fortran */ lat_sin_p1=(double *)nco_malloc((lat_nbr+1)*sizeof(double)); // Sine of Gaussian latitudes double precision wgt_Gss_p1=(double *)nco_malloc((lat_nbr+1)*sizeof(double)); // Gaussian weights double precision /* Use Newton iteration to find abscissae */ c_cff=0.25*(1.0-4.0/(pi*pi)); lat_nbr_dbl=lat_nbr; lat_nbr_rcp2=lat_nbr/2; // NB: Integer arithmetic (void)nco_bsl_zro(lat_nbr_rcp2,lat_sin_p1); for(lat_idx=1;lat_idx<=lat_nbr_rcp2;lat_idx++){ // NB: Loop starts at 1 // 20150713: Introduce intermediate parameter cos_arg in attempt to eliminate valgrind "uninitialised value" warnings emitted by cos() (actually __cos_sse()) // Warnings occur with gcc-compiled code, not with clang-compiled code cos_arg=lat_sin_p1[lat_idx]/sqrt((lat_nbr_dbl+0.5)*(lat_nbr_dbl+0.5)+c_cff); xz=cos(cos_arg); /* First approximation to xz */ itr_cnt=0; /* goto label_73 */ label_73: pkm2=1.0; pkm1=xz; if(++itr_cnt > itr_nbr_max){ (void)fprintf(stdout,"%s: ERROR %s reports convergence only %g after %d iterations for lat_idx = %d\n",nco_prg_nm_get(),fnc_nm,fabs(sp),itr_nbr_max,lat_idx); nco_exit(EXIT_FAILURE); } /* endif */ /* Compute Legendre polynomial */ for(lat_nnr_idx=2;lat_nnr_idx<=lat_nbr;lat_nnr_idx++){ lat_nnr_idx_dbl=lat_nnr_idx; pk=((2.0*lat_nnr_idx_dbl-1.0)*xz*pkm1-(lat_nnr_idx_dbl-1.0)*pkm2)/lat_nnr_idx_dbl; pkm2=pkm1; pkm1=pk; } /* end inner loop over lat_nnr */ pkm1=pkm2; pkmrk=(lat_nbr_dbl*(pkm1-xz*pk))/(1.0-xz*xz); sp=pk/pkmrk; xz=xz-sp; /* NB: Easy to introduce bug here by not replacing Fortran abs() with C fabs() */ if(fabs(sp) > eps_rlt) goto label_73; lat_sin_p1[lat_idx]=xz; wgt_Gss_p1[lat_idx]=(2.0*(1.0-xz*xz))/((lat_nbr_dbl*pkm1)*(lat_nbr_dbl*pkm1)); } /* end outer loop over lat */ if(lat_nbr != lat_nbr_rcp2*2){ /* When lat_nbr is odd, compute weight at Equator */ lat_sin_p1[lat_nbr_rcp2+1]=0.0; pk=2.0/(lat_nbr_dbl*lat_nbr_dbl); for(lat_idx=2;lat_idx<=lat_nbr;lat_idx+=2){ 
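/* Each pass multiplies pk by (lat_idx/(lat_idx-1))^2, so pk accumulates 2/n^2*((n-1)!!/(n-2)!!)^2 for odd n=lat_nbr, which (if we recall the Legendre identity P_{n-1}(0)=+/-(n-2)!!/(n-1)!! correctly) equals the Gaussian weight 2*(1-x*x)/(n*P_{n-1}(x))^2 evaluated at the Equator x=0 */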
lat_idx_dbl=lat_idx;
pk=pk*lat_idx_dbl*lat_idx_dbl/((lat_idx_dbl-1.0)*(lat_idx_dbl-1.0));
} /* end loop over lat */
wgt_Gss_p1[lat_nbr_rcp2+1]=pk;
} /* endif lat_nbr is odd */
/* Complete sets of abscissas and weights, using symmetry properties */
for(lat_idx=1;lat_idx<=lat_nbr_rcp2;lat_idx++){
lat_sym_idx=lat_nbr-lat_idx+1;
lat_sin_p1[lat_sym_idx]=-lat_sin_p1[lat_idx];
wgt_Gss_p1[lat_sym_idx]=wgt_Gss_p1[lat_idx];
} /* end loop over lat */
/* Shift by one to remove Fortran offset in p1 arrays */
//memcpy(lat_sin,lat_sin_p1,lat_nbr*sizeof(double));
//memcpy(wgt_Gss,wgt_Gss_p1,lat_nbr*sizeof(double));
/* Reverse and shift arrays because original CCM code algorithm computes latitudes from north-to-south
Shift by one to remove Fortran offset in p1 arrays */
if(flg_s2n){
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){
lat_sin[lat_idx]=lat_sin_p1[lat_nbr-lat_idx];
wgt_Gss[lat_idx]=wgt_Gss_p1[lat_nbr-lat_idx];
} /* end loop over lat */
}else{
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){
lat_sin[lat_idx]=lat_sin_p1[lat_idx+1];
wgt_Gss[lat_idx]=wgt_Gss_p1[lat_idx+1];
} /* end loop over lat */
} /* !flg_s2n */
if(nco_dbg_lvl_get() == nco_dbg_old){
(void)fprintf(stdout,"%s: DEBUG %s reports lat_nbr = %d\n",nco_prg_nm_get(),fnc_nm,lat_nbr);
(void)fprintf(stdout,"idx\tasin\tngl_rad\tngl_dgr\tgw\n");
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) (void)fprintf(stdout,"%d\t%g\t%g\t%g\t%g\n",lat_idx,lat_sin[lat_idx],asin(lat_sin[lat_idx]),180.0*asin(lat_sin[lat_idx])/pi,wgt_Gss[lat_idx]);
} /* endif dbg */
if(wgt_Gss_p1) wgt_Gss_p1=(double *)nco_free(wgt_Gss_p1);
if(lat_sin_p1) lat_sin_p1=(double *)nco_free(lat_sin_p1);
return;
} /* end nco_lat_wgt_gss() */

void nco_sph_plg_area /* [fnc] Compute area of spherical polygon */
(rgr_sct * const rgr, /* I [sct] Regridding structure */
const double * const lat_bnd, /* [dgr] Latitude boundaries of rectangular grid */
const double * const lon_bnd, /* [dgr] Longitude boundaries of rectangular grid */
const long col_nbr, /* [nbr] Number of columns in grid */
const int bnd_nbr, /* [nbr] Number of bounds in gridcell */
double * const area) /* [sr] Gridcell area */
{
/* Purpose: Compute area of spherical polygon */
/* Computing triangular area accurately is hard in corner cases
Spherical triangles suffer from at least as many issues as planar, which are described by "Miscalculating Area and Angles of a Needle-like Triangle" by W.
Kahan, UC Berkeley
In particular, the Law of Cosines and Heron's formula can be ill-conditioned
For spherical triangles L'Huilier's Theorem is superior to Girard's Formula:
http://mathworld.wolfram.com/LHuiliersTheorem.html
Girard's formula depends on pi-minus-angle and angle is usually quite small in our applications so precision would be lost
L'Huilier's theorem depends only on angles (a,b,c) and semi-perimeter (s) and is well-conditioned for small angles
semi-perimeter = half-perimeter of triangle = 0.5*(a+b+c)
Spherical Excess (SE) is the difference between the sum of the angles of a spherical triangle and that of a planar triangle, whose interior angles sum to pi
SE is also the solid angle subtended by the spherical triangle and that's, well, astonishing and pretty cool
Wikipedia shows a better SE formula for triangles that are ill-conditioned for L'Huilier's formula because a = b ~ 0.5c
https://en.wikipedia.org/wiki/Spherical_trigonometry#Area_and_spherical_excess
See also interesting discussion of L'Huilier by Charles Karney who suggests his own alternative:
http://osgeo-org.1560.x6.nabble.com/Area-of-a-spherical-polygon-td3841625.html
The discussion mentions Mil94
Robert D. Miller, Computing the area of a spherical polygon, Graphic Gems IV, chapter II.4, pages 132-137.
http://books.google.com/books?id=CCqzMm_-WucC&pg=PA132&lpg=PA132&dq=miller+area+spherical+polygon+gems&source=bl&ots=mrnvZ6NJcm&sig=CMg8eaD8dzP5snMaPeCQzgoFWUk&hl=sv&ei=4G-YTKv5GsWZOI-mmZQP&sa=X&oi=book_result&ct=result&resnum=1&ved=0CBQQ6AEwAA#v=onepage&q&f=false
Mil94 contains similar ideas to my method for spherical polygons (decomposing into adjacent multiple triangles from single vertex)
However, his method places single vertex at pole, then adds signed areas to obtain full polygon area
His method may suffer from degraded precision because of roundoff error and long side-lengths
So-called "proper" spherical triangles are those for which all angles are less than pi, so a+b+c<3*pi
Cartesian coordinates of (lat,lon)=(theta,phi) are (x,y,z)=(cos(theta)*cos(phi),cos(theta)*sin(phi),sin(theta))
Dot-product rule for vectors gives interior angle/arc length between two points:
cos(a)=u dot v=cos(theta1)*cos(phi1)*cos(theta2)*cos(phi2)+cos(theta1)*sin(phi1)*cos(theta2)*sin(phi2)+sin(theta1)*sin(theta2)
Spherical law of cosines relates interior angles/arc-lengths (a,b,c) to surface angles (A,B,C) in spherical triangle:
https://en.wikipedia.org/wiki/Spherical_law_of_cosines
cos(a)=cos(b)*cos(c)+sin(b)*sin(c)*cos(A)
cos(b)=cos(c)*cos(a)+sin(c)*sin(a)*cos(B)
cos(c)=cos(a)*cos(b)+sin(a)*sin(b)*cos(C)
cos(A)=[cos(a)-cos(b)*cos(c)]/[sin(b)*sin(c)]
cos(B)=[cos(b)-cos(c)*cos(a)]/[sin(c)*sin(a)]
cos(C)=[cos(c)-cos(a)*cos(b)]/[sin(a)*sin(b)]
Bounds information on unstructured grids will use bounds_nbr=maximum(vertice_nbr)
Unused vertices are stored as either repeated points (ACME does this) or, conceivably, as missing values
Given (lat,lon) for N points, the algorithm to find the area of a spherical polygon is:
1. Any decomposition, Girard areas: Loses precision due to mismatch between pi and small spherical excesses
A. Find interior angles/arc-lengths (a,b,c,d...) using spherical law of cosines along each edge
B. Apply generalized Girard formula SE_n = Sum(A_n) - (N-2)*pi
2.
CSZ decomposition (N-2 triangles) with L'Huilier areas, Convert polygon into triangles by cycling spoke through all sides from common apex This method requires computation of N-2 (not N) triangles, though fewer sides due to optimization It works on all convex polygons (interior angles less than 180) but not, in general, concave polygons Whether it works or not on concave polygons depends upon their exact shape and the choice of apex point A. First three non-identical points form first triangle with sides A,B,C (first+second point define A, etc.) i. First vertice anchors all triangles ii. Third vertice of preceding triangle becomes second vertice of next triangle iii. Next non-identical point becomes last vertice of next triangle iv. Side C of previous triangle is side A of next triangle B. For each triangle, compute area with L'Huilier formula unless A = B ~ 0.5*C then use SAS formula 3. centroidal decomposition, N triangle version by Taylor, L'Huilier areas: Compute polygon centroid and treat this as hub from which spokes are drawn to all vertices This method requires computation of N triangles, though fewer sides due to optimization Moreover, it works on all convex polygons and on slightly concave polygons Centroid/hub has clear view of interior of most simple concave polygons 4. Any decomposition but with exact RLL grids by Zender and Agress 20160918 A. Decompose polygon into triangles via any method (e.g., method 2 or 3 above) B. Determine whether triangle is spherical or contains RLL (constant latitude) C. Spherical triangles use L'Huilier, RLL triangles use series expansion */ const char fnc_nm[]="nco_sph_plg_area()"; const double dgr2rdn=M_PI/180.0; int bnd_nbr_ttl; /* [nbr] Number of bounds in gridcell accounting for possibility of centroid information */ long idx; /* [idx] Counting index for unrolled grids */ short int bnd_idx; /* Shift to this method once we pass rgr into nco_sph_plg_area() */ nco_bool flg_mth_csz=False; /* [flg] Use CSZ's advancing polygon bisector method */ nco_bool flg_mth_ctr=False; /* [flg] Use centroid method to compute polygon area */ nco_edg_typ_enm edg_typ; /* [enm] Arc-type for triangle edges */ nco_ply_tri_mth_typ_enm ply_tri_mth; /* [enm] Polygon decomposition method */ if(rgr->edg_typ == nco_edg_nil) rgr->edg_typ=nco_edg_gtc; edg_typ=rgr->edg_typ; /* [enm] Arc-type for triangle edges */ ply_tri_mth=rgr->ply_tri_mth; /* [enm] Polygon decomposition method */ if(ply_tri_mth == nco_ply_tri_mth_csz) flg_mth_csz=True; if(ply_tri_mth == nco_ply_tri_mth_ctr) flg_mth_ctr=True; assert(flg_mth_ctr != flg_mth_csz); bnd_nbr_ttl=bnd_nbr; // Allocate space for one extra boundary to store centroid information if necessary if(flg_mth_ctr) bnd_nbr_ttl=bnd_nbr+1; double *lat_bnd_rdn=NULL_CEWI; /* [rdn] Latitude boundaries of rectangular destination grid */ double *lon_bnd_rdn=NULL_CEWI; /* [rdn] Longitude boundaries of rectangular destination grid */ double *lat_bnd_sin=NULL_CEWI; /* [frc] Sine of latitude boundaries of rectangular destination grid */ double *lon_bnd_sin=NULL_CEWI; /* [frc] Sine of longitude boundaries of rectangular destination grid */ double *lat_bnd_cos=NULL_CEWI; /* [frc] Cosine of latitude boundaries of rectangular destination grid */ double *lon_bnd_cos=NULL_CEWI; /* [frc] Cosine of longitude boundaries of rectangular destination grid */ /* Allocate one extra space for some arrays to store polygon centroid values for each column for ply_tri_mth=ctr */ lon_bnd_rdn=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lat_bnd_rdn=(double 
*)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lon_bnd_cos=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); lat_bnd_cos=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lon_bnd_sin=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); lat_bnd_sin=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); memcpy(lat_bnd_rdn,lat_bnd,col_nbr*bnd_nbr*sizeof(double)); memcpy(lon_bnd_rdn,lon_bnd,col_nbr*bnd_nbr*sizeof(double)); for(idx=0;idx<col_nbr*bnd_nbr;idx++){ lon_bnd_rdn[idx]*=dgr2rdn; lat_bnd_rdn[idx]*=dgr2rdn; lon_bnd_cos[idx]=cos(lon_bnd_rdn[idx]); lat_bnd_cos[idx]=cos(lat_bnd_rdn[idx]); lon_bnd_sin[idx]=sin(lon_bnd_rdn[idx]); lat_bnd_sin[idx]=sin(lat_bnd_rdn[idx]); } /* !idx */ double area_smc_crc; /* [sr] Small-circle correction to spherical triangle area */ double area_smc; /* [sr] Gridcell area allowing for latitude-triangles */ double area_ttl; /* [sr] Total area of input polygon list assuming spherical triangles */ double area_smc_ttl; /* [sr] Total area of input polygon list allowing for latitude-triangles */ double area_smc_crc_ttl; /* [sr] Latitude-triangle correction (should be small) to total area of input polygon list */ double area_smc_crc_abs_ttl; /* [sr] Latitude-triangle absolute correction (no compensation of positive/negative contributions, should be no smaller than above) to total area of input polygon list */ double lat_ctr; /* [dgr] Latitude of polygon centroid */ double lon_ctr; /* [dgr] Longitude of polygon centroid */ double lat_ctr_rdn; /* [rdn] Latitude of polygon centroid */ double lon_ctr_rdn; /* [rdn] Longitude of polygon centroid */ double lat_ctr_cos; /* [frc] Cosine latitude of polygon centroid */ double lat_dlt; /* [rdn] Latitudinal difference */ double lon_dlt; /* [rdn] Longitudinal difference */ double ngl_a; /* [rdn] Interior angle/great circle arc a */ double ngl_b; /* [rdn] Interior angle/great circle arc b */ double ngl_c; /* [rdn] Interior angle/great circle arc c */ double ngl_ltr_a; /* [rdn] Interior angle/small circle arc a, canonical latitude-triangle geometry */ double ngl_ltr_b; /* [rdn] Interior angle/great circle arc b, canonical latitude-triangle geometry */ double ngl_ltr_c; /* [rdn] Interior angle/great circle arc c, canonical latitude-triangle geometry */ double prm_smi; /* [rdn] Semi-perimeter of triangle */ double sin_hlf_tht; /* [frc] Sine of half angle/great circle arc theta connecting two points */ double xcs_sph; /* [sr] Spherical excess */ int tri_nbr; /* [nbr] Number of triangles in polygon */ long bnd_vld_nbr=NC_MIN_INT; /* [idx] Number of valid (non-duplicative) vertices in each triangle */ long *a_idx; /* [idx] Point A 1-D indices for each triangle in polygon */ long *b_idx; /* [idx] Point B 1-D indices for each triangle in polygon */ long *c_idx; /* [idx] Point C 1-D indices for each triangle in polygon */ long *vrt_vld=NULL; /* [idx] Absolute 1-D indices of valid vertices */ long idx_a; /* [idx] Point A 1-D index */ long idx_b; /* [idx] Point B 1-D index */ long idx_c; /* [idx] Point C 1-D index */ nco_bool flg_sas_ndl=False; /* [flg] L'Huilier's formula will fail due to needle where one side exceeds semi-perimeter */ nco_bool flg_sas_isc=False; /* [flg] L'Huilier's formula is ill-conditioned due to flat, near-isoceles triangle */ nco_bool flg_sas_a=False; /* [flg] Use SAS triangle formula with central angle a */ nco_bool flg_sas_b=False; /* [flg] Use SAS triangle formula with central angle b */ nco_bool flg_sas_c=False; /* [flg] Use SAS triangle formula with central angle c */ nco_bool flg_ply_has_smc; /* 
[flg] Any triangle in polygon has small-circle edge */
nco_bool flg_tri_crr_smc; /* [flg] Current triangle has small-circle edge */
/* Initialize global accumulators */
area_ttl=0.0;
area_smc_ttl=0.0;
area_smc_crc_ttl=0.0;
area_smc_crc_abs_ttl=0.0;
for(long col_idx=0;col_idx<col_nbr;col_idx++){
/* Initialize local properties and accumulators for this cell/polygon */
flg_ply_has_smc=False;
ngl_c=double_CEWI; /* Otherwise compiler unsure ngl_c is initialized first use */
area[col_idx]=0.0;
area_smc=0.0;
tri_nbr=0;
if(col_idx == 0){
a_idx=(long *)nco_calloc(bnd_nbr,sizeof(long));
b_idx=(long *)nco_calloc(bnd_nbr,sizeof(long));
c_idx=(long *)nco_calloc(bnd_nbr,sizeof(long));
vrt_vld=(long *)nco_calloc(bnd_nbr,sizeof(long));
} /* !col_idx */
/* Safety re-initialization to ease debugging, not strictly necessary */
for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++){
vrt_vld[bnd_idx]=NC_MIN_INT;
a_idx[bnd_idx]=NC_MIN_INT;
b_idx[bnd_idx]=NC_MIN_INT;
c_idx[bnd_idx]=NC_MIN_INT;
} /* !bnd_idx */
if(flg_mth_ctr){
double lon_dff; /* [dgr] Longitude difference */
long bnd_srt_idx; /* [idx] Absolute starting index of vertices in polygon */
long bnd_idx; /* [idx] Offset of current valid vertex index from starting index */
long bnd_vld_idx; /* [idx] Absolute index of last valid vertex */
/* First vertice is always valid */
bnd_srt_idx=bnd_nbr*col_idx;
bnd_vld_idx=bnd_srt_idx;
vrt_vld[0]=bnd_vld_idx;
lat_ctr=lat_bnd[bnd_srt_idx];
lon_ctr=lon_bnd[bnd_srt_idx];
bnd_vld_nbr=1;
/* First guess for next valid index */
bnd_idx=1;
/* bnd_idx labels offset from first vertex of next valid (i.e., non-duplicative) vertex */
while(bnd_idx<bnd_nbr){
/* Skip repeated points that must occur when polygon has fewer than allowed vertices */
while(lon_bnd[bnd_vld_idx] == lon_bnd[bnd_srt_idx+bnd_idx] && lat_bnd[bnd_srt_idx] == lat_bnd[bnd_srt_idx+bnd_idx]){
/* Next valid vertice must not duplicate first vertex */
bnd_idx++;
/* Have we already found all valid vertices? */
if(bnd_idx == bnd_nbr) break;
} /* !while */
/* Jump to normalization when all valid vertices found */
if(bnd_idx == bnd_nbr) break;
/* Current vertex is valid (non-duplicative) */
bnd_vld_idx=bnd_srt_idx+bnd_idx;
vrt_vld[bnd_vld_nbr]=bnd_vld_idx;
bnd_vld_nbr++;
if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG %s reports centroidal decomposition col_idx=%lu, bnd_nbr=%d, bnd_idx=%ld, bnd_vld_idx=%ld, bnd_vld_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,col_idx,bnd_nbr,bnd_idx,bnd_vld_idx,bnd_vld_nbr);
assert(bnd_vld_nbr <= bnd_nbr);
lat_ctr+=lat_bnd[bnd_vld_idx];
lon_ctr+=lon_bnd[bnd_vld_idx];
lon_dff=lon_bnd[bnd_vld_idx]-lon_bnd[0];
if(lon_dff >= 180.0){
lon_ctr-=360.0;
}else if(lon_dff <= -180.0){
lon_ctr+=360.0;
} /* !lon_dff */
/* Search for next valid vertice in next iteration */
bnd_idx++;
} /* !bnd_idx */
/* Compute centroid */
lat_ctr/=bnd_vld_nbr;
lon_ctr/=bnd_vld_nbr;
/* Centroid can become point A of bnd_nbr triangles, or optimize algorithm:
1. Skip sub-dividing polygon into centroid-based triangles for bnd_vld_nbr == 3
2. Split quadrilaterals into two (non-centroid) triangles for bnd_vld_nbr == 4
3.
Use full centroid-based triangle algorithm for bnd_vld_nbr >= 5 */
lat_ctr_rdn=lat_ctr*dgr2rdn;
lon_ctr_rdn=lon_ctr*dgr2rdn;
lat_ctr_cos=cos(lat_ctr_rdn);
/* Place centroid values in extended arrays for easy access */
lat_bnd_rdn[(col_idx+1)*bnd_nbr_ttl-1L]=lat_ctr_rdn;
lon_bnd_rdn[(col_idx+1)*bnd_nbr_ttl-1L]=lon_ctr_rdn;
lat_bnd_cos[(col_idx+1)*bnd_nbr_ttl-1L]=lat_ctr_cos;
/* Polygon centroid and valid vertices are now known */
assert(bnd_vld_nbr > 2);
if(bnd_vld_nbr == 3){
/* Three vertices only means polygon is already decomposed into a triangle */
tri_nbr=1;
a_idx[0]=vrt_vld[0];
b_idx[0]=vrt_vld[1];
c_idx[0]=vrt_vld[2];
}else if(bnd_vld_nbr == 4){
/* Bisect quadrilateral into two triangles rather than use centroid and have four triangles */
tri_nbr=2;
a_idx[0]=vrt_vld[0];
b_idx[0]=vrt_vld[1];
c_idx[0]=vrt_vld[2];
a_idx[1]=vrt_vld[0]; /* NB: Order is important. This way side C of triangle[0] = side A of triangle[1] */
b_idx[1]=vrt_vld[2];
c_idx[1]=vrt_vld[3];
}else if(bnd_vld_nbr >= 5){
/* Centroid method has as many triangles as valid vertices */
tri_nbr=bnd_vld_nbr;
for(int tri_idx=0;tri_idx<tri_nbr;tri_idx++){
a_idx[tri_idx]=(col_idx+1)*bnd_nbr_ttl-1L; /* A is always centroid, store values at end of arrays */
b_idx[tri_idx]=vrt_vld[tri_idx];
c_idx[tri_idx]=vrt_vld[(tri_idx+1)%tri_nbr];
} /* !tri_idx */
} /* !bnd_vld_nbr */
} /* !flg_mth_ctr */
if(flg_mth_csz){
/* A is always first vertice of all triangles */
idx_a=bnd_nbr*col_idx;
/* Start search for B at next vertice */
bnd_idx=1;
/* bnd_idx labels offset from point A of potential location of triangle points B and C
We know that bnd_idx(A) == 0, bnd_idx(B) < bnd_nbr-1, bnd_idx(C) < bnd_nbr */
while(bnd_idx<bnd_nbr-1){
/* Only first triangle must search for B, subsequent triangles recycle previous C as current B */
if(tri_nbr == 0){
/* Skip repeated points that must occur when polygon has fewer than allowed vertices */
/* 20200115: Prior to today we never skipped polar points (same latitudes but different longitudes)
That worked fine in practice for spherical triangles partly because triangles from CSZ decomposition (aka hub-and-spoke decomposition) are additive, even with multiple points on the same great circle, and partly due to luck (a starting vertex surrounded by points on the same geodesic would break it).
Moreover, repeated polar points pose no issues for L'Huilier's (or Girard's) method which depends only on the interior angles and side lengths, not the longitudes of polar points.
Small circles change that last part, and we must now eliminate repeated polar points.
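For example (our illustration): vertices (lat,lon) = (90,0) and (90,120) compare as numerically distinct yet denote the same point on the sphere, which is why the small-circle branch below supplements the exact-equality test with a fabs(lat) == 90 test.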
*/
if(edg_typ == nco_edg_smc){
/* Skip repeated numerically identical points */
while(lon_bnd[idx_a] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_a] == lat_bnd[idx_a+bnd_idx]){
/* Next vertice may not duplicate A */
bnd_idx++;
/* If there is no room for C then all triangles found */
if(bnd_idx == bnd_nbr-1) break;
} /* !while */
/* Skip geometrically identical (i.e., repeated polar) points */
while((fabs(lat_bnd[idx_a]) == 90.0) && (fabs(lat_bnd[idx_a+bnd_idx]) == 90.0)){
bnd_idx++;
if(bnd_idx == bnd_nbr-1) break;
} /* !while */
}else if(edg_typ != nco_edg_smc){
/* Spherical polygons can use simpler, pre-20200116 algorithm to eliminate repeated points */
while(lon_bnd[idx_a] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_a] == lat_bnd[idx_a+bnd_idx]){
/* Next vertice may not duplicate A */
bnd_idx++;
/* If there is no room for C then all triangles found */
if(bnd_idx == bnd_nbr-1) break;
} /* !while */
}else{
abort();
} /* !edg_typ */
/* Jump to next column when all triangles found */
if(bnd_idx == bnd_nbr-1) break;
} /* !tri_nbr */
idx_b=idx_a+bnd_idx;
/* Search for C at next vertice */
bnd_idx++;
/* fxm */
while(lon_bnd[idx_b] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_b] == lat_bnd[idx_a+bnd_idx]){
/* Next vertice may not duplicate B */
bnd_idx++;
/* If there is no room for C then all triangles found */
if(bnd_idx == bnd_nbr) break;
} /* !while */
/* Jump to next column when all triangles found */
if(bnd_idx == bnd_nbr) break;
idx_c=idx_a+bnd_idx;
/* Valid triangle, vertices are known and labeled */
a_idx[tri_nbr]=idx_a;
b_idx[tri_nbr]=idx_b;
c_idx[tri_nbr]=idx_c;
tri_nbr++;
/* Begin search for next B at current C */
bnd_idx=idx_c-idx_a;
} /* !bnd_idx */
} /* !flg_mth_csz */
/* Triangles are known for requested decomposition method
Compute and accumulate their area
Optimized algorithm recycles previous arc c as current arc a (after first triangle) */
for(int tri_idx=0;tri_idx<tri_nbr;tri_idx++){
idx_a=a_idx[tri_idx];
idx_b=b_idx[tri_idx];
idx_c=c_idx[tri_idx];
if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG %s reports triangle vertices: col_idx=%lu, tri_idx=%d, idx_a=%ld, idx_b=%ld, idx_c=%ld\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,idx_a,idx_b,idx_c);
/* Compute interior angle/great circle arc a for first triangle; subsequent triangles recycle previous arc c */
if(tri_idx == 0){
/* 20150831: Test by computing ncol=0 area in conus chevrons grid, compare to MAT results
ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/257x512_SCRIP.20150901.nc -m ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.20150901.nc
ncremap -s ${DATA}/grids/257x512_SCRIP.20150901.nc -g ${DATA}/grids/conusx4v1np4_chevrons_scrip_c150815.nc -m ${DATA}/maps/map_fv257x512_to_conusx4v1np4_chevrons_bilin.20150901.nc
ncks -O -D 5 -v FSNT --map ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.150418.nc ${DATA}/ne30/raw/famipc5_ne30_v0.3_00003.cam.h0.1979-01.nc ${DATA}/ne30/rgr/fv_FSNT.nc
ncks -O -D 5 -v FSNT --rgr diagnose_area --map ${DATA}/maps/map_fv257x512_to_conusx4v1np4_chevrons_bilin.20150901.nc ${DATA}/ne30/rgr/fv_FSNT.nc ${DATA}/ne30/rgr/dogfood.nc
ncks -O -D 1 --rgr infer#diagnose_area --rgr grid=${HOME}/grd.nc ${DATA}/ne30/rgr/dogfood.nc ~/foo.nc
ncks -H -s %20.15e, -v area -d ncol,0 ${DATA}/ne30/rgr/dogfood.nc
ncks -H -s %20.15e, -v grid_area -d grid_size,0 ${DATA}/grids/conusx4v1np4_chevrons_scrip_c150815.nc
ncks -H -s %20.15e, -v grid_area -d grid_size,0 ${HOME}/grd.nc
ncol=0 on conus chevrons file:
3.653857995295246e-05 raw GLL weight
3.653857995294305e-05 ESMF weight (area_b from
map-file) 3.653857995294302e-05 matlab CSZ decomposition (N-2 triangles) computed at SNL by MAT 3.653857995294301e-05 matlab centroidal decomposition (N triangles) computed at SNL by MAT 3.653857995294258e-05 NCO CSZ _and_ centroidal decompositions (new haversine) 3.653857995289623e-05 NCO CSZ decomposition (old acos) 20191011: Tested this same polygon in ESMF and NCO weight-generator NCO maps begin with first destination gridcell, find next ESMF gridcell by searching for first col: ncks --trd -C -v col ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc | egrep "=1 " ncks -H --trd -s %20.15e -C -d n_b,0 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc 3.653857995294305e-05 ncks -H --trd -s '%20.15e, ' -C -d n_b,0 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 3.653857995295246e-05 ESMF and NCO weight-generators produce nearly identical S results to double-precision: ncks -H --trd -s '%20.15e, ' -C -d n_s,0,1 -v S ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 2.181999640069480e-03, 1.309571213636605e-02 ncks -H --trd -s %20.15e -C -d n_s,207436 -d n_s,209617 -v S ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc 2.181999640069454e-03, 1.309571213636510e-02 Compare first five polygon areas: ncks --trd -H -C -s '%20.15e, ' -d n_b,0,4 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc ncks --trd -H -C -s '%20.15e, ' -d n_b,0,4 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 3.653857995294305e-05, 1.250459284052488e-04, 1.448204605591709e-04, 8.223598867312266e-05, 8.585831933875070e-05, # aave 3.653857995294258e-05, 1.250459284052470e-04, 1.448204605591675e-04, 8.223598867312247e-05, 8.585831933875186e-05, Compare total areas: ncwa -O -y ttl -v area.? ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc ~/foo_aave.nc ncwa -O -y ttl -v area.? ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc ~/foo_nco.nc ncks --trd -H -C -s '%20.15e, ' -v area.? ~/foo_aave.nc ncks --trd -H -C -s '%20.15e, ' -v area.? ~/foo_nco.nc aave: 1.256637061435867e+01, 1.256637061435973e+01 nco: 1.256637061435857e+01, 1.256637061435955e+01 4*pi: 1.25663706143591729538e+01 Does (tru_glb_ttl/NCO_glb_ttl)*NCO_lcl = ESMF_lcl ? 
(1.25663706143591729538/1.256637061435857)*3.653857995294258=3.6538579952944333 No, normalization alone does not explain differences between ESMF and NCO It does not appear that ESMF does a global normalization of areas/weights */ /* Computing great circle arcs over small arcs requires care since central angle is near 0 degrees Cosine small angles changes slowly for such angles, and leads to precision loss Use haversine formula instead of spherical law of cosines formula https://en.wikipedia.org/wiki/Great-circle_distance */ /* Interior angle/great circle arc a, spherical law of cosines formula (loses precision): cos_a=lat_bnd_cos[idx_a]*lon_bnd_cos[idx_a]*lat_bnd_cos[idx_b]*lon_bnd_cos[idx_b]+ lat_bnd_cos[idx_a]*lon_bnd_sin[idx_a]*lat_bnd_cos[idx_b]*lon_bnd_sin[idx_b]+ lat_bnd_sin[idx_a]*lat_bnd_sin[idx_b];ngl_a=acos(cos_a); */ /* Interior angle/great circle arc a, haversine formula: */ // 20160918: Use branch cut rules for longitude lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_a],lon_bnd_rdn[idx_b])); lat_dlt=fabs(lat_bnd_rdn[idx_a]-lat_bnd_rdn[idx_b]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_a]*lat_bnd_cos[idx_b]*pow(sin(0.5*lon_dlt),2)); ngl_a=2.0*asin(sin_hlf_tht); }else{ /* !tri_idx == 0 */ ngl_a=ngl_c; } /* !tri_idx == 0 */ /* Interior angle/great circle arc b */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_b],lon_bnd_rdn[idx_c])); lat_dlt=fabs(lat_bnd_rdn[idx_b]-lat_bnd_rdn[idx_c]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_b]*lat_bnd_cos[idx_c]*pow(sin(0.5*lon_dlt),2)); ngl_b=2.0*asin(sin_hlf_tht); /* Interior angle/great circle arc c */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_c],lon_bnd_rdn[idx_a])); lat_dlt=fabs(lat_bnd_rdn[idx_c]-lat_bnd_rdn[idx_a]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_c]*lat_bnd_cos[idx_a]*pow(sin(0.5*lon_dlt),2)); ngl_c=2.0*asin(sin_hlf_tht); /* Semi-perimeter */ prm_smi=0.5*(ngl_a+ngl_b+ngl_c); /* L'Huilier's formula results in NaN if any side exceeds semi-perimeter This can occur in needle-shaped triangles due to rounding errors in derived arc lengths a, b, c 20200203: Problematic needles occurs a few dozen times in ne120pg2 -> cmip6 maps Problematic isoceles triangles are much rarer than problematic needles Therefore look for needle-issues first, then, if none found, look for isoceles issues Wikipedia recommends treating ill-conditioned triangles by Side-Angle-Side (SAS) formula https://en.wikipedia.org/wiki/Spherical_trigonometry Diagnose needles beforehand and call SAS routines as above to avoid NaN in L'Huilier Label problematic needle triangles by shortest side, e.g., "flg_sas_a" means (b ~ c) and a ~ 0.0 */ flg_sas_ndl=flg_sas_isc=flg_sas_a=flg_sas_b=flg_sas_c=False; if(ngl_a > prm_smi){if(ngl_b > ngl_c) flg_sas_c=True; else flg_sas_b=True;} /* a exceeds semi-perimeter */ else if(ngl_b > prm_smi){if(ngl_c > ngl_a) flg_sas_a=True; else flg_sas_c=True;} /* b exceeds semi-perimeter */ else if(ngl_c > prm_smi){if(ngl_a > ngl_b) flg_sas_b=True; else flg_sas_a=True;} /* c exceeds semi-perimeter */ if(flg_sas_a || flg_sas_b || flg_sas_c) flg_sas_ndl=True; if(!flg_sas_ndl){ /* L'Huilier's formula becomes ill-conditioned when two sides are one half the third side This occurs for flat, isoceles-shaped triangles Label problematic isoceles triangles by longest side, e.g., "flg_sas_a" means (b ~ c) ~ 0.5*a */ /* Sensitivity tests on ~20191014 showed that triangular ill-conditioning treatment (i.e., switching to SAS method) does not improve (and may degrade) accuracy for eps_ill_cnd > 
1.0e-15 */
const double eps_ill_cnd=1.0e-15; /* [frc] Ill-conditioned tolerance for interior angle/great circle arcs in triangle */
const double eps_ill_cnd_dbl=2.0*eps_ill_cnd; /* [frc] Ill-conditioned tolerance for interior angle/great circle arcs in triangle */
if((fabs(ngl_a-ngl_b) < eps_ill_cnd) && (fabs(ngl_a-0.5*ngl_c) < eps_ill_cnd_dbl)) flg_sas_c=True; /* c is twice a and b */
else if((fabs(ngl_b-ngl_c) < eps_ill_cnd) && (fabs(ngl_b-0.5*ngl_a) < eps_ill_cnd_dbl)) flg_sas_a=True; /* a is twice b and c */
else if((fabs(ngl_c-ngl_a) < eps_ill_cnd) && (fabs(ngl_c-0.5*ngl_b) < eps_ill_cnd_dbl)) flg_sas_b=True; /* b is twice c and a */
if(flg_sas_a || flg_sas_b || flg_sas_c) flg_sas_isc=True;
} /* !flg_sas_ndl */
if(flg_sas_isc || flg_sas_ndl){
/* Compute area using SAS formula */
double cos_hlf_C; /* [frc] Cosine of half of canonical surface angle C */
//double sin_hlf_C; /* [frc] Sine of half of canonical surface angle C */
double ngl_sfc_ltr_C; /* [rdn] Canonical surface angle/great circle arc C */
double tan_hlf_a_tan_hlf_b; /* [frc] Product of tangents of one-half of nearly equal canonical sides */
double xcs_sph_hlf_tan; /* [frc] Tangent of one-half the spherical excess */
/* Transform sides into canonical order for formula where C is surface angle between arcs a and b */
if(flg_sas_c){
ngl_ltr_a=ngl_a;
ngl_ltr_b=ngl_b;
ngl_ltr_c=ngl_c;
} /* !flg_sas_c */
if(flg_sas_a){
ngl_ltr_a=ngl_b;
ngl_ltr_b=ngl_c;
ngl_ltr_c=ngl_a;
} /* !flg_sas_a */
if(flg_sas_b){
ngl_ltr_a=ngl_c;
ngl_ltr_b=ngl_a;
ngl_ltr_c=ngl_b;
} /* !flg_sas_b */
if(flg_sas_ndl && (nco_dbg_lvl_get() >= nco_dbg_scl)) (void)fprintf(stdout,"%s: INFO %s reports col_idx = %li triangle %d is needle-shaped triangle with a side that exceeds semi-perimeter = %0.16e. Eschew L'Huilier's formula for spherical excess to avoid NaN. Could use SAS formula with canonical central interior arc c = %0.16e.\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,prm_smi,ngl_ltr_c);
if(flg_sas_isc && (nco_dbg_lvl_get() >= nco_dbg_scl)) (void)fprintf(stdout,"%s: INFO %s reports col_idx = %li triangle %d is nearly flat isoceles-shaped triangle. Canonical arcs a and b differ by %0.16e. Eschew L'Huilier's formula for spherical excess to avoid low precision. Could use SAS formula.\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,fabs(ngl_ltr_a-ngl_ltr_b));
/* Determine canonical surface angle C
To find any angle given three spherical triangle sides, Wikipedia opines:
"The cosine rule may be used to give the angles A, B, and C but, to avoid ambiguities, the half-angle formulae are preferred."
Half-angle formulae include two applicable variants that yield the sine or cosine of half C
Then C is determined as twice the asin() or acos() function, respectively
For needle-shaped triangles, RHS sin formula is ~ sin^2(0)/sin(a)*sin(b) ~ 0.0
For needle-shaped triangles, RHS cos formula is ~ sin^2(s)/sin(a)*sin(b) ~ 0.5
For flat isoceles triangles, RHS sin formula is ~ sin^2(0)/sin(a)*sin(b) ~ 0.0
For flat isoceles triangles, RHS cos formula is ~ sin(s)*sin(0)/sin(a)*sin(b) ~ 0.0
Use sin formula since both needle- and isoceles-shaped triangles have RHS ~ 0.0 where arcsin() is most precise
20200203: Half-angle sine formula gives NaNs, and half-angle cosine formula works on ne120pg2->cmip. Why?
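One plausible explanation (our conjecture, not verified): needle triangles reach this branch precisely when one arc exceeds the semi-perimeter s, so at least one sine-formula factor sin(s-a) or sin(s-b) is slightly negative and the sqrt() of a negative radicand yields NaN, whereas the cosine-formula factors sin(s) and sin(s-c) stay non-negative because canonical arc c is the short arc.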
Adopting cosine formula because it works */ //sin_hlf_C=sqrt(sin(prm_smi-ngl_ltr_a)*sin(prm_smi-ngl_ltr_b)/(sin(ngl_ltr_a)*sin(ngl_ltr_b))); // Half-angle sine formula cos_hlf_C=sqrt(sin(prm_smi)*sin(prm_smi-ngl_ltr_c)/(sin(ngl_ltr_a)*sin(ngl_ltr_b))); // Half-angle cosine formula //ngl_sfc_ltr_C=2.0*asin(sin_hlf_C); ngl_sfc_ltr_C=2.0*acos(cos_hlf_C); /* SAS formula */ tan_hlf_a_tan_hlf_b=tan(0.5*ngl_ltr_a)*tan(0.5*ngl_ltr_b); xcs_sph_hlf_tan=tan_hlf_a_tan_hlf_b*sin(ngl_sfc_ltr_C)/(1.0+tan_hlf_a_tan_hlf_b*cos(ngl_sfc_ltr_C)); assert(fabs(xcs_sph_hlf_tan) != M_PI_2); xcs_sph=2.0*atan(xcs_sph_hlf_tan); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO SAS area formula for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e). Spherical excess = %0.16e.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c,xcs_sph); // Single-line version // xcs_sph=2.0*atan(tan(0.5*ngl_ltr_a)*tan(0.5*ngl_ltr_b)*sin(2.0*acos(sqrt(sin(prm_smi)*sin(prm_smi-ngl_c)/(sin(ngl_a)*sin(ngl_b)))))/(1.0+tan_hlf_a_tan_hlf_b*cos(2.0*acos(sqrt(sin(prm_smi)*sin(prm_smi-ngl_c)/(sin(ngl_a)*sin(ngl_b))))))); /* Above procedure for problematic needle-shaped and isosceles-shaped triangles degrades statistics For ne30pg2, ne120pg2 -> cmip, setting area = 0.0 _greatly_ improves area statistics (Why?) Set spherical excess to zero for problematic needle-shaped and isosceles-shaped triangles */ /* fxm: Make zeroing skinny needles/isosceles-shaped triangle-areas a command-line option? */ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO Setting SAS area = 0.0\n",nco_prg_nm_get()); xcs_sph=0.0; /* !flg_sas */ }else{ double xcs_sph_qtr_tan; /* [frc] Tangent of one-quarter the spherical excess */ xcs_sph_qtr_tan=sqrt(tan(0.5*prm_smi)*tan(0.5*(prm_smi-ngl_a))*tan(0.5*(prm_smi-ngl_b))*tan(0.5*(prm_smi-ngl_c))); assert(fabs(xcs_sph_qtr_tan) != M_PI_2); xcs_sph=4.0*atan(xcs_sph_qtr_tan); /* 20191014: Aggregate all previous area-related commands into one, gigantic, unreadable, possibly more precise command (tested and it is more obfuscated but not more precise) */ // xcs_sph=4.0*atan(sqrt(tan(0.5*0.5*(ngl_a+ngl_b+ngl_c))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_a))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_b))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_c)))); } /* !flg_sas */ if(isnan(xcs_sph)){ const double eps_ngl_skn=1.0e-13; /* [frc] Angles skinnier than this form needles whose area ~ 0.0 */ /* Categorize reason for NaN */ (void)fprintf(stdout,"%s: WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING\nUnexpected NaN polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e).\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); if( /* Side exceeds semi-perimeter */ (ngl_a > prm_smi) || (ngl_b > prm_smi) || (ngl_c > prm_smi) ){ (void)fprintf(stdout,"%s: WARNING Triangle side exceeds semi-perimeter = %0.16e polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e).
Assigned triangle area = 0.0.\n",nco_prg_nm_get(),prm_smi,col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); }else if( /* Are angles too skinny? Quite often on ne30pg2, ne120pg2 */ (ngl_a < eps_ngl_skn) || (ngl_b < eps_ngl_skn) || (ngl_c < eps_ngl_skn) ){ (void)fprintf(stdout,"%s: WARNING Triangle has at least one skinny angle < %g [rdn] for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16f, %0.16f, %0.16f). Assigned triangle area = 0.0.\n",nco_prg_nm_get(),eps_ngl_skn,col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); }else if( /* Are two vertices identical to double-precision? Never on ne30pg2, ne120pg2 */ ((lat_bnd[idx_a] == lat_bnd[idx_b]) && (lon_bnd[idx_a] == lon_bnd[idx_b])) || ((lat_bnd[idx_b] == lat_bnd[idx_c]) && (lon_bnd[idx_b] == lon_bnd[idx_c])) || ((lat_bnd[idx_c] == lat_bnd[idx_a]) && (lon_bnd[idx_c] == lon_bnd[idx_a])) ){ (void)fprintf(stdout,"%s: WARNING Triangle has repeated points for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g). Assigned triangle area = 0.0.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); }else{ (void)fprintf(stdout,"%s: WARNING Triangle area formula yields NaN for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16f, %0.16f, %0.16f). Are points co-linear? Assigned triangle area = 0.0.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); } /* !co-linear */ xcs_sph=0.0; } /* !NaN */ area[col_idx]+=xcs_sph; /* Accumulate spherical triangle area into reported polygon area and adjust below */ area_smc+=xcs_sph; /* Accumulate spherical triangle area into small-circle polygon area and adjust below */ area_ttl+=xcs_sph; /* Accumulate spherical triangle area into total spherical area of all polygons */ area_smc_ttl+=xcs_sph; /* Accumulate spherical triangle area into total small-circle area of all polygons and adjust below */ /* 20160918 from here to end of loop is non-spherical work 20170217: Temporarily turn-off latitude circle diagnostics because Sungduk's POP case breaks them Canonical latitude-triangle geometry has point A at apex and points B and C at same latitude ncremap --dbg=1 --alg_typ=nco --grd_src=${DATA}/grids/ne30np4_pentagons.091226.nc --grd_dst=${DATA}/grids/cmip6_180x360_scrip.20181001.nc --map=${DATA}/maps/map_ne30np4_to_cmip6_180x360_nco.20190601.nc ncremap --dbg=1 -R 'edg_typ=smc' --alg_typ=nco --grd_src=${DATA}/grids/ne30np4_pentagons.091226.nc --grd_dst=${DATA}/grids/cmip6_180x360_scrip.20181001.nc --map=${DATA}/maps/map_ne30np4_to_cmip6_180x360_smc.20190601.nc */ flg_tri_crr_smc=False; if(lat_bnd_rdn[idx_a] == lat_bnd_rdn[idx_b] || lat_bnd_rdn[idx_b] == lat_bnd_rdn[idx_c] || lat_bnd_rdn[idx_c] == lat_bnd_rdn[idx_a]){ /* Set flag only if triangle is not degenerate.
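Rationale for the flag (editorial note): with edg_typ == nco_edg_smc, cell edges of constant latitude follow small circles rather than geodesics, so the great-circle triangle area accumulated above must be adjusted by the area between the latitude arc and the geodesic connecting the same two points, i.e., by area_smc_crc computed below.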
Degenerate triangles (3 points on a geodesic) have zero area */ if(xcs_sph != 0.0) flg_ply_has_smc=flg_tri_crr_smc=True; if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG Found small circle triangle with vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); } /* endif */ if((edg_typ == nco_edg_smc) && flg_tri_crr_smc){ double ngl_plr; /* [rdn] Polar angle (co-latitude) */ long idx_ltr_a; /* [idx] Point A (apex) of canonical latitude-triangle geometry, 1-D index */ long idx_ltr_b; /* [idx] Point B (base) of canonical latitude-triangle geometry, 1-D index */ long idx_ltr_c; /* [idx] Point C (base) of canonical latitude-triangle geometry, 1-D index */ /* Rotate labels to standard position with vertex A, equi-latitude points B and C */ if(lat_bnd_rdn[idx_a] == lat_bnd_rdn[idx_b]){ idx_ltr_a=idx_c; idx_ltr_b=idx_a; idx_ltr_c=idx_b; ngl_ltr_a=ngl_c; ngl_ltr_b=ngl_a; ngl_ltr_c=ngl_b; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_a]); }else if(lat_bnd_rdn[idx_b] == lat_bnd_rdn[idx_c]){ idx_ltr_a=idx_a; idx_ltr_b=idx_b; idx_ltr_c=idx_c; ngl_ltr_a=ngl_a; ngl_ltr_b=ngl_b; ngl_ltr_c=ngl_c; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_b]); }else if(lat_bnd_rdn[idx_c] == lat_bnd_rdn[idx_a]){ idx_ltr_a=idx_b; idx_ltr_b=idx_c; idx_ltr_c=idx_a; ngl_ltr_a=ngl_b; ngl_ltr_b=ngl_c; ngl_ltr_c=ngl_a; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_c]); }else{ (void)fprintf(stdout,"%s: ERROR latitudes not equal in small circle section. Vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); abort(); } /* endif */ /* 20160918: Compute exact area of latitude triangle wedge */ double xpn_x; /* [frc] Expansion parameter */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_ltr_b],lon_bnd_rdn[idx_ltr_c])); assert(lon_dlt != 0.0); // Latitude triangles must have bases with distinct longitudes if(lon_dlt != M_PI){ /* Normal clause executed for small-circle triangles */ /* Numeric conditioning uncertain. Approaches divide-by-zero when lon_dlt << 1 */ xpn_x=lat_bnd_sin[idx_ltr_b]*(1.0-cos(lon_dlt))/sin(lon_dlt); assert(fabs(xpn_x) != M_PI_2); area_smc_crc=2.0*atan(xpn_x); /* 20170217: Sungduk's POP regrid triggers following abort(): ncremap -D 1 -i ~/pop_g16.nc -d ~/cam_f19.nc -o ~/foo.nc */ //assert(xpn_x >= 0.0); //if(lat_bnd[idx_ltr_b] > 0.0) area_smc_crc+=-lon_dlt*lat_bnd_sin[idx_ltr_b]; else area_smc_crc+=+lon_dlt*lat_bnd_sin[idx_ltr_b]; area_smc_crc+=-lon_dlt*lat_bnd_sin[idx_ltr_b]; }else{ /* 20200228: Latitude triangles may have bases with longitudes that differ by 180 degrees Consider a quadrilateral with four equidistant vertices in longitude, and that caps a pole: CSZ decomposition technique divides this into two triangles each with three co-latitudinal points and no vertex at pole Solution candidates: 1. Divide such quadrilaterals using centroid technique Just realized current implementation of centroid decomposition fails on polar caps Failure occurs because centroid latitude is +/- ~90 not mean of vertices' latitudes Must impute "pseudo-centroid" with latitude +/- 90 instead of averaging vertex latitudes Requires testing each polygon to determine if it contains pole <- Too difficult/expensive 2. 
Assume latitude triangles whose base is 180 degrees are at pole Compute area exactly using analytic formula for annular lune */ (void)fprintf(stdout,"%s: INFO longitudes differ by pi in small circle section. Vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_ltr_a],lon_bnd[idx_ltr_a],lat_bnd[idx_ltr_b],lon_bnd[idx_ltr_b],lat_bnd[idx_ltr_c],lon_bnd[idx_ltr_c]); (void)fprintf(stdout,"%s: DEBUG col_nbr=%lu, bnd_nbr=%d, col_idx=%ld, area=%g. Vertices [0..bnd_nbr-1] in format idx (lat,lon)\n",nco_prg_nm_get(),col_nbr,bnd_nbr,col_idx,xcs_sph); for(int bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%2d (%g, %g)\n",bnd_idx,lat_bnd[bnd_nbr*col_idx+bnd_idx],lon_bnd[bnd_nbr*col_idx+bnd_idx]); (void)fprintf(stdout,"%s: INFO Assuming this triangle is decomposed from polar cap polygon. Treating area with analytic formula for annular lune\n",nco_prg_nm_get()); /* Compute small circle correction as difference between spherical triangle area and standard annular lune formula Small circle correction is positive-definite for polar triangles so use fabs(lat_bnd_sin) */ area_smc_crc=lon_dlt*fabs(lat_bnd_sin[idx_ltr_b])-area_smc; } /* !lon_dlt */ // Adjust diagnostic areas by small-circle area correction area_smc+=area_smc_crc; area_smc_ttl+=area_smc_crc; area_smc_crc_ttl+=area_smc_crc; area_smc_crc_abs_ttl+=fabs(area_smc_crc); // 20200109: Adjust area reported to calling code by small-circle area correction area[col_idx]+=area_smc_crc; if(0){ /* 20160918: Approximate area of latitude triangle wedge. Use truncated power expansion of exact formula. */ double xpn_x_sqr; /* [frc] Expansion parameter squared */ double xpn_sum=0.0; /* [frc] Expansion sum */ double xpn_nmr; /* [frc] Expansion term numerator */ double xpn_trm; /* [frc] Expansion term */ double xpn_dnm; /* [frc] Expansion term denominator */ const unsigned short int rdr_xpn=3; /* [nbr] Order of N in trigonometric series expansion */ unsigned short int idx_xpn; /* [idx] Index in series expansion */ xpn_x=cos(ngl_plr)*(1.0-cos(lon_dlt))/sin(lon_dlt); xpn_x_sqr=xpn_x*xpn_x; xpn_nmr=xpn_x; xpn_dnm=1.0; xpn_trm=xpn_nmr/xpn_dnm; xpn_sum+=xpn_trm; for(idx_xpn=3;idx_xpn<=rdr_xpn;idx_xpn+=2){ xpn_nmr*=xpn_x_sqr; xpn_dnm*=(idx_xpn-1)*idx_xpn; xpn_trm=xpn_nmr/xpn_dnm; xpn_sum+=xpn_trm; } /* !idx_xpn */ (void)fprintf(stdout,"%s: Small-circle area using series approximation...not implemented yet\n",nco_prg_nm_get()); } /* !0 */ if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stdout,"%s: INFO %s col_idx = %li triangle %d spherical area, latitude-triangle area, %% difference: %g, %g, %g%%\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,xcs_sph,xcs_sph+area_smc_crc,100.0*area_smc_crc/xcs_sph); if(fabs(area_smc_crc/xcs_sph) > 0.1){ (void)fprintf(stdout,"%s: DEBUG Non-spherical correction exceeds 10%% for current triangle with vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_ltr_a],lon_bnd[idx_ltr_a],lat_bnd[idx_ltr_b],lon_bnd[idx_ltr_b],lat_bnd[idx_ltr_c],lon_bnd[idx_ltr_c]); } /* !fabs */ } /* !dbg */ } /* !edg_typ && flg_tri_crr_smc */ } /* !tri_idx */ if(edg_typ == nco_edg_smc && flg_ply_has_smc){ /* Current gridcell contained at least one latitude-triangle */ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO %s col_idx = %li spherical area, small circle area, %% difference: %g, %g, %g%%\n",nco_prg_nm_get(),fnc_nm,col_idx,area[col_idx],area_smc,100.0*(area_smc-area[col_idx])/area[col_idx]); } /* !edg_typ && !flg_ply_has_smc */ } /*
!col_idx */ if(edg_typ == nco_edg_smc && nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO %s total spherical area, small circle area, %% difference, crc_ttl, crc_abs_ttl: %g, %g, %g%%, %g, %g\n",nco_prg_nm_get(),fnc_nm,area_ttl,area_smc_ttl,100.0*(area_smc_ttl-area_ttl)/area_ttl,area_smc_crc_ttl,area_smc_crc_abs_ttl); if(vrt_vld) vrt_vld=(long *)nco_free(vrt_vld); if(a_idx) a_idx=(long *)nco_free(a_idx); if(b_idx) b_idx=(long *)nco_free(b_idx); if(c_idx) c_idx=(long *)nco_free(c_idx); if(lat_bnd_rdn) lat_bnd_rdn=(double *)nco_free(lat_bnd_rdn); if(lon_bnd_rdn) lon_bnd_rdn=(double *)nco_free(lon_bnd_rdn); if(lat_bnd_cos) lat_bnd_cos=(double *)nco_free(lat_bnd_cos); if(lon_bnd_cos) lon_bnd_cos=(double *)nco_free(lon_bnd_cos); if(lat_bnd_sin) lat_bnd_sin=(double *)nco_free(lat_bnd_sin); if(lon_bnd_sin) lon_bnd_sin=(double *)nco_free(lon_bnd_sin); } /* !nco_sph_plg_area() */ int /* O [enm] Return code */ nco_rgr_tps /* [fnc] Regrid using TempestRemap library */ (rgr_sct * const rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Regrid fields using TempestRemap "library" (more precisely, executables) Routine was originally written to call Tempest executables However, that functionality was all placed into the ncremap shell script Thus this C-interface is currently unused TempestRemap2 has a library that may be accessed on-line Test Tempest library: no way to activate yet export DATA_TEMPEST='/data/zender/rgr';ncks -O --rgr=Y ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc */ const char fnc_nm[]="nco_rgr_tps()"; const int fmt_chr_nbr=6; const char *cmd_rgr_fmt; char *cmd_rgr; char fl_grd_dst[]="/tmp/foo_outRLLMesh.g"; char *fl_grd_dst_cdl; int rcd_sys; int lat_nbr_rqs=180; int lon_nbr_rqs=360; nco_rgr_tps_cmd nco_tps_cmd; /* [enm] TempestRemap command enum */ char *nvr_DATA_TEMPEST; /* [sng] Directory where Tempest grids, meshes, and weights are stored */ nvr_DATA_TEMPEST=getenv("DATA_TEMPEST"); rgr->drc_tps= (nvr_DATA_TEMPEST && strlen(nvr_DATA_TEMPEST) > 0L) ? (char *)strdup(nvr_DATA_TEMPEST) : (char *)strdup("/tmp"); if(nco_dbg_lvl_get() >= nco_dbg_crr){ (void)fprintf(stderr,"%s: INFO %s reports\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stderr,"drc_tps = %s, ",rgr->drc_tps ? 
rgr->drc_tps : "NULL"); (void)fprintf(stderr,"\n"); } /* endif dbg */ /* Allow for whitespace characters in fl_grd_dst Assume CDL translation results in acceptable name for shell commands */ fl_grd_dst_cdl=nm2sng_fl(fl_grd_dst); /* Construct and execute regridding command */ nco_tps_cmd=nco_rgr_GenerateRLLMesh; cmd_rgr_fmt=nco_tps_cmd_fmt_sng(nco_tps_cmd); cmd_rgr=(char *)nco_malloc((strlen(cmd_rgr_fmt)+strlen(fl_grd_dst_cdl)-fmt_chr_nbr+1UL)*sizeof(char)); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stderr,"%s: %s reports generating %d by %d RLL mesh in %s...\n",nco_prg_nm_get(),fnc_nm,lat_nbr_rqs,lon_nbr_rqs,fl_grd_dst); (void)sprintf(cmd_rgr,cmd_rgr_fmt,lat_nbr_rqs,lon_nbr_rqs,fl_grd_dst_cdl); rcd_sys=system(cmd_rgr); if(rcd_sys == -1){ (void)fprintf(stdout,"%s: ERROR %s unable to complete TempestRemap regridding command \"%s\"\n",nco_prg_nm_get(),fnc_nm,cmd_rgr); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"done\n"); /* Clean-up memory */ if(fl_grd_dst_cdl) fl_grd_dst_cdl=(char *)nco_free(fl_grd_dst_cdl); if(cmd_rgr) cmd_rgr=(char *)nco_free(cmd_rgr); return NCO_NOERR; } /* end nco_rgr_tps() */ const char * /* O [sng] String describing two-dimensional grid-type */ nco_grd_2D_sng /* [fnc] Convert two-dimensional grid-type enum to string */ (const nco_grd_2D_typ_enm nco_grd_2D_typ) /* I [enm] Two-dimensional grid-type enum */ { /* Purpose: Convert two-dimensional grid-type enum to string */ switch(nco_grd_2D_typ){ case nco_grd_2D_unk: return "Unknown, unclassified, or unrepresentable 2D grid type (e.g., unstructured, curvilinear, POP displaced-pole)"; case nco_grd_2D_gss: return "Gaussian latitude grid. Used by spectral transform models, e.g., CCM 1-3, CAM 1-3, ECMWF Forecast, LSM, MATCH, NCEP (R1, R2), UCICTM."; case nco_grd_2D_fv: return "Cap-latitude grid, aka FV-scalar grid (in Lin-Rood representation). When global (not regional) in extent and with odd number of latitudes, poles are considered at (and labeled as) centers of first and last gridcells. For example lat_ctr=-90,-89,-88,... and lat_crn=-89.5,-88.5,-87.5,... Thus pole-gridcells span half the equi-angular latitude increment of the rest of the grid. Used by CAM FV (i.e., CAM 4-6), ECMWF (ERA-I, ERA40, ERA5), GEOS-CHEM, UCICTM, UKMO."; case nco_grd_2D_eqa: return "Uniform/Equi-Angular latitude grid. Uniform/Equi-angle (everywhere) latitude grid. When global (not regional) in extent and with even number of latitudes, poles are at corners/edges of first and last gridcells. For example lat_ctr=-89.5,-88.5,-87.5,... and lat_crn=-90,-89,-88,.... When global, forms valid FV-staggered (aka FV-velocity, aka offset) grid (for Lin-Rood representation). 
Used by CIESIN/SEDAC, IGBP-DIS, NASA CMG, TOMS AAI, WOCE."; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_2D_sng() */ const char * /* O [sng] String describing latitude grid-type */ nco_grd_lat_sng /* [fnc] Convert latitude grid-type enum to string */ (const nco_grd_lat_typ_enm nco_grd_lat_typ) /* I [enm] Latitude grid-type enum */ { /* Purpose: Convert latitude grid-type enum to string */ switch(nco_grd_lat_typ){ case nco_grd_lat_unk: return "Unknown, unclassified, or unrepresentable latitude grid type (e.g., unstructured, curvilinear, POP3)"; case nco_grd_lat_gss: return "Gaussian latitude grid used by global spectral models: CCM 1-3, CAM 1-3, ECMWF Forecast, LSM, MATCH, NCEP (R1, R2), UCICTM."; case nco_grd_lat_fv: return "Cap-latitude grid, aka FV-scalar grid (in Lin-Rood representation). When global (not regional) in extent and with odd number of latitudes, poles are considered at (and labeled as) centers of first and last gridcells. For example lat_ctr=-90,-89,-88,... and lat_crn=-89.5,-88.5,-87.5,... Thus pole-gridcells span half the equi-angular latitude increment of the rest of the grid. Used by CAM FV (i.e., CAM 4-6), ECMWF (ERA-I, ERA40, ERA5), GEOS-CHEM, UCICTM, UKMO."; case nco_grd_lat_eqa: return "Uniform/Equi-Angular latitude grid. Uniform/Equi-angle (everywhere) latitude grid. When global (not regional) in extent and with even number of latitudes, poles are at corners/edges of first and last gridcells. For example lat_ctr=-89.5,-88.5,-87.5,... and lat_crn=-90,-89,-88,.... When global, forms valid FV-staggered (aka FV-velocity, aka offset) grid (for Lin-Rood representation). Used by CIESIN/SEDAC, IGBP-DIS, NASA CMG, TOMS AAI, WOCE."; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_lat_sng() */ const char * /* O [sng] String describing longitude grid-type */ nco_grd_lon_sng /* [fnc] Convert longitude grid-type enum to string */ (const nco_grd_lon_typ_enm nco_grd_lon_typ) /* I [enm] Longitude grid-type enum */ { /* Purpose: Convert longitude grid-type enum to string */ switch(nco_grd_lon_typ){ case nco_grd_lon_unk: return "Unknown, unclassified, or unrepresentable longitude grid type (e.g., unstructured, curvilinear)"; case nco_grd_lon_180_wst: return "Date line at west edge of first longitude cell"; case nco_grd_lon_180_ctr: return "Date line at center of first longitude cell"; case nco_grd_lon_Grn_wst: return "Greenwich at west edge of first longitude cell"; case nco_grd_lon_Grn_ctr: return "Greenwich at center of first longitude cell"; case nco_grd_lon_bb: return "Longitude grid determined by bounding box (lon_wst/lon_est) and gridcell number (lon_nbr)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_lon_sng() */ const char * /* O [sng] String describing grid extent */ nco_grd_xtn_sng /* [fnc] Convert two-dimensional grid-extent enum to string */ (const nco_grd_xtn_enm nco_grd_xtn) /* I [enm] Grid-extent enum */ { /* Purpose: Convert grid-extent enum to string */ switch(nco_grd_xtn){ case nco_grd_xtn_nil: return "Unknown"; case nco_grd_xtn_glb: return "Global"; case nco_grd_xtn_rgn: return "Regional"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* 
Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_xtn_sng() */ const char * /* O [sng] String describing grid conversion */ nco_rgr_grd_sng /* [fnc] Convert grid conversion enum to string */ (const nco_rgr_typ_enm nco_rgr_typ) /* I [enm] Grid conversion enum */ { /* Purpose: Convert grid conversion enum to string */ switch(nco_rgr_typ){ case nco_rgr_grd_1D_to_1D: return "1D_to_1D"; case nco_rgr_grd_1D_to_2D: return "1D_to_2D"; case nco_rgr_grd_2D_to_1D: return "2D_to_1D"; case nco_rgr_grd_2D_to_2D: return "2D_to_2D"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_grd_sng() */ const char * /* O [sng] String describing regridding method */ nco_rgr_mth_sng /* [fnc] Convert regridding method enum to string */ (const nco_rgr_mth_typ_enm nco_rgr_mth_typ) /* I [enm] Regridding method enum */ { /* Purpose: Convert regridding method enum to string */ switch(nco_rgr_mth_typ){ case nco_rgr_mth_conservative: return "Conservative remapping"; case nco_rgr_mth_bilinear: return "Bilinear remapping"; case nco_rgr_mth_none: return "none"; case nco_rgr_mth_unknown: return "Unknown (TempestRemap or ESMF_weight_only)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_mth_sng() */ const char * /* O [sng] String describing mapfile generator */ nco_rgr_mpf_sng /* [fnc] Convert mapfile generator enum to string */ (const nco_rgr_mpf_typ_enm nco_rgr_mpf_typ) /* I [enm] Mapfile generator enum */ { /* Purpose: Convert mapfile generator enum to string */ switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_ESMF: return "ESMF Offline Regridding Weight Generator (ERWG), either from ESMF_RegridWeightGen directly or via NCL"; case nco_rgr_mpf_SCRIP: return "SCRIP (original LANL package)"; case nco_rgr_mpf_Tempest: return "TempestRemap (GenerateOfflineMap)"; case nco_rgr_mpf_ESMF_weight_only: return "ESMF Offline Regridding Weight Generator (ERWG), either from ESMF_RegridWeightGen directly or via NCL, with --weight_only option from ERWG 7.1+"; case nco_rgr_mpf_NCO: return "netCDF Operators (NCO) Offline Regridding Weight Generator"; case nco_rgr_mpf_MBTR: return "MOAB-TempestRemap Online Regridding Weight Generator"; case nco_rgr_mpf_unknown: return "Unknown Weight Generator"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_mpf_sng() */ const char * /* O [sng] String describing regridding normalization */ nco_rgr_nrm_sng /* [fnc] Convert regridding normalization enum to string */ (const nco_rgr_nrm_typ_enm nco_rgr_nrm_typ) /* I [enm] Regridding normalization enum */ { /* Purpose: Convert regridding normalization enum to string */ switch(nco_rgr_nrm_typ){ case nco_rgr_nrm_fracarea: return "fracarea"; case nco_rgr_nrm_destarea: return "destarea"; case nco_rgr_nrm_none: return "none"; case nco_rgr_nrm_unknown: return "Unknown (possibilities include ESMF_weight_only, NCO, and TempestRemap)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_nrm_sng() */ const char * /* O [sng] String containing regridding command and format */ 
nco_tps_cmd_fmt_sng /* [fnc] Convert TempestRemap command enum to command string */ (const nco_rgr_tps_cmd nco_tps_cmd) /* I [enm] TempestRemap command enum */ { /* Purpose: Convert TempestRemap command enum to command string and format */ switch(nco_tps_cmd){ case nco_rgr_ApplyOfflineMap: return "ApplyOfflineMap"; case nco_rgr_CalculateDiffNorms: return "CalculateDiffNorms"; case nco_rgr_GenerateCSMesh: return "GenerateCSMesh --res %d --file %s"; case nco_rgr_GenerateGLLMetaData: return "GenerateGLLMetaData"; case nco_rgr_GenerateICOMesh: return "GenerateICOMesh"; case nco_rgr_GenerateLambertConfConicMesh: return "GenerateLambertConfConicMesh"; case nco_rgr_GenerateOfflineMap: return "GenerateOfflineMap --in_mesh %s --out_mesh %s --ov_mesh %s --in_data %s --out_data %s"; case nco_rgr_GenerateOverlapMesh: return "GenerateOverlapMesh --a %s --b %s --out %s"; case nco_rgr_GenerateRLLMesh: return "GenerateRLLMesh --lat %d --lon %d --file %s"; case nco_rgr_GenerateTestData: return "GenerateTestData --mesh %s --np %d --test %d --out %s"; case nco_rgr_MeshToTxt: return "MeshToTxt"; case nco_rgr_AAA_nil: case nco_rgr_ZZZ_last: default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_tps_cmd_fmt_sng() */ const char * /* O [sng] String containing regridding command name */ nco_tps_cmd_sng /* [fnc] Convert TempestRemap command enum to command name */ (const nco_rgr_tps_cmd nco_tps_cmd) /* I [enm] TempestRemap command enum */ { /* Purpose: Convert TempestRemap command enum to command string */ switch(nco_tps_cmd){ case nco_rgr_ApplyOfflineMap: return "ApplyOfflineMap"; case nco_rgr_CalculateDiffNorms: return "CalculateDiffNorms"; case nco_rgr_GenerateCSMesh: return "GenerateCSMesh"; case nco_rgr_GenerateGLLMetaData: return "GenerateGLLMetaData"; case nco_rgr_GenerateICOMesh: return "GenerateICOMesh"; case nco_rgr_GenerateLambertConfConicMesh: return "GenerateLambertConfConicMesh"; case nco_rgr_GenerateOfflineMap: return "GenerateOfflineMap"; case nco_rgr_GenerateOverlapMesh: return "GenerateOverlapMesh"; case nco_rgr_GenerateRLLMesh: return "GenerateRLLMesh"; case nco_rgr_GenerateTestData: return "GenerateTestData"; case nco_rgr_MeshToTxt: return "MeshToTxt"; case nco_rgr_AAA_nil: case nco_rgr_ZZZ_last: default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_tps_cmd_sng() */ int /* O [enm] Return code */ nco_grd_mk /* [fnc] Create SCRIP-format grid file */ (rgr_sct * const rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Use grid information to create SCRIP-format grid file Spherical geometry terminology: spherical cap = spherical dome = volume cut-off by plane spherical lune = digon = area bounded by two half-great circles = base of spherical wedge spherical segment = volume defined by cutting sphere with pair parallel planes spherical sector = volume subtended by lat1 spherical wedge = ungula = volume subtended by lon2-lon1 spherical zone = area of spherical segment excluding bases spherical quadrangle = area of intersection of spherical zone and lune (i.e., area of bearing = angle from true north geodesic = shortest path between points on a surface great circle = orthodrome = "straight path" = geodesic of the sphere convergency = difference (in azimuth?) 
between great circle tracks at two different positions conversion angle = angle between geodesic and rhumb line rhumb line = loxodrome = "oblique (or slanted) path" = line of constant azimuth Formulae: http://www.movable-type.co.uk/scripts/latlong.html # On-line Javascript implementation http://williams.best.vwh.net/avform.htm ACME: https://acme-svn2.ornl.gov/acme-repo/acme/mapping/grids https://acme-svn2.ornl.gov/acme-repo/acme/inputdata/cpl/gridmaps NCAR: yellowstone.ucar.edu:/glade/p/cesm/cseg/mapping/grids yellowstone.ucar.edu:/glade/p_old/cesm/cseg/mapping/grids Global RLL grids: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr grid=${DATA}/grids/180x360_SCRIP.20150901.nc --rgr latlon=180,360 --rgr lat_typ=eqa --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Equiangular grid 90x180' --rgr grid=${DATA}/grids/90x180_SCRIP.20150901.nc --rgr latlon=90,180 --rgr lat_typ=eqa --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc Maps for global RLL grids: ESMF_RegridWeightGen -s ${DATA}/grids/180x360_SCRIP.20150901.nc -d ${DATA}/grids/90x180_SCRIP.20150901.nc -w ${DATA}/maps/map_180x360_to_90x180.20150901.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/90x180_SCRIP.20150901.nc -d ${DATA}/grids/180x360_SCRIP.20150901.nc -w ${DATA}/maps/map_90x180_to_180x360.20150901.nc --method conserve ACME grids: ncks -O -D 1 --rgr ttl='FV-scalar grid 129x256' --rgr grid=${DATA}/grids/129x256_SCRIP.20150910.nc --rgr latlon=129,256 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='FV-scalar grid 257x512' --rgr grid=${DATA}/grids/257x512_SCRIP.20150910.nc --rgr latlon=257,512 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='FV-scalar grid 801x1600' --rgr grid=${DATA}/grids/801x1600_SCRIP.20150910.nc --rgr latlon=801,1600 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ACME maps: ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/129x256_SCRIP.20150910.nc -w ${DATA}/maps/map_ne30np4_to_fv129x256_aave.20150910.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/257x512_SCRIP.20150910.nc -w ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.20150910.nc --method bilinear ESMF_RegridWeightGen -s ${DATA}/grids/ne120np4_pentagons.100310.nc -d ${DATA}/grids/257x512_SCRIP.20150910.nc -w ${DATA}/maps/map_ne120np4_to_fv257x512_aave.20150910.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne120np4_pentagons.100310.nc -d ${DATA}/grids/801x1600_SCRIP.20150910.nc -w ${DATA}/maps/map_ne120np4_to_fv801x1600_bilin.20150910.nc --method bilinear AMWG grids: AMWG diagnostics (until ~2016) mis-diagnose FV grids with odd numbers of latitudes as Gaussian Grids ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 96x144 for horizontal resolution 1.9x2.5 degrees' --rgr grid=${DATA}/grids/96x144_SCRIP.20160301.nc --rgr latlon=96,144 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 192x288 for horizontal resolution 0.9x1.25 degrees' --rgr grid=${DATA}/grids/192x288_SCRIP.20160301.nc --rgr latlon=192,288 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 128x256 for horizontal resolution 1.4x1.4 degrees' --rgr grid=${DATA}/grids/128x256_SCRIP.20160301.nc --rgr latlon=128,256 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 256x512 for horizontal 
resolution 0.7x0.7 degrees' --rgr grid=${DATA}/grids/256x512_SCRIP.20160301.nc --rgr latlon=256,512 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 800x1600 for horizontal resolution 0.225x0.225 degrees' --rgr grid=${DATA}/grids/800x1600_SCRIP.20160301.nc --rgr latlon=800,1600 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Equiangular grid 360x720 produced by RTM' --rgr grid=${DATA}/grids/360x720rtm_SCRIP.20160301.nc --rgr latlon=360,720 --rgr lat_typ=eqa --rgr lon_typ=180_wst ~/nco/data/in.nc ~/foo.nc AMWG maps old method (no provenance archived): ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/128x256_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/256x512_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv256x512_bilin.20160301.nc --method bilinear ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/256x512_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv256x512_aave.20160301.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/800x1600_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv800x1600_bilin.20160301.nc --method bilinear AMWG maps with ncremap (preferred method): ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/128x256_SCRIP.20160301.nc -m ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/256x512_SCRIP.20160301.nc -m ${DATA}/maps/map_ne30np4_to_fv256x512_bilin.20160301.nc -w esmf -a bilinear ncremap -s ${DATA}/grids/ne120np4_pentagons.100310.nc -g ${DATA}/grids/256x512_SCRIP.20160301.nc -m ${DATA}/maps/map_ne120np4_to_fv256x512_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/ne120np4_pentagons.100310.nc -g ${DATA}/grids/800x1600_SCRIP.20160301.nc -m ${DATA}/maps/map_ne120np4_to_fv800x1600_bilin.20160301.nc -w esmf -a bilinear MPAS grids: NCO cannot yet generate MPAS grids, but given an MPAS grid it can generate appropriate maps MPAS maps: ncremap -s ${DATA}/grids/oEC60to30.SCRIP.150729.nc -g ${DATA}/grids/t62_SCRIP.20150901.nc -m ${DATA}/maps/map_oEC60to30_to_t62_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/oEC60to30.SCRIP.150729.nc -g ${DATA}/grids/t62_SCRIP.20150901.nc -m ${DATA}/maps/map_oEC60to30_to_t62_bilin.20160301.nc -w esmf -a bilinear Regional RLL grids: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr grid=${DATA}/sld/rgr/grd_dst.nc --rgr latlon=100,100 --rgr snwe=30.0,70.0,-120.0,-90.0 ~/nco/data/in.nc ~/foo.nc Global RLL skeleton: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr skl=${DATA}/sld/rgr/skl_180x360.nc --rgr grid=${DATA}/grids/180x360_SCRIP.20150901.nc --rgr latlon=180,360#lat_typ=eqa#lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc Curvilinear grids: ncks -O -D 1 --rgr ttl='Curvilinear grid 10x20. Degenerate case.' --rgr crv --rgr lon_crv=0.0 --rgr skl=${DATA}/sld/rgr/skl_crv.nc --rgr grid=${DATA}/sld/rgr/grd_crv.nc --rgr latlon=10,20 --rgr snwe=-5.0,5.0,-10.0,10.0 ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Curvilinear grid 10x20. 
Curvilinearity = 1.0 lon' --rgr lon_crv=1.0 --rgr skl=${DATA}/sld/rgr/skl_crv.nc --rgr grid=${DATA}/sld/rgr/grd_crv.nc --rgr latlon=10,20 --rgr snwe=-5.0,5.0,-10.0,10.0 ~/nco/data/in.nc ~/foo.nc 1-D Latitude (no longitude) grids: ncks -O -D 1 --rgr ttl='Latitude-only zonal grid' --rgr skl=${DATA}/sld/rgr/skl_lat_10dgr_uni.nc --rgr grid=${DATA}/sld/rgr/grd_lat_10dgr_uni.nc --rgr latlon=18,1 --rgr snwe=-90,90,0,360 ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Latitude-only zonal grid' --rgr skl=${DATA}/sld/rgr/skl_lat_05dgr_cap.nc --rgr grid=${DATA}/sld/rgr/grd_lat_05dgr_cap.nc --rgr latlon=37,1 --rgr snwe=-90,90,0,360 ~/nco/data/in.nc ~/foo.nc ncremap -i ${DATA}/sld/rgr/skl_lat_10dgr_uni.nc -d ${DATA}/sld/rgr/skl_lat_05dgr_cap.nc -m ${DATA}/maps/map_lat10uni_to_lat05cap_aave.nc -o ~/rgr/lat10to05.nc ESMF_RegridWeightGen -s ${DATA}/sld/rgr/grd_lat_10dgr_uni.nc -d ${DATA}/sld/rgr/grd_lat_05dgr_cap.nc -w ${DATA}/maps/map_lat10uni_to_lat05cap_aave.nc --method conserve */ const char fnc_nm[]="nco_grd_mk()"; /* [sng] Function name */ const double rdn2dgr=180.0/M_PI; const double dgr2rdn=M_PI/180.0; const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ const int itr_nbr_max=20; // [nbr] Maximum number of iterations const nc_type crd_typ=NC_DOUBLE; char *fl_out_tmp=NULL_CEWI; char *fl_out; char grd_area_nm[]="grid_area"; /* 20150830: NB ESMF_RegridWeightGen --user_areas looks for variable named "grid_area" */ char dmn_sz_nm[]="grid_dims"; char grd_crn_lat_nm[]="grid_corner_lat"; char grd_crn_lon_nm[]="grid_corner_lon"; char grd_crn_nm[]="grid_corners"; char grd_ctr_lat_nm[]="grid_center_lat"; char grd_ctr_lon_nm[]="grid_center_lon"; char grd_rnk_nm[]="grid_rank"; char grd_sz_nm[]="grid_size"; char msk_nm[]="grid_imask"; double *grd_ctr_lat; /* [dgr] Latitude centers of grid */ double *grd_ctr_lon; /* [dgr] Longitude centers of grid */ double *grd_crn_lat; /* [dgr] Latitude corners of grid */ double *grd_crn_lon; /* [dgr] Longitude corners of grid */ double *area; /* [sr] Area of grid */ double *lat_bnd=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular grid */ double *lat_crn=NULL; /* [dgr] Latitude corners of rectangular grid */ double *lat_ctr=NULL_CEWI; /* [dgr] Latitude centers of rectangular grid */ double *lat_ntf=NULL; /* [dgr] Latitude interfaces of rectangular grid */ double *lat_wgt=NULL; /* [dgr] Latitude weights of rectangular grid */ double *lon_bnd=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular grid */ double *lon_crn=NULL; /* [dgr] Longitude corners of rectangular grid */ double *lon_ctr=NULL_CEWI; /* [dgr] Longitude centers of rectangular grid */ double *lon_ntf=NULL; /* [dgr] Longitude interfaces of rectangular grid */ double area_ttl=0.0; /* [frc] Exact sum of area */ double lat_crv; /* [dgr] Latitudinal curvilinearity */ double lon_crv; /* [dgr] Longitudinal curvilinearity */ double lat_nrt; /* [dgr] Latitude of northern edge of grid */ double lat_sth; /* [dgr] Latitude of southern edge of grid */ double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */ double lat_wgt_gss; /* [frc] Latitude weight estimated from interface latitudes */ double lon_est; /* [dgr] Longitude of eastern edge of grid */ double lon_wst; /* [dgr] Longitude of western edge of grid */ double lon_ncr; /* [dgr] Longitude increment */ double lat_ncr; /* [dgr] Latitude 
increment */ double lon_spn; /* [dgr] Longitude span */ double lat_spn; /* [dgr] Latitude span */ double *wgt_Gss=NULL; // [frc] Gaussian weights double precision int *msk=NULL; /* [flg] Mask of grid */ int *dmn_sz_int; /* [nbr] Array of dimension sizes of grid */ int dmn_ids[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NC_FORMAT_CLASSIC; /* [enm] Output file format */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int area_id; /* [id] Area variable ID */ int dmn_id_grd_crn; /* [id] Grid corners dimension ID */ int dmn_id_grd_rnk; /* [id] Grid rank dimension ID */ int dmn_id_grd_sz; /* [id] Grid size dimension ID */ int dmn_sz_int_id; /* [id] Grid dimension sizes ID */ int grd_crn_lat_id; /* [id] Grid corner latitudes variable ID */ int grd_crn_lon_id; /* [id] Grid corner longitudes variable ID */ int grd_ctr_lat_id; /* [id] Grid center latitudes variable ID */ int grd_ctr_lon_id; /* [id] Grid center longitudes variable ID */ int itr_cnt; /* Iteration counter */ int msk_id; /* [id] Mask variable ID */ long dmn_srt[dmn_nbr_grd_max]; long dmn_cnt[dmn_nbr_grd_max]; long bnd_nbr; /* [nbr] Number of bounds in gridcell */ long col_nbr; /* [nbr] Number of columns in grid */ long crn_idx; /* [idx] Counting index for corners */ long grd_crn_nbr; /* [nbr] Number of corners in gridcell */ long grd_rnk_nbr; /* [nbr] Number of dimensions in grid */ long grd_sz_nbr; /* [nbr] Number of gridcells in grid */ long idx2; /* [idx] Counting index for unrolled grids */ long idx; /* [idx] Counting index for unrolled grids */ long lat_idx2; /* [idx] Counting index for unrolled latitude */ long lat_idx; long lat_nbr; /* [nbr] Number of latitudes in grid */ long lon_idx2; /* [idx] Counting index for unrolled longitude */ long lon_idx; long lon_nbr; /* [nbr] Number of longitudes in grid */ nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=True; /* Option O */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=rgr->flg_uio; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool WRT_TMP_FL=False; /* [flg] Write output to temporary file */ nco_bool flg_grd_1D=False; nco_bool flg_grd_2D=False; nco_bool flg_grd_crv=False; nco_bool flg_s2n=True; /* I [enm] Latitude grid-direction is South-to-North */ nco_grd_2D_typ_enm grd_typ; /* [enm] Grid-type enum */ nco_grd_lat_drc_enm lat_drc; /* [enm] Latitude grid-direction enum */ nco_grd_lat_typ_enm lat_typ; /* [enm] Latitude grid-type enum */ nco_grd_lon_typ_enm lon_typ; /* [enm] Longitude grid-type enum */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ dfl_lvl=rgr->dfl_lvl; grd_typ=rgr->grd_typ; /* [enm] Grid type */ fl_out=rgr->fl_grd; fl_out_fmt=rgr->fl_out_fmt; lat_drc=rgr->lat_drc; /* [enm] Latitude grid direction */ lat_typ=rgr->lat_typ; /* [enm] Latitude grid type */ lon_typ=rgr->lon_typ; /* [enm] Longitude grid type */ lat_nbr=rgr->lat_nbr; /* [nbr] Number of latitudes in grid */ lon_nbr=rgr->lon_nbr; /* [nbr] Number of longitudes in grid */ lat_crv=rgr->lat_crv; /* [dgr] Latitude curvilinearity */ lon_crv=rgr->lon_crv; /* [dgr] Longitude curvilinearity */ lat_sth=rgr->lat_sth; /* [dgr] Latitude of southern edge of grid */ lon_wst=rgr->lon_wst; /* [dgr] Longitude of western edge of grid */ 
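/* Worked example (editorial illustration derived from the bounding-box rules below, not in original source):
   lon_nbr=360 with lon_typ=Grn_ctr and no explicit bounds yields
   lon_wst=0.0, lon_est=360.0, lon_spn=360.0, lon_ncr=1.0
   Centering then shifts lon_wst to -0.5 and re-derives lon_est=359.5, so
   interfaces are lon_ntf = -0.5, 0.5, ..., 359.5 and centers are lon_ctr = 0.0, 1.0, ..., 359.0,
   i.e., the "1x1 degree global grid with first longitude centered at Greenwich" case documented below */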
lat_nrt=rgr->lat_nrt; /* [dgr] Latitude of northern edge of grid */ lon_est=rgr->lon_est; /* [dgr] Longitude of eastern edge of grid */ /* Use curvilinear coordinates (lat and lon are 2D arrays) if flg_crv already set or if lat_crv or lon_crv set */ if(lat_crv != 0.0 || lon_crv != 0.0 || rgr->flg_crv) flg_grd_crv=True; if(lat_drc == nco_grd_lat_drc_n2s) flg_s2n=False; /* Assume 2D grid */ flg_grd_2D=True; grd_rnk_nbr=dmn_nbr_2D; /* Assume quadrilaterals */ grd_crn_nbr=4; /* Assume rectangles */ bnd_nbr=2; col_nbr=lat_nbr*lon_nbr; grd_sz_nbr=lat_nbr*lon_nbr; /* Allocate space for output data */ area=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); dmn_sz_int=(int *)nco_malloc(grd_rnk_nbr*nco_typ_lng((nc_type)NC_INT)); msk=(int *)nco_malloc(grd_sz_nbr*nco_typ_lng((nc_type)NC_INT)); lat_bnd=(double *)nco_malloc(lat_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lat_crn=(double *)nco_malloc(lat_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lon_bnd=(double *)nco_malloc(lon_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(lon_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(lon_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); wgt_Gss=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); grd_ctr_lat=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_ctr_lon=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lat=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lon=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); /* Define variable values */ int lon_psn=int_CEWI; /* [idx] Ordinal position of longitude in rectangular grid dimension-size array */ int lat_psn=int_CEWI; /* [idx] Ordinal position of latitude in rectangular grid dimension-size array */ if(grd_rnk_nbr == dmn_nbr_2D){ lon_psn=0; /* SCRIP introduced [lon,lat] convention because more natural for Fortran */ lat_psn=1; } /* !flg_grd_in_2D */ dmn_sz_int[lon_psn]=lon_nbr; dmn_sz_int[lat_psn]=lat_nbr; for(idx=0;idx<grd_sz_nbr;idx++) msk[idx]=1; /* Compute rectangular arrays NB: Much is a more-generic rewrite of map/map_grd.F90:map_grd_mk() */ /* 20150827: Old rule: Longitude grid was entirely specified by one of four longitude map tokens: Grn_ctr,Grn_wst,180_ctr,180_wst New rule: User may specify bounds (lon_wst,lon_est,lat_sth,lat_nrt) independently of grid token Such bounds ALWAYS refer to bounding box interface edges, NEVER to centers of first and last gridcells Bounds and number of gridcells completely determine uniform grid so former longitude-type tokens have no effect when bounds specified (so letting grid-type tokens affect grid would over-determine grid and lead to errors) Hence, grid-type tokens may be used as short-hand to specify grids but may not be required to exist later (because regional grids would not have specified them) Grid-type tokens lon_bb/lat_bb imply bounding box was originally used to specify bounds 1x1 degree global grid with first longitude centered at Greenwich: --lon_nbr=360 --lon_typ Grn_ctr --lon_nbr=360 --lon_wst=-0.5 --lon_est=359.5 1x1 degree global grid with Greenwich at west edge of first longitude: --lon_nbr=360 --lon_typ Grn_wst --lon_nbr=360 --lon_wst=0.0 --lon_est=360.0 1x1 degree regional grid, total size 9x9 degrees, Greenwich at center of middle gridcell: --lon_nbr=9 --lon_wst=-4.5 --lon_est=4.5 1x1
degree regional grid, total size 10x10 degrees, Greenwich at east/west edges of middle two gridcells --lon_nbr=10 --lon_wst=-5.0 --lon_est=5.0 */ /* Were east/west longitude bounds set explicitly or implicitly? NB: This is redundant since it was done in nco_rgr_ini(), yet better safe than sorry */ if(lon_wst != NC_MAX_DOUBLE || lon_est != NC_MAX_DOUBLE) lon_typ=rgr->lon_typ=nco_grd_lon_bb; if(lon_wst == NC_MAX_DOUBLE){ /* Precomputed longitude grids begin with longitude 0.0 or -180.0 degrees */ switch(lon_typ){ case nco_grd_lon_bb: case nco_grd_lon_Grn_ctr: case nco_grd_lon_Grn_wst: lon_wst=0.0; break; case nco_grd_lon_180_ctr: case nco_grd_lon_180_wst: lon_wst=-180.0; break; default: nco_dfl_case_generic_err(); break; } /* !lon_typ */ } /* !lon */ if(lon_est == NC_MAX_DOUBLE){ /* Precomputed longitude grids end with longitude 360.0 or 180.0 degrees */ switch(lon_typ){ case nco_grd_lon_bb: case nco_grd_lon_Grn_ctr: case nco_grd_lon_Grn_wst: lon_est=360.0; break; case nco_grd_lon_180_ctr: case nco_grd_lon_180_wst: lon_est=180.0; break; default: nco_dfl_case_generic_err(); break; } /* !lon_typ */ } /* !lon */ /* Determine longitude increment from span of pre-centered bounding box (centering will not change span) */ lon_spn=lon_est-lon_wst; lon_ncr=lon_spn/lon_nbr; /* Centering: If user did not set explicit longitude bounds then... */ if(lon_typ != nco_grd_lon_bb) /* map_lon_ctr_typ determines whether lon_wst refers to cell center or Western edge */ if((lon_typ == nco_grd_lon_Grn_ctr) || (lon_typ == nco_grd_lon_180_ctr)) lon_wst=lon_wst-(lon_ncr/2.0); /* Re-derive lon_est from lon_wst and lon_nbr (more fundamental properties) */ lon_est=lon_wst+lon_ncr*lon_nbr; /* lon_wst and lon_est have been set and will not change */ assert(lon_wst < lon_est); lon_ntf[0L]=lon_wst; lon_ntf[lon_nbr]=lon_est; for(lon_idx=1L;lon_idx<lon_nbr;lon_idx++) lon_ntf[lon_idx]=lon_ntf[0L]+lon_idx*lon_ncr; /* Ensure rounding errors do not produce unphysical grid */ lon_ntf[lon_nbr]=lon_ntf[0L]+lon_spn; /* Finished with longitude, now tackle latitude */ /* Were south/north latitude bounds set explicitly or implicitly? */ // if(lat_sth != NC_MAX_DOUBLE || lat_nrt != NC_MAX_DOUBLE) lon_typ=rgr->lat_typ=nco_grd_lat_bb; if(lat_sth == NC_MAX_DOUBLE) lat_sth=-90.0; if(lat_nrt == NC_MAX_DOUBLE) lat_nrt=90.0; /* Determine latitude increment from span of pre-centered bounding box (centering will not change span) */ lat_spn=lat_nrt-lat_sth; lat_ncr=lat_spn/lat_nbr; const long lat_nbr_hlf=lat_nbr/2L; // [nbr] Half number of latitudes (e.g., lat_nbr_hlf=32 for lat_nbr=64 and 65) double *lat_sin=NULL; // [frc] Sine of Gaussian latitudes double precision /* Create S->N grid. 
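(Constructing south-to-north first keeps the Gaussian interface iteration and hemispheric-symmetry logic below direction-agnostic; for n2s grids the !flg_s2n block further down flips centers, weights, and interfaces afterwards.)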
If user requested N->S, flip grid at end */ // if(flg_s2n) lat_ntf[0L]=lat_sth; else lat_ntf[0L]=lat_nrt; lat_ntf[0L]=lat_sth; switch(lat_typ){ case nco_grd_lat_fv: lat_ncr=lat_spn/(lat_nbr-1L); lat_ntf[1L]=lat_ntf[0L]+0.5*lat_ncr; for(lat_idx=2L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=lat_ntf[1L]+(lat_idx-1L)*lat_ncr; break; case nco_grd_lat_eqa: lat_ncr=lat_spn/lat_nbr; for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=lat_ntf[0L]+lat_idx*lat_ncr; break; case nco_grd_lat_gss: lat_sin=(double *)nco_malloc(lat_nbr*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr,True,lat_sin,wgt_Gss); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); /* First guess for lat_ntf is midway between Gaussian abscissae */ for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=0.5*(lat_ctr[lat_idx-1L]+lat_ctr[lat_idx]); /* Iterate guess until area between interfaces matches Gaussian weight (compute for one hemisphere, make other symmetric) */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++){ double fofx_at_x0; /* [frc] Function to iterate evaluated at current guess */ double dfdx_at_x0; /* [frc] Derivative of equation evaluated at current guess */ const double eps_rlt_cnv=1.0e-15; // Convergence criterion (1.0e-16 pushes double precision to the brink) itr_cnt=0; lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; while(fabs(fofx_at_x0) > eps_rlt_cnv){ /* Newton-Raphson iteration: Let x=lat_ntf[lat_idx], y0=lat_ntf[lat_idx-1L], gw = Gaussian weight (exact solution) f(x)=sin(dgr2rdn*x)-sin(dgr2rdn*y0)-gw=0 # s2n grid f(x)=sin(dgr2rdn*y0)-sin(dgr2rdn*x)-gw=0 # n2s grid dfdx(x)= dgr2rdn*cos(dgr2rdn*x) # s2n grid dfdx(x)=-dgr2rdn*cos(dgr2rdn*x) # n2s grid x_better=x0-f(x0)/f'(x0) */ dfdx_at_x0=dgr2rdn*cos(dgr2rdn*lat_ntf[lat_idx]); /* 20190613: n2s latitudes are constructed s2n and flipped to n2s later Hence next line is commented-out in construction mode but used in infer mode */ // if(!flg_s2n) dfdx_at_x0=-dfdx_at_x0; lat_ntf[lat_idx]+=fofx_at_x0/dfdx_at_x0; /* NB: sign is + (not -) because fofx_at_x0 = -f(x0) (it holds gw minus the current weight), so x_better=x0-f(x0)/f'(x0)=x0+fofx_at_x0/dfdx_at_x0 */ lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; if(++itr_cnt > itr_nbr_max){ (void)fprintf(stdout,"%s: ERROR %s reports convergence of only %g after %d iterations for lat_idx = %ld\n",nco_prg_nm_get(),fnc_nm,fabs(fofx_at_x0),itr_nbr_max,lat_idx); nco_exit(EXIT_FAILURE); } /* endif */ } /* !while */ } /* !lat_idx */ /* Use Gaussian grid symmetry to obtain same interfaces in both hemispheres (avoids cumulative rounding errors) */ if(lat_nbr%2){ /* lat_nbr is odd */ for(lat_idx=1L;lat_idx<=lat_nbr_hlf+1L;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx+1L]; }else{ /* lat_nbr is even */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx]; } /* !flg_lat_evn */ break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Ensure rounding errors do not produce unphysical grid */ lat_ntf[lat_nbr]=lat_nrt; if(nco_dbg_lvl_get() > nco_dbg_old){ (void)fprintf(stderr,"%s: DEBUG %s Gaussian abscissae/interfaces for lat_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,lat_nbr); (void)fprintf(stderr,"idx\tlat_ctr\tlat_ntf\tntf_p1\n"); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ (void)fprintf(stderr,"%ld\t%20.15f\t%20.15f\t%20.15f\n",lat_idx,lat_ctr[lat_idx],lat_ntf[lat_idx],lat_ntf[lat_idx+1L]); } /* !lat_idx */ } /* !dbg */ /* Always define longitude
centers midway between interfaces */ for(lon_idx=0L;lon_idx<=lon_nbr-1L;lon_idx++) lon_ctr[lon_idx]=0.5*(lon_ntf[lon_idx]+lon_ntf[lon_idx+1L]); /* Many grids have center latitude equally spaced between interfaces */ if(lat_typ != nco_grd_lat_fv && lat_typ != nco_grd_lat_gss){ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=0.5*(lat_ntf[lat_idx]+lat_ntf[lat_idx+1L]); } /* !lat_typ */ /* Cap grids excepted---they place centers of first/last gridcells at poles */ if(lat_typ == nco_grd_lat_fv){ lat_ctr[0L]=lat_ntf[0L]; for(lat_idx=1L;lat_idx<lat_nbr-1L;lat_idx++) lat_ctr[lat_idx]=0.5*(lat_ntf[lat_idx]+lat_ntf[lat_idx+1L]); lat_ctr[lat_nbr-1L]=lat_ntf[lat_nbr]; } /* !cap */ /* Gaussian grid centerpoints are defined by solutions to Legendre polynomials */ if(lat_typ == nco_grd_lat_gss){ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); } /* !Gaussian */ for(idx=0L;idx<lon_nbr;idx++){ lon_bnd[2*idx]=lon_ntf[idx]; lon_bnd[2*idx+1L]=lon_ntf[idx+1L]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ lat_bnd[2*idx]=lat_ntf[idx]; lat_bnd[2*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0L;idx<lat_nbr;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr[idx]); for(int bnd_idx=0L;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? "]\n" : ", "); } /* end loop over lat */ } /* endif dbg */ /* Use centers and boundaries to diagnose latitude weights */ switch(lat_typ){ case nco_grd_lat_eqa: case nco_grd_lat_fv: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); break; case nco_grd_lat_gss: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=wgt_Gss[lat_idx]; break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Fuzzy test of latitude weight normalization 20180903 Tolerance threshold of eps_rlt_max=1.0e-14 is too strict for Gaussian grids somewhere lat_nbr >~ 150 20180904 Tolerance threshold of eps_rlt_max=1.0e-12 allows Gaussian grids like ECMWF O1280 Newton-Raphson method of interface determination may need improvement to fix that Tolerance threshold of 1.0e-14 works for all relevant E3SM Uniform and Cap grids */ //const double eps_rlt_max=1.0e-14; /* [frc] Round-off error tolerance: Used 1.0e-14 until 20180904 */ const double eps_rlt_max=1.0e-12; /* [frc] Round-off error tolerance: Used 1.0e-12 since 20180904 */ lat_wgt_ttl=0.0; for(idx=0L;idx<lat_nbr;idx++) lat_wgt_ttl+=lat_wgt[idx]; if(grd_typ == nco_grd_2D_fv || grd_typ == nco_grd_2D_eqa){ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd[2*(lat_nbr-1)+1L])-sin(dgr2rdn*lat_bnd[0L])); if(fabs(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc) > eps_rlt_max){ (void)fprintf(stdout,"%s: ERROR %s reports grid normalization does not meet precision tolerance eps_rlt_max = %20.15f\nlat_wgt_ttl = %20.15f, lat_wgt_ttl_xpc = %20.15f, lat_wgt_frc = %20.15f, eps_rlt = %20.15f\n",nco_prg_nm_get(),fnc_nm,eps_rlt_max,lat_wgt_ttl,lat_wgt_ttl_xpc,lat_wgt_ttl/lat_wgt_ttl_xpc,1.0-lat_wgt_ttl/lat_wgt_ttl_xpc); nco_exit(EXIT_FAILURE); } /* !imprecise */ } /* !nco_grd_lat_eqa, !nco_grd_lat_fv */ /* 20180831 Code above assumes grids run S->N User can request N->S grids with --rgr lat_drc=n2s If so, flip grid before unrolling into output arrays */ if(!flg_s2n){ double *lat_ctr_tmp=NULL_CEWI; /* [dgr] Temporary Latitude centers of rectangular grid */ double 
*lat_wgt_tmp=NULL; /* [dgr] Temporary Latitude weights of rectangular grid */ double *lat_ntf_tmp=NULL; /* [dgr] Temporary Latitude interfaces of rectangular grid */ lat_ctr_tmp=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lat_ntf_tmp=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt_tmp=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); long tmp_idx; /* [idx] Temporary index for swapping values */ for(idx=0L;idx<lat_nbr;idx++){ lat_ctr_tmp[idx]=lat_ctr[idx]; lat_wgt_tmp[idx]=lat_wgt[idx]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ tmp_idx=lat_nbr-idx-1L; lat_ctr[idx]=lat_ctr_tmp[tmp_idx]; lat_wgt[idx]=lat_wgt_tmp[tmp_idx]; } /* !idx */ for(idx=0L;idx<lat_nbr+1L;idx++){ lat_ntf_tmp[idx]=lat_ntf[idx]; } /* !idx */ for(idx=0L;idx<lat_nbr+1L;idx++){ tmp_idx=lat_nbr+1L-idx-1L; /* NB: Subtle index difference */ lat_ntf[idx]=lat_ntf_tmp[tmp_idx]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ lat_bnd[2*idx]=lat_ntf[idx]; lat_bnd[2*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ if(lat_ctr_tmp) lat_ctr_tmp=(double *)nco_free(lat_ctr_tmp); if(lat_ntf_tmp) lat_ntf_tmp=(double *)nco_free(lat_ntf_tmp); if(lat_wgt_tmp) lat_wgt_tmp=(double *)nco_free(lat_wgt_tmp); } /* !flg_s2n */ assert(grd_crn_nbr == 4); for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_ntf[lon_idx]; lon_crn[idx+1L]=lon_ntf[lon_idx+1L]; lon_crn[idx+2L]=lon_ntf[lon_idx+1L]; lon_crn[idx+3L]=lon_ntf[lon_idx]; } /* !lon_idx */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_ntf[lat_idx]; lat_crn[idx+1L]=lat_ntf[lat_idx]; lat_crn[idx+2L]=lat_ntf[lat_idx+1L]; lat_crn[idx+3L]=lat_ntf[lat_idx+1L]; } /* !lat_idx */ /* Stuff rectangular arrays into unrolled arrays */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]=lat_ctr[lat_idx]; grd_ctr_lon[idx]=lon_ctr[lon_idx]; for(crn_idx=0L;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; grd_crn_lon[idx2]=lon_crn[lon_idx2]; } /* !crn */ } /* !lon */ } /* !lat */ if(flg_grd_crv){ /* Impose curvilinearity by adding lon_crv offset to each row relative to previous row, and lat_crv offset to each column relative to previous column */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]+=lon_idx*lat_crv; grd_ctr_lon[idx]+=lat_idx*lon_crv; for(crn_idx=0L;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; grd_crn_lon[idx2]=lon_crn[lon_idx2]; if(crn_idx == 0L || crn_idx == 1L){ grd_crn_lat[idx2]+=lat_idx*lat_crv; /* LL, LR */ grd_crn_lon[idx2]+=lat_idx*lon_crv; /* LL, LR */ }else if(crn_idx == 2L || crn_idx == 3L){ grd_crn_lat[idx2]+=(lat_idx+1L)*lat_crv; /* UL, UR */ grd_crn_lon[idx2]+=(lat_idx+1L)*lon_crv; /* UL, UR */ } /* !crn */ } /* !crn */ } /* !lon */ } /* !lat */ } /* !flg_grd_crv */ /* 20190613: Convert CW quadrilaterals to CCW quadrilaterals so TempestRemap accepts grids Default construction/inferral method orders corners CCW and CW for s2n and n2s grids, respectively */ if(!flg_s2n){ nco_bool flg_ccw; /* [flg] Gridcell is CCW */ const int rcr_lvl=1; /* [nbr] Recursion level (1 is top level, 2 and greater are recursed) */ const int idx_ccw=0; /* [idx] Index of starting vertex for CCW check (Point A = tail side AB)
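NB (editorial note): CCW vertex ordering is what TempestRemap expects per the 20190613 note above; nco_ccw_chk() presumably repairs CW cells in place, and the DEBUG message below reports each attempted CW-to-CCW change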
  for(idx=0L;idx<grd_sz_nbr;idx++){
    idx2=grd_crn_nbr*idx;
    flg_ccw=nco_ccw_chk(grd_crn_lat+idx2,grd_crn_lon+idx2,grd_crn_nbr,idx_ccw,rcr_lvl);
    if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_vec) (void)fprintf(stderr,"%s: DEBUG %s reports nco_ccw_chk() tried to change idx = %ld from CW to CCW\n",nco_prg_nm_get(),fnc_nm,idx);
  } /* !idx */
} /* !flg_s2n */
if(nco_dbg_lvl_get() >= nco_dbg_std){
  long int idx_crn_ll;
  long int idx_crn_lr;
  long int idx_crn_ur;
  long int idx_crn_ul;
  long idx_dbg;
  idx_dbg=rgr->idx_dbg;
  idx_crn_ll=grd_crn_nbr*idx_dbg+0L;
  idx_crn_lr=grd_crn_nbr*idx_dbg+1L;
  idx_crn_ur=grd_crn_nbr*idx_dbg+2L;
  idx_crn_ul=grd_crn_nbr*idx_dbg+3L;
  (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,grd_ctr_lat[idx_dbg],grd_ctr_lon[idx_dbg],grd_crn_lat[idx_crn_ll],grd_crn_lon[idx_crn_ll],grd_crn_lat[idx_crn_lr],grd_crn_lon[idx_crn_lr],grd_crn_lat[idx_crn_ur],grd_crn_lon[idx_crn_ur],grd_crn_lat[idx_crn_ul],grd_crn_lon[idx_crn_ul]);
} /* !dbg */
if(flg_grd_crv){
  /* Area of arbitrary curvilinear grids requires spherical trigonometry */
  nco_sph_plg_area(rgr,grd_crn_lat,grd_crn_lon,grd_sz_nbr,grd_crn_nbr,area);
}else{
  /* Area of rectangular spherical zones from elementary calculus results
     20150906: Half-angle formulae for better conditioning improve area normalization for 801x1600 by 2.0e-15
     area[lat_idx*lon_nbr+lon_idx]=dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*2.0*(sin(0.5*dgr2rdn*lat_bnd[2*lat_idx+1L])*cos(0.5*dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(0.5*dgr2rdn*lat_bnd[2*lat_idx])*cos(0.5*dgr2rdn*lat_bnd[2*lat_idx]));
     Gain not worth the extra complexity */
  for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++)
    for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++)
      /* fabs() ensures positive area in n2s grids */
      area[lat_idx*lon_nbr+lon_idx]=fabs(dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*(sin(dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(dgr2rdn*lat_bnd[2*lat_idx])));
} /* !flg_grd_2D */
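/* Illustrative check (not in the original source): for a hypothetical 1x1 degree
   cell straddling the Equator the zonal-band formula above gives
   A = (pi/180)*(sin(0.5 dgr)-sin(-0.5 dgr)) ~= 3.0462e-04 sr,
   and integrating the same formula over the whole sphere yields exactly 4*pi sr,
   the total diagnosed by the frc_area printout below */
if(nco_dbg_lvl_get() >= nco_dbg_sbr){
  const double eg_area=dgr2rdn*1.0*(sin(dgr2rdn*0.5)-sin(dgr2rdn*(-0.5))); /* [sr] Example equatorial gridcell area */
  (void)fprintf(stderr,"%s: DEBUG example 1x1 degree equatorial gridcell area = %g sr\n",nco_prg_nm_get(),eg_area);
} /* !dbg */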
if(nco_dbg_lvl_get() >= nco_dbg_sbr){
  lat_wgt_ttl=0.0;
  area_ttl=0.0;
  if(flg_grd_2D){
    (void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm);
    for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt_ttl+=lat_wgt[lat_idx];
  } /* !flg_grd_2D */
  for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++)
    for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++)
      area_ttl+=area[lat_idx*lon_nbr+lon_idx];
  (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_ttl,area_ttl/(4.0*M_PI));
  assert(area_ttl > 0.0);
  assert(area_ttl <= 4.0*M_PI);
} /* !dbg */
/* Open grid file */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id);
/* Define dimensions */
rcd=nco_def_dim(out_id,grd_crn_nm,grd_crn_nbr,&dmn_id_grd_crn);
rcd=nco_def_dim(out_id,grd_sz_nm,grd_sz_nbr,&dmn_id_grd_sz);
rcd=nco_def_dim(out_id,grd_rnk_nm,grd_rnk_nbr,&dmn_id_grd_rnk);
int shuffle; /* [flg] Turn-on shuffle filter */
int deflate; /* [flg] Turn-on deflate filter */
deflate=(int)True;
shuffle=NC_SHUFFLE;
/* Define variables */
(void)nco_def_var(out_id,dmn_sz_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_rnk,&dmn_sz_int_id); /* NB: Too small to deflate */
(void)nco_def_var(out_id,grd_area_nm,(nc_type)crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&area_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_sz,&msk_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,grd_ctr_lat_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lat_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lat_id,shuffle,deflate,dfl_lvl);
(void)nco_def_var(out_id,grd_ctr_lon_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lon_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lon_id,shuffle,deflate,dfl_lvl);
dmn_ids[0]=dmn_id_grd_sz;
dmn_ids[1]=dmn_id_grd_crn;
(void)nco_def_var(out_id,grd_crn_lat_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lat_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lat_id,shuffle,deflate,dfl_lvl);
dmn_ids[0]=dmn_id_grd_sz;
dmn_ids[1]=dmn_id_grd_crn;
(void)nco_def_var(out_id,grd_crn_lon_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lon_id);
if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lon_id,shuffle,deflate,dfl_lvl);
/* Define global and "units" attributes */
char *att_val;
rcd=nco_char_att_put(out_id,NULL,"title",rgr->grd_ttl);
rcd=nco_char_att_put(out_id,NULL,"Conventions","SCRIP");
const char usr_cpp[]=TKN2SNG(USER); /* [sng] Hostname from C pre-processor */
rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp);
rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO");
(void)nco_hst_att_cat(out_id,rgr->cmd_ln);
(void)nco_vrs_att_cat(out_id);
rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ));
rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ));
rcd=nco_char_att_put(out_id,dmn_sz_nm,"long_name","Size(s) of horizontal dimensions (in Fortran storage order for historical reasons)");
rcd=nco_char_att_put(out_id,grd_area_nm,"long_name","Solid Angle Subtended on Source Grid");
rcd=nco_char_att_put(out_id,grd_area_nm,"standard_name","solid_angle");
rcd=nco_char_att_put(out_id,grd_area_nm,"units","steradian");
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"long_name","Latitude of Grid Cell Centers");
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"standard_name","latitude");
if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"bounds",grd_crn_lat_nm);
rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"long_name","Longitude of Grid Cell Centers");
rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"standard_name","longitude");
if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"bounds",grd_crn_lon_nm);
rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"long_name","Latitude of Grid Cell Vertices");
rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"standard_name","latitude");
if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"long_name","Longitude of Grid Cell Vertices");
rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"standard_name","longitude");
if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */
rcd=nco_char_att_put(out_id,msk_nm,"long_name","Binary Integer Mask for Grid");
rcd=nco_char_att_put(out_id,msk_nm,"units","none");
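/* The definitions above yield a SCRIP grid-file whose skeleton, in CDL, is
   approximately (names fixed by the SCRIP convention):
   dimensions:
     grid_rank = 2 ; grid_size = lat_nbr*lon_nbr ; grid_corners = 4 ;
   variables:
     int grid_dims(grid_rank) ;
     double grid_area(grid_size) ;         // units = "steradian"
     int grid_imask(grid_size) ;
     double grid_center_lat(grid_size) ;   // units = "degrees" or "degrees_north"
     double grid_center_lon(grid_size) ;
     double grid_corner_lat(grid_size,grid_corners) ;
     double grid_corner_lon(grid_size,grid_corners) ;
   plus global attributes title, Conventions="SCRIP", created_by, grid_generator, history */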
/* Begin data mode */
(void)nco_enddef(out_id);
/* Write variables */
dmn_srt[0]=0L;
dmn_cnt[0]=grd_rnk_nbr;
rcd=nco_put_vara(out_id,dmn_sz_int_id,dmn_srt,dmn_cnt,dmn_sz_int,(nc_type)NC_INT);
dmn_srt[0]=0L;
dmn_cnt[0]=grd_sz_nbr;
rcd=nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=grd_sz_nbr;
rcd=nco_put_vara(out_id,msk_id,dmn_srt,dmn_cnt,msk,(nc_type)NC_INT);
dmn_srt[0]=0L;
dmn_cnt[0]=grd_sz_nbr;
rcd=nco_put_vara(out_id,grd_ctr_lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ);
dmn_srt[0]=0L;
dmn_cnt[0]=grd_sz_nbr;
rcd=nco_put_vara(out_id,grd_ctr_lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ);
dmn_srt[0]=0L;
dmn_srt[1]=0L;
dmn_cnt[0]=grd_sz_nbr;
dmn_cnt[1]=grd_crn_nbr;
rcd=nco_put_vara(out_id,grd_crn_lat_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ);
dmn_srt[0]=0L;
dmn_srt[1]=0L;
dmn_cnt[0]=grd_sz_nbr;
dmn_cnt[1]=grd_crn_nbr;
rcd=nco_put_vara(out_id,grd_crn_lon_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ);
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
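/* NB (explanatory note, not original): each nco_put_vara() call above passes
   netCDF start/count vectors: dmn_srt holds the hyperslab origin (all zeros
   here, i.e., whole-variable writes) and dmn_cnt the edge lengths, e.g., the
   corner arrays use start=[0,0], count=[grd_sz_nbr,grd_crn_nbr]. A partial
   re-write of one gridcell's corners would instead use start=[cell_idx,0],
   count=[1,grd_crn_nbr] with the same call. */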
fl_out=rgr->fl_skl;
if(fl_out){
  /* Write skeleton data file on requested grid
     Skeleton file can then be populated with data for testing */
  char *area_nm;
  char *bnd_nm;
  // char *bnd_tm_nm;
  char *col_nm_out;
  char *lat_nm_out; /* [sng] Name of output dimension for latitude */
  char *lat_wgt_nm;
  char *lon_nm_out; /* [sng] Name of output dimension for longitude */
  char *lat_bnd_nm; /* [sng] Name of latitude boundary variable */
  char *lon_bnd_nm; /* [sng] Name of longitude boundary variable */
  // int area_id; /* [id] Variable ID for area */
  int dmn_id_bnd; /* [id] Dimension ID */
  // int dmn_id_bnd_tm; /* [id] Dimension ID */
  int dmn_id_col; /* [id] Dimension ID */
  int dmn_id_lat; /* [id] Dimension ID */
  int dmn_id_lon; /* [id] Dimension ID */
  int lat_bnd_id; /* [id] Variable ID for lat_bnds/lat_vertices */
  int lat_id; /* [id] Variable ID for latitude */
  int lat_wgt_id; /* [id] Variable ID for latitude weight */
  int lon_bnd_id; /* [id] Variable ID for lon_bnds/lon_vertices */
  int lon_id; /* [id] Variable ID for longitude */
  /* Use explicitly specified output names, if any, otherwise use input names (either explicitly specified or discovered by fuzzing) */
  if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=(char *)strdup("lat");
  if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=(char *)strdup("lon");
  if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=(char *)strdup("ncol");
  /* Name output dimensions */
  area_nm=rgr->area_nm;
  bnd_nm=rgr->bnd_nm;
  // bnd_tm_nm=rgr->bnd_tm_nm;
  lat_bnd_nm=rgr->lat_bnd_nm;
  lat_wgt_nm=rgr->lat_wgt_nm;
  lon_bnd_nm=rgr->lon_bnd_nm;
  /* Use names discovered by fuzzing */
  if(flg_grd_1D){
    bnd_nm=rgr->vrt_nm;
    lat_bnd_nm=rgr->lat_vrt_nm;
    lon_bnd_nm=rgr->lon_vrt_nm;
  } /* !flg_grd_1D */
  if(flg_grd_2D){
    bnd_nm=rgr->bnd_nm;
    lat_bnd_nm=rgr->lat_bnd_nm;
    lon_bnd_nm=rgr->lon_bnd_nm;
  } /* !flg_grd_2D */
  /* Open grid file */
  fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id);
  /* Define dimensions */
  if(flg_grd_crv){
    rcd=nco_def_dim(out_id,bnd_nm,grd_crn_nbr,&dmn_id_bnd);
  }else{
    rcd=nco_def_dim(out_id,bnd_nm,bnd_nbr,&dmn_id_bnd);
  } /* !flg_grd_crv */
  if(flg_grd_1D){
    rcd=nco_def_dim(out_id,col_nm_out,col_nbr,&dmn_id_col);
  } /* !flg_grd_1D */
  if(flg_grd_2D){
    rcd=nco_def_dim(out_id,lat_nm_out,lat_nbr,&dmn_id_lat);
    rcd=nco_def_dim(out_id,lon_nm_out,lon_nbr,&dmn_id_lon);
  } /* !flg_grd_2D */
  /* Define new coordinates and variables in regridded file */
  if(flg_grd_1D){
    (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_col,&lat_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl);
    (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_col,&lon_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl);
    dmn_ids[0]=dmn_id_col;
    dmn_ids[1]=dmn_id_bnd;
    (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lat_bnd_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl);
    dmn_ids[0]=dmn_id_col;
    dmn_ids[1]=dmn_id_bnd;
    (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lon_bnd_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl);
    (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_1D,&dmn_id_col,&area_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl);
  } /* !flg_grd_1D */
  if(flg_grd_crv){
    dmn_ids[0]=dmn_id_lat;
    dmn_ids[1]=dmn_id_lon;
    (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_2D,dmn_ids,&lat_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl);
    (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_2D,dmn_ids,&lon_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl);
    (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_2D,dmn_ids,&area_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl);
    dmn_ids[0]=dmn_id_lat;
    dmn_ids[1]=dmn_id_lon;
    dmn_ids[2]=dmn_id_bnd;
    (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_3D,dmn_ids,&lat_bnd_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl);
    (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_3D,dmn_ids,&lon_bnd_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl);
  }else if(flg_grd_2D){
    (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_lat,&lat_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl);
    (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_lon,&lon_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl);
    dmn_ids[0]=dmn_id_lat;
    dmn_ids[1]=dmn_id_bnd;
    (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lat_bnd_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl);
    dmn_ids[0]=dmn_id_lon;
    dmn_ids[1]=dmn_id_bnd;
    (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lon_bnd_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl);
    (void)nco_def_var(out_id,lat_wgt_nm,crd_typ,dmn_nbr_1D,&dmn_id_lat,&lat_wgt_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_wgt_id,shuffle,deflate,dfl_lvl);
    dmn_ids[0]=dmn_id_lat;
    dmn_ids[1]=dmn_id_lon;
    (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_2D,dmn_ids,&area_id);
    if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl);
  } /* !flg_grd_2D */
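  /* Illustrative CDL sketch of the 2D skeleton file defined above; actual names
     come from the rgr structure (lat_nm_out, bnd_nm, area_nm, lat_wgt_nm, ...)
     so the identifiers shown here are assumed defaults, not guaranteed:
     dimensions: lat = lat_nbr ; lon = lon_nbr ; nbnd = 2 ;
     variables:
       double lat(lat) ;       lat:bounds = "lat_bnds" ;
       double lat_bnds(lat,nbnd) ;
       double lon(lon) ;       lon:bounds = "lon_bnds" ;
       double lon_bnds(lon,nbnd) ;
       double gw(lat) ;        // latitude quadrature weights
       double area(lat,lon) ;  area:units = "steradian" ; */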
  /* Define attributes */
  rcd=nco_char_att_put(out_id,NULL,"title",rgr->grd_ttl);
  rcd=nco_char_att_put(out_id,NULL,"Conventions","CF-1.6");
  rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp);
  (void)nco_hst_att_cat(out_id,rgr->cmd_ln);
  (void)nco_vrs_att_cat(out_id);
  rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ));
  rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ));
  rcd=nco_char_att_put(out_id,area_nm,"long_name","Solid angle subtended by gridcell");
  rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle");
  rcd=nco_char_att_put(out_id,area_nm,"units","steradian");
  char *crd_val_sng; /* CF-standard coordinates values string */
  size_t crd_val_sng_lng=strlen(lat_nm_out)+strlen(lon_nm_out)+1L;
  crd_val_sng=(char *)nco_malloc(crd_val_sng_lng*sizeof(char)+1L);
  (void)sprintf(crd_val_sng,"%s %s",lat_nm_out,lon_nm_out);
  rcd=nco_char_att_put(out_id,area_nm,"coordinates",crd_val_sng);
  if(crd_val_sng) crd_val_sng=(char *)nco_free(crd_val_sng);
  rcd=nco_char_att_put(out_id,lat_nm_out,"long_name","Latitude of Grid Cell Centers");
  rcd=nco_char_att_put(out_id,lat_nm_out,"standard_name","latitude");
  rcd=nco_char_att_put(out_id,lat_nm_out,"units","degrees_north");
  rcd=nco_char_att_put(out_id,lat_nm_out,"axis","Y");
  rcd=nco_char_att_put(out_id,lat_nm_out,"bounds",lat_bnd_nm);
  if(flg_grd_2D) att_val=strdup("Gridcell latitude interfaces"); else att_val=strdup("Gridcell latitude vertices");
  rcd=nco_char_att_put(out_id,lat_bnd_nm,"long_name",att_val);
  if(flg_grd_2D) rcd=nco_char_att_put(out_id,lat_wgt_nm,"long_name","Latitude quadrature weights (normalized to sum to 2.0 on global grids)");
  rcd=nco_char_att_put(out_id,lon_nm_out,"long_name","Longitude of Grid Cell Centers");
  rcd=nco_char_att_put(out_id,lon_nm_out,"standard_name","longitude");
  rcd=nco_char_att_put(out_id,lon_nm_out,"units","degrees_east");
  rcd=nco_char_att_put(out_id,lon_nm_out,"axis","X");
  rcd=nco_char_att_put(out_id,lon_nm_out,"bounds",lon_bnd_nm);
  if(flg_grd_2D) att_val=strdup("Gridcell longitude interfaces"); else att_val=strdup("Gridcell longitude vertices");
  rcd=nco_char_att_put(out_id,lon_bnd_nm,"long_name",att_val);
  /* Begin data mode */
  (void)nco_enddef(out_id);
  /* Write new coordinates and variables to regridded file */
  if(flg_grd_1D){
    dmn_srt[0]=0L;
    dmn_cnt[0]=col_nbr;
    (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ);
    dmn_srt[0]=0L;
    dmn_cnt[0]=col_nbr;
    (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ);
    dmn_srt[0]=dmn_srt[1]=0L;
    dmn_cnt[0]=col_nbr;
    dmn_cnt[1]=bnd_nbr;
    (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ);
    dmn_srt[0]=dmn_srt[1]=0L;
    dmn_cnt[0]=col_nbr;
    dmn_cnt[1]=bnd_nbr;
    (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ);
    dmn_srt[0]=0L;
    dmn_cnt[0]=col_nbr;
    (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
  } /* !flg_grd_1D */
  if(flg_grd_crv){
    dmn_srt[0]=dmn_srt[1]=0L;
    dmn_cnt[0]=lat_nbr;
    dmn_cnt[1]=lon_nbr;
    (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ);
    (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ);
    (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
    dmn_srt[0]=dmn_srt[1]=dmn_srt[2]=0L;
    dmn_cnt[0]=lat_nbr;
    dmn_cnt[1]=lon_nbr;
    dmn_cnt[2]=grd_crn_nbr;
    (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ);
    (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ);
  }else if(flg_grd_2D){
    dmn_srt[0]=0L;
    dmn_cnt[0]=lat_nbr;
    (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ);
    dmn_srt[0]=0L;
    dmn_cnt[0]=lon_nbr;
    (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ);
    dmn_srt[0]=0L;
    dmn_cnt[0]=lat_nbr;
    (void)nco_put_vara(out_id,lat_wgt_id,dmn_srt,dmn_cnt,lat_wgt,crd_typ);
    dmn_srt[0]=dmn_srt[1]=0L;
    dmn_cnt[0]=lat_nbr;
    dmn_cnt[1]=bnd_nbr;
    (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ);
    dmn_srt[0]=dmn_srt[1]=0L;
    dmn_cnt[0]=lon_nbr;
    dmn_cnt[1]=bnd_nbr;
    (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ);
    dmn_srt[0]=dmn_srt[1]=0L;
    dmn_cnt[0]=lat_nbr;
    dmn_cnt[1]=lon_nbr;
    (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
  } /* !flg_grd_2D */
  /* Close output file and move it from temporary to permanent location */
  (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
} /* !fl_out */
/* Free memory associated with input file */
if(dmn_sz_int) dmn_sz_int=(int *)nco_free(dmn_sz_int);
if(msk) msk=(int *)nco_free(msk);
if(area) area=(double *)nco_free(area);
if(grd_ctr_lat) grd_ctr_lat=(double *)nco_free(grd_ctr_lat);
if(grd_ctr_lon) grd_ctr_lon=(double *)nco_free(grd_ctr_lon);
if(grd_crn_lat) grd_crn_lat=(double *)nco_free(grd_crn_lat);
if(grd_crn_lon) grd_crn_lon=(double *)nco_free(grd_crn_lon);
if(lat_bnd) lat_bnd=(double *)nco_free(lat_bnd);
if(lat_crn) lat_crn=(double *)nco_free(lat_crn);
if(lat_ctr) lat_ctr=(double *)nco_free(lat_ctr);
if(lat_ntf) lat_ntf=(double *)nco_free(lat_ntf);
if(lat_sin) lat_sin=(double *)nco_free(lat_sin);
if(lat_wgt) lat_wgt=(double *)nco_free(lat_wgt);
if(lon_bnd) lon_bnd=(double *)nco_free(lon_bnd);
if(lon_crn) lon_crn=(double *)nco_free(lon_crn);
if(lon_ctr) lon_ctr=(double *)nco_free(lon_ctr);
if(lon_ntf) lon_ntf=(double *)nco_free(lon_ntf);
if(wgt_Gss) wgt_Gss=(double *)nco_free(wgt_Gss);
return rcd;
} /* !nco_grd_mk() */
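/* Hypothetical usage sketches (paths and titles illustrative; the inference
   invocation is taken from the nco_grd_nfr() purpose comment below, while the
   grid-generation options are assumed from NCO documentation, not this file):
   Generate a rectangular SCRIP grid-file with nco_grd_mk():
     ncks -O -D 1 --rgr grd_ttl='Equi-angular 1x1 degree grid' --rgr latlon=180,360 --rgr grid=grd.nc in.nc foo.nc
   Infer a SCRIP grid-file from a data file with nco_grd_nfr():
     ncks -O -D 1 --rgr infer --rgr grid=grd.nc in.nc foo.nc */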
int /* O [enm] Return code */
nco_grd_nfr /* [fnc] Infer SCRIP-format grid file from input data file */
(rgr_sct * const rgr) /* I/O [sct] Regridding structure */
{
  /* Purpose: Use grid information and guesswork to create SCRIP-format grid file from input data file
     Test curvilinear grids:
     ncks -O -D 1 --rgr infer --rgr grid=${DATA}/sld/rgr/grd_airs.nc ${DATA}/sld/raw/AIRS.2014.10.01.202.L2.TSurfStd.Regrid010.1DLatLon.nc ~/foo.nc
     ncks -O -D 1 --rgr infer --rgr grid=${DATA}/sld/rgr/grd_airs.nc ${DATA}/sld/raw/AIRS.2014.10.01.202.L2.TSurfStd.Regrid010.1DLatLon.hole.nc ~/foo.nc */
  const char fnc_nm[]="nco_grd_nfr()"; /* [sng] Function name */
  const double rdn2dgr=180.0/M_PI;
  const double dgr2rdn=M_PI/180.0;
  const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables */
  const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */
  const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */
  const int dmn_nbr_grd_max=4; /* [nbr] Maximum rank of grid variables (msk_[src/dst] could be rank 4) */
  const int itr_nbr_max=20; /* [nbr] Maximum number of iterations */
  const int idx_ccw=0; /* [idx] Index of starting vertex for CCW check (Point A = tail of side AB) */
  const int rcr_lvl=1; /* [nbr] Recursion level (1 is top level, 2 and greater are recursed) */
  const nc_type crd_typ=NC_DOUBLE;
  char *area_nm_in=NULL;
  char *fl_in;
  char *fl_out;
  char *fl_out_tmp=NULL_CEWI;
  char *fl_pth_lcl=NULL;
  char *msk_nm_in=NULL;
  char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */
  /* SCRIP-format grid names are non-negotiable and thus fixed not dynamic */
  char area_nm[]="grid_area"; /* 20150830: NB ESMF_RegridWeightGen --user_areas looks for variable named "grid_area" */
  char dmn_sz_nm[]="grid_dims";
  char grd_crn_lat_nm[]="grid_corner_lat";
  char grd_crn_lon_nm[]="grid_corner_lon";
  char grd_crn_nm[]="grid_corners";
  char grd_ctr_lat_nm[]="grid_center_lat";
  char grd_ctr_lon_nm[]="grid_center_lon";
  char grd_rnk_nm[]="grid_rank";
  char grd_sz_nm[]="grid_size";
  char msk_nm[]="grid_imask";
  char unt_sng[]="units"; /* netCDF-standard units attribute name */
  double *grd_ctr_lat; /* [dgr] Latitude centers of grid */
  double *grd_ctr_lon; /* [dgr] Longitude centers of grid */
  double *grd_crn_lat; /* [dgr] Latitude corners of grid */
  double *grd_crn_lon; /* [dgr] Longitude corners of grid */
  double *area; /* [sr] Area of grid */
  double *lat_bnd=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular grid */
  double *lat_crn=NULL; /* [dgr] Latitude corners of rectangular grid */
  double *lat_ctr=NULL_CEWI; /* [dgr] Latitude centers of rectangular grid */
  double *lat_ntf=NULL; /* [dgr] Latitude interfaces of rectangular grid */
  double *lat_wgt=NULL; /* [dgr] Latitude weights of rectangular grid */
  double *lon_bnd=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular grid */
  double *lon_crn=NULL; /* [dgr] Longitude corners of rectangular grid */
  double *lon_ctr=NULL_CEWI; /* [dgr] Longitude centers of rectangular grid */
  double *lon_ntf=NULL; /* [dgr] Longitude interfaces of rectangular grid */
  double *vrt_lat=NULL; /* [rdn] MPAS latitude boundary variable latVertex */
  double *vrt_lon=NULL; /* [rdn] MPAS longitude boundary variable lonVertex */
  double area_ttl=0.0; /* [frc] Exact sum of area */
  // double lat_nrt; /* [dgr] Latitude of northern edge of grid */
  double lat_sth; /* [dgr] Latitude of southern edge of grid */
  double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */
  double lat_wgt_gss; /* [frc] Latitude weight estimated from interface latitudes */
  // double lon_est; /* [dgr] Longitude of eastern edge of grid */
  double lon_wst; /* [dgr] Longitude of western edge of grid */
  double lon_ncr; /* [dgr] Longitude increment */
  double lat_ncr; /* [dgr] Latitude increment */
  double lon_spn; /* [dgr] Longitude span */
  double lat_spn; /* [dgr] Latitude span */
  double mss_val_area_dbl;
  double mss_val_ctr_dbl;
  double mss_val_msk_dbl;
  int *msk=NULL; /* [flg] Mask of grid */
  int *vrt_cll=NULL; /* [enm] MPAS variable verticesOnCell */
  int *dmn_sz_int; /* [nbr] Array of dimension sizes of grid */
  int dmn_ids[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */
  int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
  int dmn_idx; /* [idx] Dimension index */
  int fl_out_fmt=NC_FORMAT_CLASSIC; /* [enm] Output file format */
  int in_id; /* I [id] Input netCDF file ID */
  int md_open; /* [enm] Mode flag for nc_open() call */
  int out_id; /* I [id] Output netCDF file ID */
  int rcd=NC_NOERR;
  int area_id=NC_MIN_INT; /* [id] Area variable ID */
  int dmn_id_grd_crn; /* [id] Grid corners dimension ID */
  int dmn_id_grd_rnk; /* [id] Grid rank dimension ID */
  int dmn_id_grd_sz; /* [id] Grid size dimension ID */
  int dmn_sz_int_id; /* [id] Grid dimension sizes ID */
  int grd_crn_lat_id; /* [id] Grid corner latitudes variable ID */
  int grd_crn_lon_id; /* [id] Grid corner longitudes variable ID */
  int grd_ctr_lat_id; /* [id] Grid center latitudes variable ID */
  int grd_ctr_lon_id; /* [id] Grid center longitudes variable ID */
  int itr_cnt; /* Iteration counter */
  int lat_rnk; /* [nbr] Rank of latitude coordinate */
  int lon_rnk; /* [nbr] Rank of longitude coordinate */
  int lat_ctr_id=NC_MIN_INT; /* [id] Latitude centers of rectangular grid variable ID */
  int lon_ctr_id=NC_MIN_INT; /* [id] Longitude centers of rectangular grid variable ID */
  int lat_bnd_id=NC_MIN_INT; /* [id] Latitude bounds of rectangular grid variable ID */
  int lon_bnd_id=NC_MIN_INT; /* [id] Longitude bounds of rectangular grid variable ID */
  int msk_id=NC_MIN_INT; /* [id] Mask variable ID */
  int msk_rnk_nbr; /* [nbr] Mask rank */
  int mss_val_int_out=NC_MIN_INT; /* [nbr] Value that can be non-erroneously pointed to */
  int val_two=2; /* [nbr] Value that can be non-erroneously pointed to */
  int val_zero=0; /* [nbr] Value that can be non-erroneously pointed to */
  int var_id; /* [id] Current variable ID */
  int vrt_cll_id=NC_MIN_INT; /* [id] MPAS variable verticesOnCell ID */
  int vrt_lat_id=NC_MIN_INT; /* [id] MPAS latitude boundary variable latVertex ID */
  int vrt_lon_id=NC_MIN_INT; /* [id] MPAS longitude boundary variable lonVertex ID */
  long dmn_srt[dmn_nbr_grd_max];
  long dmn_cnt[dmn_nbr_grd_max];
  long bnd_idx;
  long bnd_nbr=NC_MIN_INT; /* [nbr] Number of bounds in gridcell */
  long col_idx;
  long col_nbr; /* [nbr] Number of columns in grid */
  long crn_idx; /* [idx] Counting index for corners */
  long ttl_idx; /* [idx] Total (unrolled) counting index for grid+corners */
  long dmn_sz; /* [nbr] Size of current dimension */
  long grd_crn_nbr; /* [nbr] Number of corners in gridcell */
  long grd_rnk_nbr=int_CEWI; /* [nbr] Number of dimensions in grid */
  long grd_sz_nbr; /* [nbr] Number of gridcells in grid */
  long idx2; /* [idx] Counting index for unrolled grids */
  long idx; /* [idx] Counting index for unrolled grids */
  long idx_crn;
  long idx_ctr;
  long idx_fst; /* [idx] Index offset */
  long idx_tmp; /* [idx] Temporary index */
  long lat_idx2; /* [idx] Counting index for unrolled latitude */
  long lat_idx;
  long lat_nbr; /* [nbr] Number of latitudes in grid */
  long lon_idx2; /* [idx] Counting index for unrolled longitude */
  long lon_idx;
  long lon_nbr; /* [nbr] Number of longitudes in grid */
  long vrt_idx; /* [idx] Counting index for vertices */
  long vrt_nbr; /* [nbr] Number of vertices in MPAS grid */
  long int idx_crn_ll;
  long int idx_crn_lr;
  long int idx_crn_ur;
  long int idx_crn_ul;
  nco_bool FL_RTR_RMT_LCN;
  nco_bool FORCE_APPEND=False; /* Option A */
  nco_bool FORCE_OVERWRITE=True; /* Option O */
  nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
  nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */
  nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
  nco_bool SHARE_CREATE=rgr->flg_uio; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */
  nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */
  nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
  nco_bool WRT_TMP_FL=False; /* [flg] Write output to temporary file */
  nco_bool flg_1D_mpas_bnd=False; /* [flg] Unstructured input grid with MPAS bounds */
  nco_bool flg_1D_psd_rct_bnd=False; /* [flg] Unstructured input grid with pseudo-rectangular bounds */
  nco_bool flg_ccw; /* [flg] Gridcell is CCW */
  nco_bool flg_grd_1D=False;
  nco_bool flg_grd_2D=False;
  nco_bool flg_grd_crv=False;
  nco_bool flg_s2n=True; /* [enm] Latitude grid-direction is South-to-North */
  nco_bool flg_wrt_crn=True;
  nco_bool flg_crn_grd_lat_lon=False; /* [flg] Curvilinear corner array ordered non-canonically as grd_nbr,lat_nbr,lon_nbr */
  nco_bool use_mss_val_area=False;
  nco_bool has_mss_val_area=False;
  nco_bool has_mss_val_bnd=False;
  nco_bool has_mss_val_ctr=False;
  nco_bool has_mss_val_msk=False;
  nco_grd_2D_typ_enm grd_typ; /* [enm] Grid-type enum */
  nco_grd_lat_typ_enm lat_typ; /* [enm] Latitude grid-type enum */
  nco_grd_lon_typ_enm lon_typ; /* [enm] Longitude grid-type enum */
  nco_grd_xtn_enm nco_grd_xtn=nco_grd_xtn_nil; /* [enm] Grid-extent enum */
  nc_type msk_typ;
  ptr_unn msk_unn;
  size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
  /* Algorithm:
     Read grid information from input data file (aka *_in)
     Close input file
     Once grid dimensions known, allocate output grid arrays (aka *_out)
     Open output file (aka grid-file)
     Use guesswork and standard algorithms to fill-in output arrays */
  /* Duplicate (because nco_fl_mk_lcl() free()'s fl_in) */
  fl_in=(char *)strdup(rgr->fl_in);
  /* Make sure file is on local system and is readable or die trying */
  fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
  /* Open file using appropriate buffer size hints and verbosity */
  if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
  if(SHARE_OPEN) md_open=md_open|NC_SHARE;
  rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id);
  char *bnd_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as bounds */
  char *col_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as column */
  char *lat_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as latitude */
  char *lon_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as longitude */
  char *lat_nm_in=NULL_CEWI; /* [sng] Name of variable to recognize as latitude */
  char *lon_nm_in=NULL_CEWI; /* [sng] Name of variable to recognize as longitude */
  char *lat_bnd_nm=NULL_CEWI; /* [sng] Name of latitude boundary variable */
  char *lon_bnd_nm=NULL_CEWI; /* [sng] Name of longitude boundary variable */
  char *vrt_dmn_nm=NULL_CEWI; /* [sng] Name of MPAS vertices dimension nVertices */
  char *vrt_cll_nm=NULL_CEWI; /* [sng] Name of MPAS variable verticesOnCell */
  char *vrt_lat_nm=NULL_CEWI; /* [sng] Name of MPAS latitude boundary variable latVertex */
  char *vrt_lon_nm=NULL_CEWI; /* [sng] Name of MPAS longitude boundary variable lonVertex */
  int dmn_id_bnd=NC_MIN_INT; /* [id] Dimension ID for spatial bounds */
  int dmn_id_col=NC_MIN_INT; /* [id] Dimension ID for unstructured grids */
  int dmn_id_lat=NC_MIN_INT; /* [id] Dimension ID for latitude */
  int dmn_id_lon=NC_MIN_INT; /* [id] Dimension ID for longitude */
  int dmn_id_vrt=NC_MIN_INT; /* [id] Dimension ID for MPAS vertices */
  /* Begin CF-coordinates block */
  cf_crd_sct *cf=NULL;
  char *rgr_var; /* [sng] Variable for special regridding treatment */
  nco_bool flg_cf=False; /* [flg] Follow CF Coordinates convention to find and infer grid */
  rgr_var=rgr->var_nm;
  if(rgr_var){
    /* Infer grid from special variable
       Intended to be variable that has both horizontal dimensions and "coordinates" attribute, e.g.,
       ncks --cdl -m ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc | grep coordinates
       4LFTX_221_SPDY_S113:coordinates = "gridlat_221 gridlon_221" ;
       Usage:
       ncks -O -D 3 --rgr infer --rgr_var=4LFTX_221_SPDY_S113 --rgr grid=~/grd_narr.nc ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc ~/foo.nc */
    char crd_sng[]="coordinates"; /* CF-standard coordinates attribute name */
    cf=(cf_crd_sct *)nco_malloc(sizeof(cf_crd_sct));
    cf->crd=False; /* [flg] CF coordinates information is complete */
    cf->crd_id[0]=NC_MIN_INT; /* [id] Coordinate ID, first */
    cf->crd_id[1]=NC_MIN_INT; /* [id] Coordinate ID, second */
    cf->crd_nm[0]=NULL; /* [sng] Coordinate name, first */
    cf->crd_nm[1]=NULL; /* [sng] Coordinate name, second */
    cf->crd_sng=NULL; /* [sng] Coordinates attribute value */
    cf->dmn_id[0]=NC_MIN_INT; /* [id] Dimension ID, first */
    cf->dmn_id[1]=NC_MIN_INT; /* [id] Dimension ID, second */
    cf->dmn_nm[0]=NULL; /* [sng] Dimension name, first */
    cf->dmn_nm[1]=NULL; /* [sng] Dimension name, second */
    cf->unt_sng[0]=NULL; /* [sng] Units string, first coordinate */
    cf->unt_sng[1]=NULL; /* [sng] Units string, second coordinate */
    cf->var_id=NC_MIN_INT; /* [id] Coordinate variable ID */
    cf->var_nm=NULL; /* [sng] Coordinates variable name */
    cf->var_type=NC_NAT; /* [enm] Coordinates variable type */
    if((rcd=nco_inq_varid_flg(in_id,rgr_var,&cf->var_id)) != NC_NOERR){
      (void)fprintf(stderr,"%s: WARNING %s reports special \"coordinates\" variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var);
      goto skp_cf;
    } /* !rcd */
    cf->crd_sng=nco_char_att_get(in_id,cf->var_id,crd_sng);
    if(cf->crd_sng){
      cf->crd=True;
    }else{ /* !rcd && att_typ */
      (void)fprintf(stderr,"%s: WARNING %s reports coordinates variable %s does not have character-valued \"coordinates\" attribute. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var);
      goto skp_cf;
    } /* !rcd && att_typ */
    /* Valid coordinates attribute requires two coordinate names separated by space character */
    char *crd_nm[NCO_MAX_CRD_PER_VAR]; /* [sng] Coordinate name start position */
    char *crd_dpl; /* [sng] Modifiable duplicate of coordinates string */
    char *spc_ptr; /* [sng] Pointer to space character (' ') */
    int crd_nbr=0; /* [nbr] Number of names in coordinates attribute */
    int crd_spt=0; /* [nbr] Number of "spatial-like" (that include "degree" in units) coordinates */
    int crd_idx=0; /* [idx] Counter for coordinate names */
    for(crd_idx=0;crd_idx<NCO_MAX_CRD_PER_VAR;crd_idx++) crd_nm[crd_idx]=NULL;
    crd_dpl=(char *)strdup(cf->crd_sng);
    /* Search for spaces starting from end of string */
    while((spc_ptr=strrchr(crd_dpl,' '))){
      crd_nm[crd_nbr]=spc_ptr+1L;
      crd_nbr++;
      /* NUL-terminate so next search ends here */
      *spc_ptr='\0';
    } /* !sbs_ptr */
    /* Final coordinate name begins where coordinate string starts */
    crd_nm[crd_nbr]=crd_dpl;
    /* Change crd_nbr from 0-based index to actual coordinate number */
    crd_nbr++;
    if(crd_nbr < 2){
      (void)fprintf(stderr,"%s: WARNING %s found only %d coordinate(s) in \"coordinates\" attribute \"%s\", at least two are required. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,crd_nbr,cf->crd_sng);
      goto skp_cf;
    } /* !crd_nbr */
    /* If more than two coordinate names are present, choose first two (searching backwards from end) with "degree" in units attributes, otherwise just choose first two */
    crd_idx=crd_spt=0;
    while(crd_spt < 2 && crd_idx < crd_nbr){
      cf->crd_nm[crd_spt]=crd_nm[crd_idx];
      if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[crd_spt],&cf->crd_id[crd_spt])) == NC_NOERR){
        cf->unt_sng[crd_spt]=nco_char_att_get(in_id,cf->crd_id[crd_spt],unt_sng);
        if(cf->unt_sng[crd_spt]){
          if(strcasestr(cf->unt_sng[crd_spt],"degree")){
            /* Increment count of spatial-like coordinates... */
            crd_spt++;
          }else{
            /* ...or free() memory allocated during search */
            cf->unt_sng[crd_spt]=(char *)nco_free(cf->unt_sng[crd_spt]);
          } /* !strcasestr() */
        } /* !rcd && att_typ */
      } /* !rcd */
      /* Advance to next candidate whether or not lookup succeeded, to guarantee loop termination */
      crd_idx++;
    } /* !crd_spt */
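    /* Worked example with a hypothetical attribute coordinates="time gridlat_221 gridlon_221":
       the backward strrchr() scan yields candidates in reverse order,
       crd_nm[0]="gridlon_221", crd_nm[1]="gridlat_221", crd_nm[2]="time";
       the units filter above then accepts the two "degree"-containing candidates
       and never examines "time", so cf->crd_nm[0]="gridlon_221" and
       cf->crd_nm[1]="gridlat_221"; the units-based ordering logic below sorts
       out which of the two is latitude and which is longitude. */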
    /* If while()-loop above was successful, our search is over
       Otherwise, use first two coordinate names regardless of units, and print more diagnostics */
    if(crd_spt < 2){
      cf->crd_nm[0]=crd_nm[0];
      cf->crd_nm[1]=crd_nm[1];
      if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[0],&cf->crd_id[0])) != NC_NOERR){
        (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0]);
        goto skp_cf;
      } /* !rcd */
      if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[1],&cf->crd_id[1])) != NC_NOERR){
        (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1]);
        goto skp_cf;
      } /* !rcd */
      cf->unt_sng[0]=nco_char_att_get(in_id,cf->crd_id[0],unt_sng);
      if(cf->unt_sng[0]){
        if(!strcasestr(cf->unt_sng[0],"degree")) (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->unt_sng[0]);
      } /* !rcd && att_typ */
      cf->unt_sng[1]=nco_char_att_get(in_id,cf->crd_id[1],unt_sng);
      if(cf->unt_sng[1]){
        if(!strcasestr(cf->unt_sng[1],"degree")) (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1],cf->unt_sng[1]);
      } /* !rcd && att_typ */
    } /* !crd_spt */
    int crd_rnk; /* [nbr] Coordinate rank */
    rcd=nco_inq_varndims(in_id,cf->crd_id[0],&crd_rnk);
    if(crd_rnk != 2){
      (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s has %i dimension(s). Skipping CF coordinates method.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],crd_rnk);
      goto skp_cf;
    } /* !crd_rnk */
    rcd=nco_inq_vardimid(in_id,cf->crd_id[0],cf->dmn_id);
    cf->dmn_nm[0]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR));
    cf->dmn_nm[1]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR));
    rcd=nco_inq_dimname(in_id,cf->dmn_id[0],cf->dmn_nm[0]);
    rcd=nco_inq_dimname(in_id,cf->dmn_id[1],cf->dmn_nm[1]);
    /* "coordinates" convention does not guarantee lat, lon are specified in that order
       Use "units" values, if any, to determine order; coordinates whose units match neither latitude nor longitude trigger the assertion below */
    nco_bool crd0_is_lat=False; /* [flg] First coordinate is latitude */
    nco_bool crd0_is_lon=False; /* [flg] First coordinate is longitude */
    nco_bool crd1_is_lat=False; /* [flg] Second coordinate is latitude */
    nco_bool crd1_is_lon=False; /* [flg] Second coordinate is longitude */
    if(cf->unt_sng[0]){
      if(!strcasecmp(cf->unt_sng[0],"degrees_north") || !strcasecmp(cf->unt_sng[0],"degree_north") || !strcasecmp(cf->unt_sng[0],"degree_N") || !strcasecmp(cf->unt_sng[0],"degrees_N") || !strcasecmp(cf->unt_sng[0],"degreeN") || !strcasecmp(cf->unt_sng[0],"degreesN")) crd0_is_lat=True;
      if(!strcasecmp(cf->unt_sng[0],"degrees_east") || !strcasecmp(cf->unt_sng[0],"degree_east") || !strcasecmp(cf->unt_sng[0],"degree_E") || !strcasecmp(cf->unt_sng[0],"degrees_E") || !strcasecmp(cf->unt_sng[0],"degreeE") || !strcasecmp(cf->unt_sng[0],"degreesE")) crd0_is_lon=True;
    } /* !unt_sng[0] */
    if(cf->unt_sng[1]){
      if(!strcasecmp(cf->unt_sng[1],"degrees_north") || !strcasecmp(cf->unt_sng[1],"degree_north") || !strcasecmp(cf->unt_sng[1],"degree_N") || !strcasecmp(cf->unt_sng[1],"degrees_N") || !strcasecmp(cf->unt_sng[1],"degreeN") || !strcasecmp(cf->unt_sng[1],"degreesN")) crd1_is_lat=True;
      if(!strcasecmp(cf->unt_sng[1],"degrees_east") || !strcasecmp(cf->unt_sng[1],"degree_east") || !strcasecmp(cf->unt_sng[1],"degree_E") || !strcasecmp(cf->unt_sng[1],"degrees_E") || !strcasecmp(cf->unt_sng[1],"degreeE") || !strcasecmp(cf->unt_sng[1],"degreesE")) crd1_is_lon=True;
    } /* !unt_sng[1] */
    assert((crd0_is_lat && crd1_is_lon) || (crd0_is_lon && crd1_is_lat));
    int idx_lat;
    int idx_lon;
    if(crd0_is_lat && crd1_is_lon){
      idx_lat=0;
      idx_lon=1;
    }else{
      idx_lat=1;
      idx_lon=0;
    } /* endif */
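    /* Example: unt_sng[0]="degrees_north" and unt_sng[1]="degrees_east" set
       crd0_is_lat and crd1_is_lon, so idx_lat=0 and idx_lon=1; swapped units
       give idx_lat=1, idx_lon=0. If neither pattern matches (units missing, or
       a plain "degrees" on both coordinates) the assert() above aborts rather
       than guess the ordering. */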
    /* Dimensions and coordinates have been vetted. Store as primary lookup names.
       Dimensions are always returned in order [LRV,MRV]=[0,1]
       LRV is along-track direction, and MRV is across-track (at least in NASA data)
       Internally we label LRV as "lat" and MRV as "lon" so that code looks similar for curvilinear and rectangular grids */
    dmn_id_lat=cf->dmn_id[0];
    dmn_id_lon=cf->dmn_id[1];
    /* Subtlety: lat_nm_in is coordinate (variable+dimension) name when specified from command-line (as in nco_grd_nfr()), dimension name when found through CF-method (as in nco_rgr_wgt()).
       This confusing distinction could be avoided by passing command-line dimension names through-to nco_rgr_wgt().
       However, that route would require complex priorities for what to do when passing command-line coordinate names not dimension names and vice versa. */
    // lat_nm_in=strdup(cf->dmn_nm[0]);
    // lon_nm_in=strdup(cf->dmn_nm[1]);
    lat_nm_in=strdup(cf->crd_nm[idx_lat]);
    lon_nm_in=strdup(cf->crd_nm[idx_lon]);
    /* Next four lines unnecessary in nco_rgr_wgt() which only needs dimension names (it reads input coordinates from map- not data-file) */
    lat_ctr_id=cf->crd_id[idx_lat];
    lon_ctr_id=cf->crd_id[idx_lon];
    lat_dmn_nm=strdup(cf->dmn_nm[0]);
    lon_dmn_nm=strdup(cf->dmn_nm[1]);
    if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s \"coordinates\" attribute \"%s\" points to coordinates %s and %s. Latitude coordinate \"%s\" has LRV (along-track) and MRV (across-track) dimensions \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,rgr_var,cf->crd_sng,cf->crd_nm[0],cf->crd_nm[1],cf->crd_nm[idx_lat],cf->dmn_nm[0],cf->dmn_nm[1]);
    if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s Coordinates %s and %s \"units\" values are \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->crd_nm[1],cf->unt_sng[0] ? cf->unt_sng[0] : "(non-existent)",cf->unt_sng[1] ? cf->unt_sng[1] : "(non-existent)");
    /* Clean-up CF coordinates memory */
    if(crd_dpl) crd_dpl=(char *)nco_free(crd_dpl);
    if(cf->crd_sng) cf->crd_sng=(char *)nco_free(cf->crd_sng);
    if(cf->dmn_nm[0]) cf->dmn_nm[0]=(char *)nco_free(cf->dmn_nm[0]);
    if(cf->dmn_nm[1]) cf->dmn_nm[1]=(char *)nco_free(cf->dmn_nm[1]);
    if(cf->unt_sng[0]) cf->unt_sng[0]=(char *)nco_free(cf->unt_sng[0]);
    if(cf->unt_sng[1]) cf->unt_sng[1]=(char *)nco_free(cf->unt_sng[1]);
  } /* !rgr_var */
  /* goto skp_cf */
 skp_cf:
  /* free() any abandoned cf structure now */
  if(!flg_cf)
    if(cf) cf=(cf_crd_sct *)nco_free(cf);
  rcd=NC_NOERR;
  /* End CF-coordinates block */
  /* Locate fields that must be present in input file
     Required variables are usually latitude and longitude
     Currently these variables must be in root group
     This fails for, e.g., OMI L2 which has coordinates /GEOLOCATION_DATA/[Latitude,Longitude]
     fxm: Generalize with traversal table so usual suspect coordinates may be in any group */
  if(lat_ctr_id == NC_MIN_INT){
    if(rgr->lat_nm_in && (rcd=nco_inq_varid_flg(in_id,rgr->lat_nm_in,&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup(rgr->lat_nm_in);
    else if((rcd=nco_inq_varid_flg(in_id,"latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latitude");
    else if((rcd=nco_inq_varid_flg(in_id,"Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("Latitude"); /* AMSR, HIRDLS, TRMM */
    else if((rcd=nco_inq_varid_flg(in_id,"lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("lat"); /* CAM */
    else if((rcd=nco_inq_varid_flg(in_id,"lat_d",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("lat_d"); /* EAM dynamics grid */
    else if((rcd=nco_inq_varid_flg(in_id,"Lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("Lat");
    else if((rcd=nco_inq_varid_flg(in_id,"XLAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("XLAT"); /* WRF */
    else if((rcd=nco_inq_varid_flg(in_id,"XLAT_M",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("XLAT_M"); /* Unknown */
    else if((rcd=nco_inq_varid_flg(in_id,"LAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("LAT"); /* MAR/RACMO */
    else if((rcd=nco_inq_varid_flg(in_id,"LATIXY",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("LATIXY"); /* CISM/CLM/ELM */
    else if((rcd=nco_inq_varid_flg(in_id,"TLAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("TLAT"); /* CICE, POP */
    else if((rcd=nco_inq_varid_flg(in_id,"ULAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("ULAT"); /* CICE, POP */
    else if((rcd=nco_inq_varid_flg(in_id,"latCell",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latCell"); /* MPAS-O/I */
    else if((rcd=nco_inq_varid_flg(in_id,"nav_lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("nav_lat"); /* NEMO */
    else if((rcd=nco_inq_varid_flg(in_id,"rlat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("rlat"); /* RACMO */
    else if((rcd=nco_inq_varid_flg(in_id,"global_latitude0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("global_latitude0"); /* Oxford */
    else if((rcd=nco_inq_varid_flg(in_id,"latitude0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latitude0"); /* Oxford NB: Must search for global_* first */
    else if((rcd=nco_inq_varid_flg(in_id,"CO_Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("CO_Latitude"); /* MLS */
    else if((rcd=nco_inq_varid_flg(in_id,"S1_Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("S1_Latitude"); /* GPM */
    else if((rcd=nco_inq_varid_flg(in_id,"yc",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("yc"); /* RTM */
    else if((rcd=nco_inq_varid_flg(in_id,"south_north",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("south_north"); /* StackOverflow question https://stackoverflow.com/questions/68896581 */
    else if((rcd=nco_inq_varid_flg(in_id,"gridlat_0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("gridlat_0"); /* NWS HRRR */
  } /* !lat_ctr_id */
  if(lon_ctr_id == NC_MIN_INT){
    if(rgr->lon_nm_in && (rcd=nco_inq_varid_flg(in_id,rgr->lon_nm_in,&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup(rgr->lon_nm_in);
    else if((rcd=nco_inq_varid_flg(in_id,"longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("longitude");
    else if((rcd=nco_inq_varid_flg(in_id,"Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("Longitude"); /* AMSR, TRMM */
    else if((rcd=nco_inq_varid_flg(in_id,"lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lon"); /* CAM */
    else if((rcd=nco_inq_varid_flg(in_id,"lon_d",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lon_d"); /* EAM dynamics grid; NB: originally strdup("lon"), an apparent copy-paste bug */
    else if((rcd=nco_inq_varid_flg(in_id,"Lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("Lon");
    else if((rcd=nco_inq_varid_flg(in_id,"XLONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("XLONG"); /* WRF */
    else if((rcd=nco_inq_varid_flg(in_id,"XLONG_M",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("XLONG_M"); /* Unknown */
    else if((rcd=nco_inq_varid_flg(in_id,"LON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("LON"); /* MAR/RACMO */
    else if((rcd=nco_inq_varid_flg(in_id,"LONGXY",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("LONGXY"); /* CISM/CLM/ELM */
    else if((rcd=nco_inq_varid_flg(in_id,"TLON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("TLON"); /* CICE */
    else if((rcd=nco_inq_varid_flg(in_id,"TLONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("TLONG"); /* POP */
    else if((rcd=nco_inq_varid_flg(in_id,"ULON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("ULON"); /* CICE */
    else if((rcd=nco_inq_varid_flg(in_id,"ULONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("ULONG"); /* POP */
    else if((rcd=nco_inq_varid_flg(in_id,"lonCell",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lonCell"); /* MPAS-O/I */
    else if((rcd=nco_inq_varid_flg(in_id,"nav_lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("nav_lon"); /* NEMO */
    else if((rcd=nco_inq_varid_flg(in_id,"rlon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("rlon"); /* RACMO */
    else if((rcd=nco_inq_varid_flg(in_id,"global_longitude0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("global_longitude0"); /* Oxford NB: Must search for global_* first */
    else if((rcd=nco_inq_varid_flg(in_id,"longitude0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("longitude0"); /* Oxford */
    else if((rcd=nco_inq_varid_flg(in_id,"CO_Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("CO_Longitude"); /* MLS */
    else if((rcd=nco_inq_varid_flg(in_id,"S1_Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("S1_Longitude"); /* GPM */
    else if((rcd=nco_inq_varid_flg(in_id,"xc",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("xc"); /* RTM */
    else if((rcd=nco_inq_varid_flg(in_id,"west_east",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("west_east"); /* StackOverflow question https://stackoverflow.com/questions/68896581 */
    else if((rcd=nco_inq_varid_flg(in_id,"gridlon_0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("gridlon_0"); /* NWS HRRR */
  } /* !lon_ctr_id */
  if(!lat_nm_in || !lon_nm_in){
    (void)fprintf(stdout,"%s: ERROR %s unable to identify latitude and/or longitude variable.\nHINT: Potential causes and workarounds for this include: 1. Coordinate variables must be in the root group (not in a sub-group). If this might be the problem, try to \"flatten\" the input file before regridding it (see http://nco.sf.net/nco.html#flatten). 2. Horizontal dimensions with \"unusual\" names are hard to identify unless the user designates them somehow. ncremap will search for horizontal dimensions named in the \"coordinates\" attribute in a template variable specified with the \"-V rgr_var\" option. 3. NCO will also search its own internal database for likely names of horizontal coordinate variables (lat, latitude, LAT, XLAT, etc.). Contact the NCO project to have your idiosyncratic coordinate names added to the internal database.\n",nco_prg_nm_get(),fnc_nm);
    nco_exit(EXIT_FAILURE);
  } /* !lat_nm_in */
  /* Rank of coordinates determines whether grid is curvilinear */
  rcd+=nco_inq_varndims(in_id,lat_ctr_id,&lat_rnk);
  rcd+=nco_inq_varndims(in_id,lon_ctr_id,&lon_rnk);
  /* If lat_ctr and lon_ctr share same and only dimension then grid is unstructured */
  if(lat_rnk*lon_rnk == 1){
    rcd+=nco_inq_vardimid(in_id,lat_ctr_id,&dmn_id_lat);
    rcd+=nco_inq_vardimid(in_id,lon_ctr_id,&dmn_id_lon);
    if(dmn_id_lat == dmn_id_lon){
      dmn_id_col=dmn_id_lat;
      dmn_id_lat=NC_MIN_INT;
      dmn_id_lon=NC_MIN_INT;
      rcd+=nco_inq_dimname(in_id,dmn_id_col,dmn_nm);
      col_dmn_nm=(char *)strdup(dmn_nm);
      flg_grd_1D=True;
    } /* !unstructured */
  } /* !lat_rnk == lon_rnk == 1 */
  if(lat_rnk*lon_rnk == 1 && dmn_id_lat != NC_MIN_INT && dmn_id_lon != NC_MIN_INT){
    flg_grd_crv=False;
    flg_grd_2D=True;
  } /* !lat_rnk */
  if(lat_rnk == dmn_nbr_2D || lon_rnk == dmn_nbr_2D){
    flg_grd_crv=True;
    flg_grd_2D=False;
  } /* !lat_rnk */
  if(lat_rnk > dmn_nbr_2D || lon_rnk > dmn_nbr_2D){
    (void)fprintf(stdout,"%s: ERROR %s reports an identified grid variable (%s with rank %d and/or %s with rank %d) has rank greater than two---grid variables currently must have rank 1 or 2.\nHINT: If grid variables do not vary in time, then temporally average them (with, e.g., ncwa -a time in.nc out.nc) prior to inferring grid\n",nco_prg_nm_get(),fnc_nm,lat_nm_in,lat_rnk,lon_nm_in,lon_rnk);
    nco_exit(EXIT_FAILURE);
  } /* !3D */
  if(lat_rnk*lon_rnk != 1 && lat_rnk*lon_rnk != 4) assert(False);
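  /* Recap of the classification above:
     lat_rnk*lon_rnk == 1, coordinates share one dimension -> flg_grd_1D (unstructured)
     lat_rnk*lon_rnk == 1, distinct dimensions             -> flg_grd_2D (rectangular)
     either coordinate has rank 2                          -> flg_grd_crv (curvilinear)
     rank > 2 was rejected above, so only these three cases remain */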
  /* Scrutinize coordinates for their dimensions
     NB: Unstructured grids already known */
  if(flg_grd_2D){
    rcd+=nco_inq_dimname(in_id,dmn_id_lat,dmn_nm);
    lat_dmn_nm=(char *)strdup(dmn_nm);
    rcd+=nco_inq_dimname(in_id,dmn_id_lon,dmn_nm);
    lon_dmn_nm=(char *)strdup(dmn_nm);
  } /* !flg_grd_2D */
  if(flg_grd_crv){
    rcd+=nco_inq_vardimid(in_id,lat_ctr_id,dmn_ids);
    /* fxm: use cf struct and match with units name, if any?
       Normally curvilinear grid dimensions are just pixel dimensions that are not aligned north-south or east-west */
    dmn_id_lat=dmn_ids[0];
    dmn_id_lon=dmn_ids[1];
    rcd+=nco_inq_dimname(in_id,dmn_id_lat,dmn_nm);
    lat_dmn_nm=(char *)strdup(dmn_nm);
    rcd+=nco_inq_dimname(in_id,dmn_id_lon,dmn_nm);
    lon_dmn_nm=(char *)strdup(dmn_nm);
  } /* !flg_grd_crv */
  if(!(lat_dmn_nm && lon_dmn_nm) && !col_dmn_nm){
    (void)fprintf(stdout,"%s: ERROR %s unable to identify latitude and/or longitude dimension and/or column dimension.\n",nco_prg_nm_get(),fnc_nm);
    nco_exit(EXIT_FAILURE);
  } /* !col_dmn_nm !lat_dmn_nm !lon_dmn_nm */
  /* Locate spatial dimensions that may be present
     NB: bounds dimensions may present a special problem
     CAM-FV and CAM-SE use nbnd for temporal bounds and have no spatial bounds dimension
     CAM3 uses tbnd for temporal bounds and has no spatial bounds dimension
     CICE and POP use d2 for temporal bounds, and CICE uses nvertices for spatial bounds while POP uses nothing
     Hence search for nvertices before nbnd to ensure spatial bound is found first */
  if((rcd=nco_inq_dimid_flg(in_id,"nv",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("nv"); /* fxm */
  else if((rcd=nco_inq_dimid_flg(in_id,"nvertices",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("nvertices"); /* CICE */
  else if((rcd=nco_inq_dimid_flg(in_id,"maxEdges",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("maxEdges"); /* MPAS */
  if((rcd=nco_inq_dimid_flg(in_id,"nVertices",&dmn_id_vrt)) == NC_NOERR) vrt_dmn_nm=strdup("nVertices"); /* MPAS */
  /* Use dimension IDs to get dimension sizes and grid size */
  if(flg_grd_1D){
    rcd+=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr);
    lat_nbr=lon_nbr=col_nbr;
  }else{
    rcd+=nco_inq_dimlen(in_id,dmn_id_lat,&lat_nbr);
    rcd+=nco_inq_dimlen(in_id,dmn_id_lon,&lon_nbr);
    col_nbr=NC_MIN_INT;
  } /* !flg_grd_1D */
  if(dmn_id_bnd != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_bnd,&grd_crn_nbr);
  if(dmn_id_bnd != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_bnd,&bnd_nbr);
  if(dmn_id_vrt != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_vrt,&vrt_nbr);
  if(flg_grd_1D){
    /* Unstructured grid (e.g., CAM-SE) */
    grd_rnk_nbr=dmn_nbr_1D;
    grd_typ=nco_grd_2D_unk;
    lat_typ=nco_grd_lat_unk;
    lon_typ=nco_grd_lon_unk;
    /* 1D grids without their own boundaries are at the mercy of the weight generator */
    if(dmn_id_bnd == NC_MIN_INT){
      (void)fprintf(stdout,"%s: WARNING %s reports an unstructured grid without spatial boundary information. NCO can copy but not infer spatial boundaries from unstructured grids. Thus NCO will not write spatial bounds to the gridfile inferred from this input file. Instead, the weight generator that ingests this gridfile must generate weights for gridcells with unknown spatial extent. This is feasible for grids and mappings where weights masquerade as areas and are determined by underlying grid and interpolation type (e.g., bilinear remapping of spectral element grid). Unfortunately, the ESMF_RegridWeightGen (ERWG) program requires cell interfaces in both grid files, so ERWG will break on this gridfile. Other weight generators such as TempestRemap may be more successful with this SCRIP file.\n",nco_prg_nm_get(),fnc_nm);
      (void)fprintf(stdout,"%s: HINT Re-run the regridder, this time adding the \"-s src_grd\" option to specify the source grid file in SCRIP format. That SCRIP file will have the spatial bounds information required by the ESMF_RegridWeightGen (ERWG) program, so that the regridder will circumvent inferring the underlying grid through its black but fragile magic.\n",nco_prg_nm_get());
      flg_wrt_crn=False;
      /* Input could actually be from grid with no polygonal definition, e.g., CAM-SE
         Corner number is non-deterministic since, e.g., CAM-SE dual grid can be fit to quadrilaterals, pentagons, chevrons, etc.
         Bounds will not be diagnosed so safe to set grd_crn_nbr to harmless (though weird) value like 4
         However, ERWG requires presence of valid corner dimension "grid_corners" and arrays in input SCRIP file
         So ERWG will break when reading this SCRIP file regardless of whether it contains arrays (with bogus values)
         By default do not write grid corner values */
      grd_crn_nbr=4;
    } /* !dmn_id_bnd */
    if(bnd_nbr == 2){
      /* Unstructured grids with bounds information (e.g., OCO2) may use a pseudo-rectangular convention of archiving
         latitude and longitude bounds as 2xN (rather than 4xN) arrays even though cells have four corners.
         The "convention" is that two latitudes and two longitudes suffice to specify a rectangular boundary cell
         In this case, bnd_nbr=grd_crn_nbr=2=sizeof(nv)=sizeof(nvertices) currently
         Set number of corners to rectangular and leave bnd_nbr as is */
      grd_crn_nbr=4;
      flg_1D_psd_rct_bnd=True;
    } /* !bnd_nbr */
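    /* Example of the pseudo-rectangular convention (illustrative; exact element
       ordering depends on the input file): a file may store per-cell bounds as
       lat_bnd(ncol,2) = [southern, northern latitude] and
       lon_bnd(ncol,2) = [western, eastern longitude];
       flg_1D_psd_rct_bnd records that the four quadrilateral corners must later
       be reconstituted from these two latitude and two longitude values. */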
    if(bnd_dmn_nm && !strcmp(bnd_dmn_nm,"maxEdges")){ /* NULL-guard added: bnd_dmn_nm remains unset when no bounds dimension was found */
      if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Unstructured grid has dimension \"%s\" which indicates an MPAS grid. Will attempt to locate other MPAS information (dimension nVertices and variables verticesOnCell, lonVertex, and latVertex) to construct SCRIP-compliant bounds variables...\n",nco_prg_nm_get(),bnd_dmn_nm);
      if((rcd=nco_inq_varid_flg(in_id,"verticesOnCell",&vrt_cll_id)) == NC_NOERR) vrt_cll_nm=strdup("verticesOnCell");
      if((rcd=nco_inq_varid_flg(in_id,"lonVertex",&vrt_lon_id)) == NC_NOERR) vrt_lon_nm=strdup("lonVertex");
      if((rcd=nco_inq_varid_flg(in_id,"latVertex",&vrt_lat_id)) == NC_NOERR) vrt_lat_nm=strdup("latVertex");
      if(dmn_id_vrt != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_vrt,&vrt_nbr);
      if(vrt_dmn_nm && vrt_cll_nm && vrt_lon_nm && vrt_lat_nm){
        flg_1D_mpas_bnd=True;
        if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Found all MPAS information needed to construct SCRIP-compliant bounds variables.\n",nco_prg_nm_get());
      }else{
        (void)fprintf(stdout,"%s: INFO Unable to find all MPAS information needed to construct SCRIP-compliant bounds variables. Will not write bounds coordinates. This will degrade usefulness of SCRIP file for regridding schemes (e.g., conservative) that require cell boundaries.\n",nco_prg_nm_get());
        (void)fprintf(stdout,"%s: HINT Often MPAS restart files contain the required bounds variables (verticesOnCell, lonVertex, latVertex) that normal MPAS data files lack. Try inferring the SCRIP grid from a restart file not a normal time-varying output dataset.\n",nco_prg_nm_get());
        flg_wrt_crn=False;
      } /* !vrt_cll_nm */
    } /* !bnd_dmn_nm */
  }else if(flg_grd_2D){ /* !flg_grd_1D */
    /* Assume 2D grid of uninitialized type */
    grd_rnk_nbr=dmn_nbr_2D;
    grd_typ=nco_grd_2D_nil;
    lat_typ=nco_grd_lat_nil;
    lon_typ=nco_grd_lon_nil;
    /* Assume rectangular grids that do not specify otherwise use quadrilaterals */
    if(dmn_id_bnd == NC_MIN_INT) grd_crn_nbr=4;
    /* Sometimes we infer from a 2D grid, like those produced by nco_grd_mk(), that has bounds with nv=2
       This signals rectangular gridcell bounds are interfaces not vertices (to save half the space)
       These rectangles really have four corners so we change grd_crn_nbr (not bnd_nbr) accordingly */
    if(grd_crn_nbr == 2) grd_crn_nbr=4;
    /* Convention is to archive only two bounds for rectangular grids (since sides are identical)
       Non-quadrilateral rectangular grids are untested */
    if(grd_crn_nbr == 4) bnd_nbr=2;
  }else if(flg_grd_crv){ /* !flg_grd_2D */
    /* Assume curvilinear grid (e.g., WRF) */
    flg_grd_2D=False;
    grd_rnk_nbr=dmn_nbr_2D;
    grd_typ=nco_grd_2D_unk;
    lat_typ=nco_grd_lat_unk;
    lon_typ=nco_grd_lon_unk;
    /* Assume curvilinear grids that do not specify otherwise use quadrilaterals */
    if(dmn_id_bnd == NC_MIN_INT) grd_crn_nbr=4;
    /* Assume quadrilaterals are, well, quadrilaterals (e.g., rhomboids) not necessarily rectangles
       Non-quadrilateral curvilinear grids are untested */
    if(grd_crn_nbr == 4) bnd_nbr=4; else assert(False);
  } /* !flg_grd_crv */
  /* Allocate space for output data */
  if(flg_grd_1D) grd_sz_nbr=col_nbr; else grd_sz_nbr=lat_nbr*lon_nbr;
  dmn_sz_int=(int *)nco_malloc(grd_rnk_nbr*nco_typ_lng((nc_type)NC_INT));
  area=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
  msk=(int *)nco_malloc(grd_sz_nbr*nco_typ_lng((nc_type)NC_INT));
  if(flg_grd_1D){
    if(bnd_nbr != NC_MIN_INT) lat_bnd=(double *)nco_malloc(grd_sz_nbr*bnd_nbr*nco_typ_lng(crd_typ));
    lat_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
    lat_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
    lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ));
    lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
    if(bnd_nbr != NC_MIN_INT) lon_bnd=(double *)nco_malloc(grd_sz_nbr*bnd_nbr*nco_typ_lng(crd_typ));
    lon_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
    lon_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
    lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ));
  }else if(flg_grd_2D){ /* !flg_grd_1D */
    lat_bnd=(double *)nco_malloc(lat_nbr*bnd_nbr*nco_typ_lng(crd_typ));
    lat_crn=(double *)nco_malloc(lat_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
    lat_ctr=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
    lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ));
    lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
    lon_bnd=(double *)nco_malloc(lon_nbr*bnd_nbr*nco_typ_lng(crd_typ));
    lon_crn=(double *)nco_malloc(lon_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
    lon_ctr=(double *)nco_malloc(lon_nbr*nco_typ_lng(crd_typ));
    lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ));
  }else if(flg_grd_crv){ /* !flg_grd_2D */
    lat_bnd=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
    lat_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
    lat_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
    lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ));
    lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
    lon_bnd=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
    lon_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
    lon_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
    lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ));
  } /* !flg_grd_crv */
  grd_ctr_lat=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
  grd_ctr_lon=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
  grd_crn_lat=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ));
  grd_crn_lon=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ));
  /* Locate fields that may be present in input file */
  if((rcd=nco_inq_varid_flg(in_id,"lat_bnds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_bnds");
  else if((rcd=nco_inq_varid_flg(in_id,"latt_bounds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latt_bounds");
  else if((rcd=nco_inq_varid_flg(in_id,"latu_bounds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latu_bounds");
  else if((rcd=nco_inq_varid_flg(in_id,"lat_ntf",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_ntf");
  else if((rcd=nco_inq_varid_flg(in_id,"lat_vertices",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_vertices");
  else if((rcd=nco_inq_varid_flg(in_id,"latitude_bnds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latitude_bnds"); /* OCO2 */
  else if((rcd=nco_inq_varid_flg(in_id,"LatitudeCornerpoints",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("LatitudeCornerpoints"); /* OMI */
  if((rcd=nco_inq_varid_flg(in_id,"lon_bnds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_bnds");
  else if((rcd=nco_inq_varid_flg(in_id,"lont_bounds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lont_bounds");
  else if((rcd=nco_inq_varid_flg(in_id,"lonu_bounds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lonu_bounds");
  else if((rcd=nco_inq_varid_flg(in_id,"lon_ntf",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_ntf");
  else if((rcd=nco_inq_varid_flg(in_id,"lon_vertices",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_vertices");
  else if((rcd=nco_inq_varid_flg(in_id,"longitude_bnds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("longitude_bnds"); /* OCO2 */
  else if((rcd=nco_inq_varid_flg(in_id,"LongitudeCornerpoints",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("LongitudeCornerpoints"); /* OMI */
  if((rcd=nco_inq_varid_flg(in_id,"area",&area_id)) == NC_NOERR) area_nm_in=strdup("area");
  else if((rcd=nco_inq_varid_flg(in_id,"Area",&area_id)) == NC_NOERR) area_nm_in=strdup("Area");
  else if((rcd=nco_inq_varid_flg(in_id,"areaCell",&area_id)) == NC_NOERR) area_nm_in=strdup("areaCell"); /* MPAS-O/I */
  else if((rcd=nco_inq_varid_flg(in_id,"grid_area",&area_id)) == NC_NOERR) area_nm_in=strdup("grid_area");
  else if((rcd=nco_inq_varid_flg(in_id,"area_d",&area_id)) == NC_NOERR) area_nm_in=strdup("area_d"); /* EAM dynamics grid */
  else if((rcd=nco_inq_varid_flg(in_id,"area_p",&area_id)) == NC_NOERR) area_nm_in=strdup("area_p"); /* EAM physics grid */
  // else if((rcd=nco_inq_varid_flg(in_id,"aice",&area_id)) == NC_NOERR) area_nm_in=strdup("aice"); /* CICE time-dependent ice area (3D), not total gridcell area */
  else if((rcd=nco_inq_varid_flg(in_id,"tarea",&area_id)) == NC_NOERR) area_nm_in=strdup("tarea"); /* CICE time-invariant state-variable gridcell area (2D) */
  else if((rcd=nco_inq_varid_flg(in_id,"uarea",&area_id)) == NC_NOERR) area_nm_in=strdup("uarea"); /* CICE time-invariant dynamics variables (2D) */
  msk_nm_in=rgr->msk_var;
  if(msk_nm_in){
    if(!strcasecmp(msk_nm_in,"none")){
      /* 20170814: Some variables named "*mask*" are, e.g., quality control masks not regridding masks per se */
msk_nm_in=(char *)nco_free(msk_nm_in); }else{ /* User-supplied name overrides database */ rcd=nco_inq_varid(in_id,msk_nm_in,&msk_id); } /* !msk_nm_in */ }else{ /* Otherwise search database */ if((rcd=nco_inq_varid_flg(in_id,"mask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("mask"); else if((rcd=nco_inq_varid_flg(in_id,"Mask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("Mask"); else if((rcd=nco_inq_varid_flg(in_id,"mask_b",&msk_id)) == NC_NOERR) msk_nm_in=strdup("mask_b"); else if((rcd=nco_inq_varid_flg(in_id,"grid_imask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("grid_imask"); else if((rcd=nco_inq_varid_flg(in_id,"landmask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("landmask"); /* ALM/CLM */ else if((rcd=nco_inq_varid_flg(in_id,"tmask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("tmask"); /* CICE */ } /* !msk_nm_in */ /* Mask field requires special handling for non-conformant models */ if(msk_id != NC_MIN_INT){ /* 20151201: All models tested define mask as NC_INT except CICE which uses NC_FLOAT 20160111: Few observations tested define mask. Exceptions include AMSR and GHRSST. AMSR uses NC_SHORT to store bitmasks. Bitmask is 1 for missing data, and up to 128 for various quality levels of valid data. Hence it is almost better to ignore the AMSR mask variable. GHRSST uses NC_BYTE for its 3D "mask" bit-mask of surface-type values 1,2,4,8,16. */ rcd=nco_inq_varndims(in_id,msk_id,&msk_rnk_nbr); if(msk_rnk_nbr != grd_rnk_nbr && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports input mask variable \"%s\" is rank %d while grid is rank %ld so will use first timestep/layer to determine output mask\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,msk_rnk_nbr,grd_rnk_nbr); rcd=nco_inq_vartype(in_id,msk_id,&msk_typ); msk_unn.vp=(void *)nco_malloc(grd_sz_nbr*nco_typ_lng(msk_typ)); } /* !msk */ /* All grids: Some real-world datasets violate convention that coordinates ought never have missing values CICE lists missing value for lat/lon_ctr arrays (TLAT, TLONG) and re-uses that for bounds arrays (latt_bounds, lont_bounds) that do not bother to have their own missing value attributes Without counter-example, assume has_mss_val_bnd=has_mss_val_ctr and mss_val_bnd_dbl=mss_val_ctr_dbl */ has_mss_val_bnd=has_mss_val_ctr=nco_mss_val_get_dbl(in_id,lat_ctr_id,&mss_val_ctr_dbl); char *att_val; char *area_unt=NULL; /* [sng] Dimensional units used in area */ char *ngl_unt=NULL; /* [sng] Angular units used in coordinates */ long att_sz; nc_type att_typ; nco_bool flg_area_sr=True; /* [flg] Input area is in steradians not something weird like km2 */ nco_bool flg_crd_rdn=False; /* [flg] Input coordinates are in radians not degrees */ if(flg_grd_1D){ /* Obtain fields that must be present in unstructured input file */ dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); /* Obtain fields that may be present in unstructured input file */ if(area_id != NC_MIN_INT) rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); if(msk_id != NC_MIN_INT){ if(msk_rnk_nbr > grd_rnk_nbr){ /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... 
*/ for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=1L; } /* !dmn_idx */ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=col_nbr; } /* !msk_rnk_nbr */ rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ); } /* !msk_id */ dmn_srt[0]=dmn_srt[1]=0L; if(flg_1D_psd_rct_bnd){ dmn_cnt[0]=col_nbr; dmn_cnt[1]=bnd_nbr; if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); }else if(flg_1D_mpas_bnd){ const long grd_crn_nbrm1=grd_crn_nbr-1L; /* [nbr] Number of corners in gridcell minus one */ vrt_cll=(int *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng((nc_type)NC_INT)); vrt_lat=(double *)nco_malloc(vrt_nbr*nco_typ_lng(crd_typ)); vrt_lon=(double *)nco_malloc(vrt_nbr*nco_typ_lng(crd_typ)); dmn_cnt[0]=col_nbr; dmn_cnt[1]=grd_crn_nbr; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports dimension sizes bnd_nbr=%ld, col_nbr=%ld, grd_crn_nbr=%ld, vrt_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,bnd_nbr,col_nbr,grd_crn_nbr,vrt_nbr); if(vrt_cll_id != NC_MIN_INT) rcd=nco_get_vara(in_id,vrt_cll_id,dmn_srt,dmn_cnt,vrt_cll,(nc_type)NC_INT); dmn_cnt[0]=vrt_nbr; if(vrt_lat_id != NC_MIN_INT) rcd=nco_get_vara(in_id,vrt_lat_id,dmn_srt,dmn_cnt,vrt_lat,crd_typ); if(vrt_lon_id != NC_MIN_INT) rcd=nco_get_vara(in_id,vrt_lon_id,dmn_srt,dmn_cnt,vrt_lon,crd_typ); rcd=nco_inq_att_flg(in_id,vrt_lat_id,unt_sng,&att_typ,&att_sz); if(rcd == NC_NOERR && att_typ == NC_CHAR){ att_val=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ)); rcd+=nco_get_att(in_id,vrt_lat_id,unt_sng,att_val,att_typ); /* NUL-terminate attribute before using strstr() */ att_val[att_sz]='\0'; /* Match "radian" and "radians" */ if(strstr(att_val,"radian")) flg_crd_rdn=True; if(att_val) ngl_unt=(char *)strdup(att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ for(col_idx=0;col_idx<col_nbr;col_idx++){ idx=col_idx*grd_crn_nbr; for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ ttl_idx=idx+crn_idx; vrt_idx=vrt_cll[ttl_idx]; assert(vrt_idx >= 0); //if(vrt_idx >= vrt_nbr) (void)fprintf(stdout,"%s: WARNING %s input gridcell %ld corner %ld has extreme MPAS input verticesOnCell value %ld (maximum valid vertex = vrt_nbr-1 = %ld-1 = %ld)\n",nco_prg_nm_get(),fnc_nm,col_idx,crn_idx,vrt_idx,vrt_nbr,vrt_nbr-1); if(vrt_idx == 0){ /* 20201220: Convert values of zero to neighboring valid vertex index */ for(idx_fst=1;idx_fst<grd_crn_nbr;idx_fst++){ idx_tmp=crn_idx+idx_fst; /* Wrap to initial corner of this cell when candidate corner would be in next cell */ if(idx_tmp > grd_crn_nbrm1) idx_tmp-=grd_crn_nbr; ttl_idx=idx+idx_tmp; vrt_idx=vrt_cll[ttl_idx]; if(vrt_idx != 0) break; } /* !idx_fst */ assert(idx_fst < grd_crn_nbr); } /* !vrt_idx */ /* 20201220: Stored vertex indices use Fortran-based convention---subtract one for C */ vrt_idx--; lat_crn[ttl_idx]=vrt_lat[vrt_idx]; lon_crn[ttl_idx]=vrt_lon[vrt_idx]; //(void)fprintf(stdout,"%s: DEBUG %s reports col_idx = %ld, crn_idx = %ld, ttl_idx = %ld, vrt_idx = %ld, vrt_lat = %g, vrt_lon = %g\n",nco_prg_nm_get(),fnc_nm,col_idx,crn_idx,ttl_idx,vrt_idx,vrt_lat[vrt_idx],vrt_lon[vrt_idx]); } /* !crn_idx */ } /* !col_idx */ }else{ /* !flg_1D_mpas_bnd */ dmn_cnt[0]=col_nbr; dmn_cnt[1]=grd_crn_nbr; if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_crn,crd_typ); if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_crn,crd_typ); } /* 
!flg_1D_psd_rct_bnd */ } /* !flg_grd_1D */ if(flg_grd_crv){ /* Obtain fields that must be present in curvilinear input file */ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); /* 20150923: Also input, if present in curvilinear file, corners, area, and mask area and mask are same size as lat and lon */ if(area_id != NC_MIN_INT) rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); if(msk_id != NC_MIN_INT){ if(msk_rnk_nbr > grd_rnk_nbr){ /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... */ for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=1L; } /* !dmn_idx */ dmn_srt[dmn_idx]=dmn_srt[dmn_idx+1]=0L; dmn_cnt[dmn_idx]=lat_nbr; dmn_cnt[dmn_idx+1]=lon_nbr; } /* !msk_rnk_nbr */ rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ); } /* !msk_id */ /* Corners are on curvilinear corner grid Rectangular boundaries (i.e., lat_bnd=[lat_nbr,2]) DNE for curvilinear grids Read-in *_crn arrays in curvilinear grids, and *_bnd arrays for rectilinear grids Rank-ordering of corner arrays is usually lat_nbr,lon_nbr,grd_crn_nbr as produced/expected by SCRIP However some datasets, e.g., OMI DOMINO use grd_crn_nbr,lat_nbr,lon_nbr Sigh... */ dmn_srt[0]=dmn_srt[1]=dmn_srt[2]=0L; if(lat_bnd_id != NC_MIN_INT && lon_bnd_id != NC_MIN_INT){ rcd=nco_inq_vardimid(in_id,lat_bnd_id,dmn_ids); if((dmn_ids[0] == dmn_id_lat && dmn_ids[1] == dmn_id_lon) || (dmn_ids[0] == dmn_id_lon && dmn_ids[1] == dmn_id_lat)){ dmn_id_bnd=dmn_ids[2]; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; dmn_cnt[2]=grd_crn_nbr; }else if((dmn_ids[1] == dmn_id_lat && dmn_ids[2] == dmn_id_lon) || (dmn_ids[1] == dmn_id_lon && dmn_ids[2] == dmn_id_lat)){ dmn_id_bnd=dmn_ids[0]; dmn_cnt[0]=grd_crn_nbr; dmn_cnt[1]=lat_nbr; dmn_cnt[2]=lon_nbr; flg_crn_grd_lat_lon=True; }else{ (void)fprintf(stdout,"%s: WARNING %s confused by dimension-ordering of latitude bounds variable \"%s\". Will ignore this bounds variable and attempt to extrapolate vertices from centers internally...\n",nco_prg_nm_get(),fnc_nm,lat_nm_in); lat_bnd_id=NC_MIN_INT; lon_bnd_id=NC_MIN_INT; } /* !dmn_ids */ rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_crn,crd_typ); rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_crn,crd_typ); if(flg_crn_grd_lat_lon){ /* Permute corner arrays from non-canonical (grd_nbr,lat_nbr,lon_nbr) to canonical (lat_nbr,lon_nbr,grd_nbr) order */ double *lat_crn_tmp=NULL; double *lon_crn_tmp=NULL; lat_crn_tmp=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_crn_tmp=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); memcpy(lat_crn_tmp,lat_crn,grd_sz_nbr*grd_crn_nbr*sizeof(double)); memcpy(lon_crn_tmp,lon_crn,grd_sz_nbr*grd_crn_nbr*sizeof(double)); for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ for(idx=0;idx<grd_sz_nbr;idx++){ lat_idx=idx/lon_nbr; lon_idx=idx%lon_nbr; /* NB: Variables differ (lat vs. 
lon) but indexes are identical in next two lines */ lat_crn[lat_idx*lon_nbr*grd_crn_nbr+lon_idx*grd_crn_nbr+crn_idx]=lat_crn_tmp[crn_idx*grd_sz_nbr+idx]; lon_crn[lat_idx*lon_nbr*grd_crn_nbr+lon_idx*grd_crn_nbr+crn_idx]=lon_crn_tmp[crn_idx*grd_sz_nbr+idx]; } /* !idx */ } /* !crn_idx */ if(lat_crn_tmp) lat_crn_tmp=(double *)nco_free(lat_crn_tmp); if(lon_crn_tmp) lon_crn_tmp=(double *)nco_free(lon_crn_tmp); /* In this code branch, thought to be executed only for OMI DOMINO grids, re-compute grid center arrays (known to contain missing values) as centroids of supplied grid corners */ for(idx=0;idx<grd_sz_nbr;idx++){ lat_idx=idx/lon_nbr; lon_idx=idx%lon_nbr; lat_ctr[idx]=0.25*(lat_crn[idx*grd_crn_nbr+0L]+lat_crn[idx*grd_crn_nbr+1L]+lat_crn[idx*grd_crn_nbr+2L]+lat_crn[idx*grd_crn_nbr+3L]); lon_ctr[idx]=nco_lon_crn_avg_brnch(lon_crn[idx*grd_crn_nbr+0L],lon_crn[idx*grd_crn_nbr+1L],lon_crn[idx*grd_crn_nbr+2L],lon_crn[idx*grd_crn_nbr+3L]); } /* !idx */ } /* !flg_crd_grd_lat_lon */ } /* !lat_bnd_id */ } /* !flg_grd_crv */ if(flg_grd_2D){ int lon_psn_in=1L; /* [idx] Ordinal position of longitude dimension in rectangular grid variables like area */ int lat_psn_in=0L; /* [idx] Ordinal position of latitude dimension in rectangular grid variables like area */ int tpl_id=NC_MIN_INT; /* [id] ID of template field */ /* Obtain fields that must be present in input file */ dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr; rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); dmn_srt[0L]=0L; dmn_cnt[0L]=lon_nbr; rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); if(lat_ctr[1L] < lat_ctr[0L]) flg_s2n=False; /* Use fields that may be present in input file to override, if necessary, default lon/lat order area and mask are both suitable templates for determining input lat/lon ordering NB: Algorithm assumes area is same rank as grid, and falls-back to mask if that has same rank as grid */ if(area_id != NC_MIN_INT) tpl_id=area_id; else if(msk_id != NC_MIN_INT && msk_rnk_nbr == grd_rnk_nbr) tpl_id=msk_id; if(tpl_id != NC_MIN_INT){ int tpl_rnk_nbr; var_id=tpl_id; /* NB: Template variable rank may exceed two with --msk_[src/dst] (e.g., SST(time,lat,lon)) */ rcd=nco_inq_varndims(in_id,var_id,&tpl_rnk_nbr); rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); /* fxm: Optimize discovery of lat/lon ordering */ for(dmn_idx=0;dmn_idx<grd_rnk_nbr;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_ids[dmn_idx],dmn_nm); rcd+=nco_inq_dimlen(in_id,dmn_ids[dmn_idx],&dmn_sz); if(!strcmp(dmn_nm,lat_dmn_nm)){ assert(dmn_sz == lat_nbr); assert(dmn_idx == 0); lat_psn_in=dmn_idx; } /* !lat */ if(!strcmp(dmn_nm,lon_dmn_nm)){ assert(dmn_sz == lon_nbr); assert(dmn_idx == 1); lon_psn_in=dmn_idx; } /* !lon */ } /* !dmn_idx */ } /* !tpl */ /* Obtain fields that may be present in input file */ if(area_id != NC_MIN_INT){ var_id=area_id; rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); dmn_srt[lat_psn_in]=0L; dmn_cnt[lat_psn_in]=lat_nbr; dmn_srt[lon_psn_in]=0L; dmn_cnt[lon_psn_in]=lon_nbr; rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); } /* !area */ if(msk_id != NC_MIN_INT){ var_id=msk_id; rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); dmn_srt[lat_psn_in]=0L; dmn_cnt[lat_psn_in]=lat_nbr; dmn_srt[lon_psn_in]=0L; dmn_cnt[lon_psn_in]=lon_nbr; if(msk_rnk_nbr != grd_rnk_nbr){ /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... 
*/ for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=1L; } /* !dmn_idx */ dmn_srt[dmn_idx]=dmn_srt[dmn_idx+1]=0L; dmn_cnt[dmn_idx+lat_psn_in]=lat_nbr; dmn_cnt[dmn_idx+lon_psn_in]=lon_nbr; } /* !msk_rnk_nbr */ rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ); } /* !msk */ /* Rectangular boundaries are often on "abbreviated" bounds grid (two bounds per center) Read-in *_crn arrays for 1D and curvilinear grids, and *_bnd arrays for rectilinear grids */ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=bnd_nbr; if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lon_nbr; dmn_cnt[1]=bnd_nbr; if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); } /* !flg_grd_2D */ /* Obtain units, if any, of input area */ if(area_id != NC_MIN_INT){ rcd=nco_inq_att_flg(in_id,area_id,unt_sng,&att_typ,&att_sz); if(rcd == NC_NOERR && att_typ == NC_CHAR){ att_val=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ)); rcd+=nco_get_att(in_id,area_id,unt_sng,att_val,att_typ); /* NUL-terminate attribute before using strstr() */ att_val[att_sz]='\0'; if(!strcasestr(att_val,"radian")) flg_area_sr=False; if(att_val) area_unt=(char *)strdup(att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ } /* !area_id */ /* Additional information that may be required for any input grid */ if(area_id != NC_MIN_INT) has_mss_val_area=nco_mss_val_get_dbl(in_id,area_id,&mss_val_area_dbl); if(msk_id != NC_MIN_INT) has_mss_val_msk=nco_mss_val_get_dbl(in_id,msk_id,&mss_val_msk_dbl); /* 20160115: AMSR coordinates are packed as NC_SHORT with scale_value=0.01f. What to do? Is it worth unpacking everything? */ int flg_pck; /* [flg] Variable is packed on disk */ rcd=nco_inq_var_packing(in_id,lat_ctr_id,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports lat_ctr variable \"%s\" is packed so results unpredictable. HINT: If grid-generation causes problems, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,lat_nm_in); rcd=nco_inq_var_packing(in_id,lon_ctr_id,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports lon_ctr variable \"%s\" is packed so results unpredictable. HINT: If grid-generation causes problems, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,lon_nm_in); /* Close input netCDF file */ nco_close(in_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); /* Above this line, fl_in and in_id refer to input file to be regridded Below this line, fl_out and out_id refer to grid-file to be output */ dfl_lvl=rgr->dfl_lvl; fl_out=rgr->fl_grd; fl_out_fmt=rgr->fl_out_fmt; if(!fl_out){ (void)fprintf(stdout,"%s: ERROR %s filename for inferred SCRIP grid-file is uninitialized, supply it with \"ncks --rgr grid=filename.nc\" or \"ncremap -R '--rgr grid=filename.nc'\"\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT ncremap supplies an automatically generated default name for any output SCRIP grid-file. 
Users of the standalone regridder (ncks) must explicitly specify a name for the inferred SCRIP grid-file.\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* !fl_out */ /* Define output variable values */ int lon_psn; /* [idx] Ordinal position of longitude dimension in rectangular grid dimension-size array */ int lat_psn; /* [idx] Ordinal position of latitude dimension in rectangular grid dimension-size array */ if(grd_rnk_nbr == dmn_nbr_1D){ dmn_sz_int[0]=col_nbr; }else if(grd_rnk_nbr == dmn_nbr_2D){ /* !dmn_nbr_1D */ /* SCRIP introduced [lon,lat] convention because more natural for Fortran NB: This [lon,lat] convention applies ONLY to grid_dims variable Write all other SCRIP variables as [lat,lon] Nonsensical? Yes, but backwards compatibility is priceless */ lon_psn=0; lat_psn=1; dmn_sz_int[lon_psn]=lon_nbr; dmn_sz_int[lat_psn]=lat_nbr; } /* !dmn_nbr_2D */ if(flg_grd_crv){ /* For curvilinear grids first, if necessary, infer corner boundaries Then perform sanity check using same code on inferred and copied grids */ if(False && has_mss_val_bnd && grd_crn_nbr == 4 && !strcmp(lat_bnd_nm,"latt_bounds") && !strcmp(lon_bnd_nm,"lont_bounds") && lat_bnd_id != NC_MIN_INT && lon_bnd_id != NC_MIN_INT){ /* Only CESM CICE is known to fit these constraints Cell center locations are (misleadingly) reported in a regular, rectangular, regional grid Cell corners/boundaries are regular only in SH, curvilinear in NH, i.e., displaced or tripole grid Grid is from southernmost Antarctic Ocean latitude and longitude near 79S,320E to North Pole Nominal centers do not agree with true centers computed from corners CICE may run in decomposed/unstructured mode, each column writes separately to output buffer? This could explain missing coordinates in non-ocean gridcells However, land points are completely masked (grid centers and corners are missing) Oversight? Why not write coordinates for land-masked cells? Regridder needs corners so we fill-in missing boundaries with derived grid Gave up on inferring 20170521 once tri-pole grid complexity became apparent */ const long idx_dbg=rgr->idx_dbg; double lat_ctr_drv; /* [dgr] Latitude center, derived */ double lon_ctr_drv; /* [dgr] Longitude center, derived */ double lat_crn_drv; /* [dgr] Latitude corner, derived */ double lon_crn_drv; /* [dgr] Longitude corner, derived */ long idx_ctr_sth; /* [idx] Index of southern neighbor */ long idx_ctr_nrt; /* [idx] Index of northern neighbor */ long idx_crn_sth; /* [idx] Index of southern neighbor */ long idx_crn_nrt; /* [idx] Index of northern neighbor */ long lon_idx_crr; /* [idx] Current longitude index */ long lon_vld_frs; /* [idx] First valid longitude in latitude row */ long *lon_vld_prv=NULL; /* [idx] Previous valid longitude in latitude row */ long *lon_vld_nxt=NULL; /* [idx] Next valid longitude in latitude row */ lon_vld_prv=(long *)nco_malloc(lon_nbr*sizeof(long)); lon_vld_nxt=(long *)nco_malloc(lon_nbr*sizeof(long)); /* First valid gridcell sets west and south bounds of entire grid */ for(idx_ctr=0;idx_ctr<grd_sz_nbr;idx_ctr++){ if(lat_ctr[idx_ctr] != mss_val_ctr_dbl) break; } /* !grd_sz_nbr */ assert(idx_ctr != grd_sz_nbr); idx_crn=idx_ctr*grd_crn_nbr; lat_sth=lat_crn[idx_crn]; lat_ncr=lat_crn[idx_crn+3]-lat_crn[idx_crn]; /* ul-ll */ lon_wst=lon_crn[idx_crn]; lon_ncr=lon_crn[idx_crn+1]-lon_crn[idx_crn]; /* lr-ll */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s will assume grid is regional CICE in curvilinear format with masked land. 
Will diagnose missing cell boundaries and centers from present boundaries and centers in grid of size lat_nbr=%ld, lon_nbr=%ld.\n",nco_prg_nm_get(),fnc_nm,lat_nbr,lon_nbr); for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx_ctr=lat_idx*lon_nbr; /* Find first valid longitude at this latitude */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break; lon_vld_frs=lon_idx; /* 20170519: Verified all tri-pole grid latitudes have at least one valid point NB: search loop leaves lon_idx == lon_nbr (not -1) when no valid point exists */ if(lon_vld_frs == lon_nbr) abort(); for(lon_idx_crr=0;lon_idx_crr<lon_nbr;lon_idx_crr++){ /* Find previous and next valid longitude for all longitudes at this latitude Cells can be their own previous/next valid longitude */ lon_vld_prv[lon_idx_crr]=-1L; lon_vld_nxt[lon_idx_crr]=-1L; /* Start from current longitude and move left (west)... */ for(lon_idx=lon_idx_crr;lon_idx>=0;lon_idx--) if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break; if(lon_idx >= 0) lon_vld_prv[lon_idx_crr]=lon_idx; /* Start from current longitude and move right (east)... */ for(lon_idx=lon_idx_crr;lon_idx<lon_nbr;lon_idx++) if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break; if(lon_idx < lon_nbr) lon_vld_nxt[lon_idx_crr]=lon_idx; /* Wrap west if previous valid cell not found */ if(lon_vld_prv[lon_idx_crr] == -1L) lon_vld_prv[lon_idx_crr]=lon_vld_prv[lon_nbr-1L]; /* Wrap east if next valid cell not found */ if(lon_vld_nxt[lon_idx_crr] == -1L) lon_vld_nxt[lon_idx_crr]=lon_vld_nxt[0]; } /* !lon_idx_crr */ /* Derive centers and corners for each missing point */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx_ctr=lat_idx*lon_nbr+lon_idx; idx_crn=idx_ctr*grd_crn_nbr; if(lat_ctr[idx_ctr] != mss_val_ctr_dbl){ lat_sth=lat_crn[idx_crn]; lat_ncr=lat_crn[idx_crn+3]-lat_crn[idx_crn]; /* ul-ll */ lat_ctr_drv=lat_sth+0.5*lat_ncr; lat_crn_drv=lat_sth; lon_wst=lon_crn[idx_crn]; lon_ncr=lon_crn[idx_crn+1]-lon_crn[idx_crn]; /* lr-ll */ lon_ctr_drv=lon_wst+lon_ncr*(lon_idx+0.5); if(nco_dbg_lvl_get() >= nco_dbg_std && idx_ctr == idx_dbg) (void)fprintf(stdout,"%s: DEBUG %s idx=%ld lat_idx=%ld, lon_idx=%ld, lat_sth=%g, lat_ncr=%g, lon_wst=%g, lon_ncr=%g\n",nco_prg_nm_get(),fnc_nm,idx_ctr,lat_idx,lon_idx,lat_sth,lat_ncr,lon_wst,lon_ncr); } /* !idx_ctr */ if(lat_ctr[idx_ctr] == mss_val_ctr_dbl){ if(lat_idx != 0L){ /* Not bottom row */ idx_ctr_sth=idx_ctr-lon_nbr; if(lat_ctr[idx_ctr_sth] != mss_val_ctr_dbl){ /* Copy southern corners from northern corners of southern neighbor */ idx_crn_sth=idx_ctr_sth*grd_crn_nbr; lat_crn[idx_crn+0L]=lat_crn[idx_crn_sth+3L]; lat_crn[idx_crn+1L]=lat_crn[idx_crn_sth+2L]; lon_crn[idx_crn+0L]=lon_crn[idx_crn_sth+3L]; lon_crn[idx_crn+1L]=lon_crn[idx_crn_sth+2L]; } /* !mss_val */ } /* !lat_idx */ if(lat_idx != lat_nbr-1L){ /* Not top row */ idx_ctr_nrt=idx_ctr+lon_nbr; if(lat_ctr[idx_ctr_nrt] != mss_val_ctr_dbl){ /* Copy northern corners from southern corners of northern neighbor */ idx_crn_nrt=idx_ctr_nrt*grd_crn_nbr; lat_crn[idx_crn+2L]=lat_crn[idx_crn_nrt+1L]; lat_crn[idx_crn+3L]=lat_crn[idx_crn_nrt+0L]; lon_crn[idx_crn+2L]=lon_crn[idx_crn_nrt+1L]; lon_crn[idx_crn+3L]=lon_crn[idx_crn_nrt+0L]; } /* !mss_val */ } /* !lat_idx */ /* Got to here before giving up Idea was to interpolate missing cell corners between previous and next valid cell */ /* Algorithm assumes lon_wst never changes (too simple for displaced/tri_pole) */ lon_ctr_drv=lon_wst+lon_ncr*(lon_idx+0.5); lon_crn_drv=lon_wst+lon_ncr*lon_idx; if(lon_ctr_drv >= 360.0) lon_ctr_drv-=360.0; lat_ctr[idx_ctr]=lat_ctr_drv; lon_ctr[idx_ctr]=lon_ctr_drv; lat_crn[idx_crn+0L]=lat_crn[idx_crn+1L]=lat_crn_drv; 
lat_crn[idx_crn+2L]=lat_crn[idx_crn+3L]=lat_crn_drv+lat_ncr; lon_crn[idx_crn+0L]=lon_crn[idx_crn+3L]=lon_crn_drv; lon_crn[idx_crn+1L]=lon_crn[idx_crn+2L]=lon_crn_drv+lon_ncr; /* Branch-cut rule */ if(lon_crn_drv+lon_ncr >= 360.0){ lon_crn[idx_crn+0L]=lon_crn[idx_crn+3L]=lon_crn_drv-360.0; lon_crn[idx_crn+1L]=lon_crn[idx_crn+2L]=lon_crn_drv+lon_ncr-360.0; } /* !brnch */ } /* !mss_val */ } /* !lon_idx */ } /* !lat_idx */ if(lon_vld_nxt) lon_vld_nxt=(long *)nco_free(lon_vld_nxt); if(lon_vld_prv) lon_vld_prv=(long *)nco_free(lon_vld_prv); } /* !False || !CICE */ if(lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT){ /* Interfaces (ntf) and boundaries (bnd) for curvilinear grids are ill-defined since sides need not follow latitudes nor meridians Simplest representation that contains equivalent information to interfaces/boundaries is grid corners array Diagnose grid corners from midpoints Most curvilinear data (e.g., WRF) is dimensioned lat x lon unlike SCRIP which uses lon x lat Hence we keep lat_ctr, lon_ctr, lat_crn, lon_crn with same order (likely lat x lon) as data file from which we infer grid Always use input order to write skeleton file Change that order, if necessary, to write SCRIP grid file In the interior of a curvilinear grid, nine points contribute to the four corners of a quadrilateral surrounding each center point These are the three points above the point, the three points at the same latitude, and the three points beneath the point In other words, a nine-point stencil is required to define the four corners inferred around each gridcell center It is cleanest to use this stencil only once for all cells in the "real"-grid, including those on the edges, not the interior For this to work cleanly we define an enlarged "fake"-grid where we pre-copy the values that lead to the desired extrapolation on "real"-grid edges Inspired by array-based solutions to integration of PDEs on meshes in Juri Toomre's class NB: implementation is not robust to missing value points in interior of grid. 
Hopefully grids have no missing values in coordinate variables, although they may have missing values in non-grid fields (e.g., mask, temperature) */ double *lat_ctr_fk; /* [dgr] Latitude grid with extrapolated boundaries necessary for 9-point template to find four grid corners for each real center */ double *lon_ctr_fk; /* [dgr] Longitude grid with extrapolated boundaries necessary for 9-point template to find four grid corners for each real center */ lat_ctr_fk=(double *)nco_malloc((lat_nbr+2)*(lon_nbr+2)*sizeof(double)); lon_ctr_fk=(double *)nco_malloc((lat_nbr+2)*(lon_nbr+2)*sizeof(double)); long int idx_rl; /* [idx] Index into real unrolled array */ long int idx_fk; /* [idx] Index into fake unrolled array */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ /* lat idx on real grid */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ /* lon idx on real grid */ idx_rl=lat_idx*lon_nbr+lon_idx; idx_fk=(lat_idx+1)*(lon_nbr+2)+lon_idx+1; /* Copy real grid to interior of fake grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]; lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]; } /* !lon */ } /* !lat */ /* Formulae to extrapolate sides and corners of fake grid are written as a starting lat/lon plus or minus adjustment Adjustment is positive-definite if grid monotonically increases in latitude and longitude from LL to UR 20160111: Use macros/functions to determine longitude adjustments that are always less than 180 This ensures all longitudes contributing to extrapolated longitude are from same branch cut */ /* Bottom row */ lat_idx=0; /* lat idx of extrapolated point on fake grid */ for(lon_idx=1;lon_idx<lon_nbr+1;lon_idx++){ /* lon idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on bottom row of fake grid */ idx_rl=lat_idx*lon_nbr+lon_idx-1; /* 1D-offset of neighboring point on bottom row of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]-(lat_ctr[idx_rl+lon_nbr]-lat_ctr[idx_rl]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]-nco_lon_dff_brnch_dgr(lon_ctr[idx_rl+lon_nbr],lon_ctr[idx_rl]); } /* !lon */ /* Top row */ lat_idx=lat_nbr+1; /* lat idx of extrapolated point on fake grid */ for(lon_idx=1;lon_idx<lon_nbr+1;lon_idx++){ /* lon idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on top row of fake grid */ idx_rl=(lat_nbr-1)*lon_nbr+lon_idx-1; /* 1D-offset of neighboring point on top row of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]+(lat_ctr[idx_rl]-lat_ctr[idx_rl-lon_nbr]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]+nco_lon_dff_brnch_dgr(lon_ctr[idx_rl],lon_ctr[idx_rl-lon_nbr]); } /* !lon */ /* Left side */ lon_idx=0; /* lon idx of extrapolated point on fake grid */ for(lat_idx=1;lat_idx<lat_nbr+1;lat_idx++){ /* lat idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on left side of fake grid */ idx_rl=(lat_idx-1)*lon_nbr+lon_idx; /* 1D-offset of neighboring point on left side of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]-(lat_ctr[idx_rl+1]-lat_ctr[idx_rl]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]-nco_lon_dff_brnch_dgr(lon_ctr[idx_rl+1],lon_ctr[idx_rl]); } /* !lat */ /* Right side */ lon_idx=lon_nbr+1; /* lon idx of extrapolated point on fake grid */ for(lat_idx=1;lat_idx<lat_nbr+1;lat_idx++){ /* lat idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on right side of fake grid */ idx_rl=(lat_idx-1)*lon_nbr+lon_idx-2; /* 1D-offset of neighboring point on right side of real grid */ 
lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]+(lat_ctr[idx_rl]-lat_ctr[idx_rl-1]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]+nco_lon_dff_brnch_dgr(lon_ctr[idx_rl],lon_ctr[idx_rl-1]); } /* !lat */ /* LL */ lat_ctr_fk[0]=lat_ctr_fk[lon_nbr+2]-(lat_ctr_fk[2*(lon_nbr+2)]-lat_ctr_fk[lon_nbr+2]); lon_ctr_fk[0]=lon_ctr_fk[1]-nco_lon_dff_brnch_dgr(lon_ctr_fk[2],lon_ctr_fk[1]); /* LR */ lat_ctr_fk[lon_nbr+1]=lat_ctr_fk[2*(lon_nbr+2)-1]-(lat_ctr_fk[3*(lon_nbr+2)-1]-lat_ctr_fk[2*(lon_nbr+2)-1]); lon_ctr_fk[lon_nbr+1]=lon_ctr_fk[lon_nbr]+nco_lon_dff_brnch_dgr(lon_ctr_fk[lon_nbr],lon_ctr_fk[lon_nbr-1]); /* UR */ lat_ctr_fk[(lat_nbr+2)*(lon_nbr+2)-1]=lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-1]+(lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-1]-lat_ctr_fk[lat_nbr*(lon_nbr+2)-1]); lon_ctr_fk[(lat_nbr+2)*(lon_nbr+2)-1]=lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-2]+nco_lon_dff_brnch_dgr(lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-2],lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-3]); /* UL */ lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)]=lat_ctr_fk[lat_nbr*(lon_nbr+2)]+(lat_ctr_fk[lat_nbr*(lon_nbr+2)]-lat_ctr_fk[(lat_nbr-1)*(lon_nbr+2)]); lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)]=lon_ctr_fk[lat_nbr*(lon_nbr+2)+1]-nco_lon_dff_brnch_dgr(lon_ctr_fk[lat_nbr*(lon_nbr+2)+2],lon_ctr_fk[lat_nbr*(lon_nbr+2)+1]); if(nco_dbg_lvl_get() >= nco_dbg_std){ long idx_dbg; idx_dbg=rgr->idx_dbg; (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Fake Center [lat,lon]=[%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,lat_ctr_fk[idx_dbg],lon_ctr_fk[idx_dbg]); } /* !dbg */ long int lat_idx_fk; /* [idx] Index into fake (extrapolated) latitude array */ long int lon_idx_fk; /* [idx] Index into fake (extrapolated) longitude array */ long int idx_fk_crn_ll_ctr_ll; long int idx_fk_crn_ll_ctr_lr; long int idx_fk_crn_ll_ctr_ur; long int idx_fk_crn_ll_ctr_ul; long int idx_fk_crn_lr_ctr_ll; long int idx_fk_crn_lr_ctr_lr; long int idx_fk_crn_lr_ctr_ur; long int idx_fk_crn_lr_ctr_ul; long int idx_fk_crn_ur_ctr_ll; long int idx_fk_crn_ur_ctr_lr; long int idx_fk_crn_ur_ctr_ur; long int idx_fk_crn_ur_ctr_ul; long int idx_fk_crn_ul_ctr_ll; long int idx_fk_crn_ul_ctr_lr; long int idx_fk_crn_ul_ctr_ur; long int idx_fk_crn_ul_ctr_ul; double *crn_lat; double *crn_lon; crn_lat=(double *)nco_malloc(grd_crn_nbr*sizeof(double)); crn_lon=(double *)nco_malloc(grd_crn_nbr*sizeof(double)); size_t wrn_nbr_max=20; size_t wrn_nbr=0; for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ /* 9-point template valid at all interior (non-edge) points in real grid, and at all points (including edges) in fake grid Read variables idx_crn_ll_ctr_ul as "index of upper left gridcell center that contributes to lower-left gridcell corner" Algorithms execute in counter-clockwise (CCW) direction: lower-left, lower-right, upper-right, upper-left lat_idx and lon_idx are true indices and are used to write into grd_crn_lat/lon arrays lat_idx_fk and lon_idx_fk are indices into fake arrays with extrapolated boundaries and are used to read data from fake arrays */ lon_idx_fk=lon_idx+1; lat_idx_fk=lat_idx+1; idx_rl=lat_idx*lon_nbr+lon_idx; idx_fk=lat_idx_fk*(lon_nbr+2)+lon_idx_fk; /* Determine index into fake array (valid everywhere it is applied) Comments after each equation are formula for real index (valid only at interior gridcells) */ idx_fk_crn_ll_ctr_ll=idx_fk-(lon_nbr+2)-1; // (lat_idx-1)*lon_nbr+lon_idx-1 idx_fk_crn_ll_ctr_lr=idx_fk-(lon_nbr+2); // (lat_idx-1)*lon_nbr+lon_idx idx_fk_crn_ll_ctr_ur=idx_fk; // lat_idx*lon_nbr+lon_idx idx_fk_crn_ll_ctr_ul=idx_fk-1; // lat_idx*lon_nbr+lon_idx-1; idx_fk_crn_lr_ctr_ll=idx_fk-(lon_nbr+2); // 
(lat_idx-1)*lon_nbr+lon_idx idx_fk_crn_lr_ctr_lr=idx_fk-(lon_nbr+2)+1; // (lat_idx-1)*lon_nbr+lon_idx+1 idx_fk_crn_lr_ctr_ur=idx_fk+1; // lat_idx*lon_nbr+lon_idx+1 idx_fk_crn_lr_ctr_ul=idx_fk; // lat_idx*lon_nbr+lon_idx; idx_fk_crn_ur_ctr_ll=idx_fk; // lat_idx*lon_nbr+lon_idx idx_fk_crn_ur_ctr_lr=idx_fk+1; // lat_idx*lon_nbr+lon_idx+1 idx_fk_crn_ur_ctr_ur=idx_fk+(lon_nbr+2)+1; // (lat_idx+1)*lon_nbr+lon_idx+1 idx_fk_crn_ur_ctr_ul=idx_fk+(lon_nbr+2); // (lat_idx+1)*lon_nbr+lon_idx; idx_fk_crn_ul_ctr_ll=idx_fk-1; // lat_idx*lon_nbr+lon_idx-1 idx_fk_crn_ul_ctr_lr=idx_fk; // lat_idx*lon_nbr+lon_idx idx_fk_crn_ul_ctr_ur=idx_fk+(lon_nbr+2); // (lat_idx+1)*lon_nbr+lon_idx idx_fk_crn_ul_ctr_ul=idx_fk+(lon_nbr+2)-1; // (lat_idx+1)*lon_nbr+lon_idx-1; /* 20160111: Algorithm requires that all longitudes in template be on same "branch cut" If, say, LL longitude is 179.0 and LR longitude is -179.0 then their sum and average are zero, not 180.0 or -180.0 as desired Routines labeled "*_brnch" in the following ensure that branch-cut rules are followed */ idx_crn_ll=grd_crn_nbr*idx_rl+0; lat_crn[idx_crn_ll]=0.25*(lat_ctr_fk[idx_fk_crn_ll_ctr_ll]+lat_ctr_fk[idx_fk_crn_ll_ctr_lr]+lat_ctr_fk[idx_fk_crn_ll_ctr_ur]+lat_ctr_fk[idx_fk_crn_ll_ctr_ul]); lon_crn[idx_crn_ll]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ll_ctr_ll],lon_ctr_fk[idx_fk_crn_ll_ctr_lr],lon_ctr_fk[idx_fk_crn_ll_ctr_ur],lon_ctr_fk[idx_fk_crn_ll_ctr_ul]); idx_crn_lr=grd_crn_nbr*idx_rl+1; lat_crn[idx_crn_lr]=0.25*(lat_ctr_fk[idx_fk_crn_lr_ctr_ll]+lat_ctr_fk[idx_fk_crn_lr_ctr_lr]+lat_ctr_fk[idx_fk_crn_lr_ctr_ur]+lat_ctr_fk[idx_fk_crn_lr_ctr_ul]); lon_crn[idx_crn_lr]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_lr_ctr_ll],lon_ctr_fk[idx_fk_crn_lr_ctr_lr],lon_ctr_fk[idx_fk_crn_lr_ctr_ur],lon_ctr_fk[idx_fk_crn_lr_ctr_ul]); idx_crn_ur=grd_crn_nbr*idx_rl+2; lat_crn[idx_crn_ur]=0.25*(lat_ctr_fk[idx_fk_crn_ur_ctr_ll]+lat_ctr_fk[idx_fk_crn_ur_ctr_lr]+lat_ctr_fk[idx_fk_crn_ur_ctr_ur]+lat_ctr_fk[idx_fk_crn_ur_ctr_ul]); lon_crn[idx_crn_ur]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ur_ctr_ll],lon_ctr_fk[idx_fk_crn_ur_ctr_lr],lon_ctr_fk[idx_fk_crn_ur_ctr_ur],lon_ctr_fk[idx_fk_crn_ur_ctr_ul]); idx_crn_ul=grd_crn_nbr*idx_rl+3; lat_crn[idx_crn_ul]=0.25*(lat_ctr_fk[idx_fk_crn_ul_ctr_ll]+lat_ctr_fk[idx_fk_crn_ul_ctr_lr]+lat_ctr_fk[idx_fk_crn_ul_ctr_ur]+lat_ctr_fk[idx_fk_crn_ul_ctr_ul]); lon_crn[idx_crn_ul]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ul_ctr_ll],lon_ctr_fk[idx_fk_crn_ul_ctr_lr],lon_ctr_fk[idx_fk_crn_ul_ctr_ur],lon_ctr_fk[idx_fk_crn_ul_ctr_ul]); crn_lat[0]=lat_crn[idx_crn_ll]; crn_lat[1]=lat_crn[idx_crn_lr]; crn_lat[2]=lat_crn[idx_crn_ur]; crn_lat[3]=lat_crn[idx_crn_ul]; crn_lon[0]=lon_crn[idx_crn_ll]; crn_lon[1]=lon_crn[idx_crn_lr]; crn_lon[2]=lon_crn[idx_crn_ur]; crn_lon[3]=lon_crn[idx_crn_ul]; /* 20210411: From 2016 until today, nco_ccw_chk() overwrote fourth (UL) with first (LL) corner */ flg_ccw=nco_ccw_chk(crn_lat,crn_lon,grd_crn_nbr,idx_ccw,rcr_lvl); if(!flg_ccw && wrn_nbr < wrn_nbr_max){ (void)fprintf(stdout,"%s: %s WARNING reports non-CCW gridcell at idx=%li, (lat,lon)_idx=(%li,%li), (lat,lon) = (%g, %g)\n",nco_prg_nm_get(),fnc_nm,idx_rl,lat_idx,lon_idx,lat_ctr[lat_idx],lon_ctr[lon_idx]); wrn_nbr++; if(wrn_nbr == wrn_nbr_max) (void)fprintf(stdout,"%s: %s INFO Number of non-CCW errors reached maximum = %li, not printing anymore\n",nco_prg_nm_get(),fnc_nm,wrn_nbr_max); } /* endif */ lat_crn[idx_crn_ll]=crn_lat[0]; lat_crn[idx_crn_lr]=crn_lat[1]; lat_crn[idx_crn_ur]=crn_lat[2]; lat_crn[idx_crn_ul]=crn_lat[3]; lon_crn[idx_crn_ll]=crn_lon[0]; 
lon_crn[idx_crn_lr]=crn_lon[1]; lon_crn[idx_crn_ur]=crn_lon[2]; lon_crn[idx_crn_ul]=crn_lon[3]; } /* !lon */ } /* !lat */ if(lat_ctr_fk) lat_ctr_fk=(double *)nco_free(lat_ctr_fk); if(lon_ctr_fk) lon_ctr_fk=(double *)nco_free(lon_ctr_fk); if(crn_lon) crn_lon=(double *)nco_free(crn_lon); if(crn_lat) crn_lat=(double *)nco_free(crn_lat); } /* !(lat_bnd_id && lon_bnd_id) */ } /* !flg_grd_crv */ if(flg_1D_psd_rct_bnd){ double lon_brnch_min; double lon_brnch_max; double lon_dff; assert(grd_crn_nbr == 4); /* Make boundaries that were provided as pseudo-rectangular branch-cut-compliant */ for(col_idx=0;col_idx<col_nbr;col_idx++){ lon_brnch_min=(lon_bnd[2*col_idx] <= lon_bnd[2*col_idx+1]) ? lon_bnd[2*col_idx] : lon_bnd[2*col_idx+1]; lon_brnch_max=(lon_bnd[2*col_idx] >= lon_bnd[2*col_idx+1]) ? lon_bnd[2*col_idx] : lon_bnd[2*col_idx+1]; lon_dff=lon_brnch_max-lon_brnch_min; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports 1D pseudo-rectangular bounds branch-cut straddle at col_idx=%ld lon_brnch_max, lon_brnch_min, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,col_idx,lon_brnch_max,lon_brnch_min,lon_dff); lon_brnch_max-=360.0; }else if(lon_dff <= -180.0){ lon_brnch_max+=360.0; } /* !lon_dff */ /* Extra condition to convert CW bounds to CCW bounds (necessary for OCO2) */ if(lon_brnch_min <= lon_brnch_max){ lon_bnd[2*col_idx]=lon_brnch_min; lon_bnd[2*col_idx+1]=lon_brnch_max; }else{ lon_bnd[2*col_idx]=lon_brnch_max; lon_bnd[2*col_idx+1]=lon_brnch_min; } /* end else */ } /* !col_idx */ /* Convert boundaries that were provided as pseudo-rectangular to corners */ for(col_idx=0;col_idx<col_nbr;col_idx++){ idx=grd_crn_nbr*col_idx; /* fxm: OCO2 provides boundaries in CW not CCW orientation */ lon_crn[idx]=lon_bnd[2*col_idx]; /* LL */ lon_crn[idx+1]=lon_bnd[2*col_idx+1]; /* LR */ lon_crn[idx+2]=lon_bnd[2*col_idx+1]; /* UR */ lon_crn[idx+3]=lon_bnd[2*col_idx]; /* UL */ lat_crn[idx]=lat_bnd[2*col_idx]; /* LL */ lat_crn[idx+1]=lat_bnd[2*col_idx]; /* LR */ lat_crn[idx+2]=lat_bnd[2*col_idx+1]; /* UR */ lat_crn[idx+3]=lat_bnd[2*col_idx+1]; /* UL */ /* fxm: OCO2 provides boundaries in CW not CCW orientation */ } /* !col_idx */ } /* flg_1D_psd_rct_bnd */ if(flg_grd_crv || flg_1D_psd_rct_bnd){ /* As of 20160308, use same sanity check for 1D pseudo-rectangular grids as for curvilinear grids Pseudo-rectangular grids rely on user-produced boundaries that may be psychotic (CW, non-branch-cut) Starting 20151205, use same sanity check for both inferred and copied curvilinear grids 20151129: Curvilinear extrapolation technique above yields corners outside [-90.0,90.0], [-180.0,360.0] Also, it may assume input is ascending swath and fail for descending swaths Complications not fully addressed: Swaths may (verify this) turn from ascending to descending, or vice versa, when satellite crosses latitude extrema Swaths may cross the date-line (and back!) 
*/ /* Determine numeric bounds of input coordinate system */ double lon_min_min; double lon_max_max; nco_bool NCO_LON_0_TO_360=True; if(has_mss_val_ctr){ for(idx=0;idx<grd_sz_nbr;idx++) if(lon_ctr[idx] != mss_val_ctr_dbl && lon_ctr[idx] < 0.0) break; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(lon_ctr[idx] < 0.0) break; } /* !has_mss_val_ctr */ if(idx != grd_sz_nbr) NCO_LON_0_TO_360=False; if(NCO_LON_0_TO_360){ lon_min_min=0.0; lon_max_max=360.0; }else{ lon_min_min=-180.0; lon_max_max=180.0; } /* !NCO_LON_0_TO_360 */ /* Correct for extrapolation outside boundaries */ for(idx=0;idx<grd_sz_nbr*grd_crn_nbr;idx++){ idx_ctr=idx/grd_crn_nbr; if(has_mss_val_ctr) if(lat_ctr[idx_ctr] == mss_val_ctr_dbl) continue; if(lat_crn[idx] < -90.0 || lat_crn[idx] > 90.0 || lon_crn[idx] < lon_min_min || lon_crn[idx] > lon_max_max){ idx_crn_ll=grd_crn_nbr*idx_ctr+0; idx_crn_lr=grd_crn_nbr*idx_ctr+1; idx_crn_ur=grd_crn_nbr*idx_ctr+2; idx_crn_ul=grd_crn_nbr*idx_ctr+3; if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s reports %s corner outside canonical bounds at idx = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,(lat_bnd_id == NC_MIN_INT) ? "inferred" : "copied",idx_ctr,lat_ctr[idx_ctr],lon_ctr[idx_ctr],lat_crn[idx_crn_ll],lon_crn[idx_crn_ll],lat_crn[idx_crn_lr],lon_crn[idx_crn_lr],lat_crn[idx_crn_ur],lon_crn[idx_crn_ur],lat_crn[idx_crn_ul],lon_crn[idx_crn_ul]); /* Restrict grid to real latitudes and to the 360-degree range detected from input cell-centers */ if(lat_crn[idx] < -90.0) lat_crn[idx]=-90.0; if(lat_crn[idx] > 90.0) lat_crn[idx]=90.0; if(lon_crn[idx] < lon_min_min) lon_crn[idx]+=360.0; if(lon_crn[idx] > lon_max_max) lon_crn[idx]-=360.0; } /* !sanity */ } /* !idx */ /* Vertices (for valid points) are now within 360 degrees (either [0,360] or [-180,180]) implied by input coordinate system Curvilinear inferred grids are, by construction, branch-cut compliant fxm: Curvilinear and 1D pseudo-rectangular grids prescribed by (i.e., read-in from) input may not be branch-cut compliant */ if(nco_dbg_lvl_get() >= nco_dbg_std){ long idx_dbg; idx_dbg=rgr->idx_dbg; idx_crn_ll=grd_crn_nbr*idx_dbg+0; idx_crn_lr=grd_crn_nbr*idx_dbg+1; idx_crn_ur=grd_crn_nbr*idx_dbg+2; idx_crn_ul=grd_crn_nbr*idx_dbg+3; (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,lat_ctr[idx_dbg],lon_ctr[idx_dbg],lat_crn[idx_crn_ll],lon_crn[idx_crn_ll],lat_crn[idx_crn_lr],lon_crn[idx_crn_lr],lat_crn[idx_crn_ur],lon_crn[idx_crn_ur],lat_crn[idx_crn_ul],lon_crn[idx_crn_ul]); } /* !dbg */ } /* !flg_grd_crv || flg_1D_psd_rct_bnd */ if(flg_grd_crv){ /* Copy centers into empty output array */ for(idx=0;idx<grd_sz_nbr;idx++){ grd_ctr_lat[idx]=lat_ctr[idx]; grd_ctr_lon[idx]=lon_ctr[idx]; } /* !idx */ /* Copy inferred or copied (from input) sanity-checked corners into empty output array */ for(idx=0;idx<grd_sz_nbr*grd_crn_nbr;idx++){ grd_crn_lat[idx]=lat_crn[idx]; grd_crn_lon[idx]=lon_crn[idx]; } /* !idx */ } /* !flg_grd_crv */ /* 20150512 Many 2D datasets have bad bounds Primary example is Gaussian grids archived by CESM models that use midpoint rule rather than iterate to compute interfaces from quadrature points Such files have correct gw arrays and incorrect cell bounds flg_dgn_bnd allows nco_grd_nfr() to override faulty boundaries in file with correct bounds */ const nco_bool flg_dgn_bnd=rgr->flg_dgn_bnd; /* [flg] Diagnose rather than copy inferred bounds */ 
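/* Illustrative aside, not part of NCO: the scan above infers the longitude branch ([0,360] vs. [-180,180]) from the sign of the cell-center longitudes, then clamps stray corners back into that branch. The stand-alone sketch below distills only that detection step. dtc_lon_brnch is a hypothetical name, not an NCO API, missing-value handling is omitted for brevity, and the block is fenced with #if 0 so it cannot alter this translation unit. */
#if 0
#include <stdio.h>
/* Return branch limits implied by cell-center longitudes: any negative
   center selects [-180,180], otherwise assume [0,360] */
static void
dtc_lon_brnch(const double *lon_ctr,long grd_sz_nbr,double *lon_min_min,double *lon_max_max)
{
  long idx;
  for(idx=0;idx<grd_sz_nbr;idx++)
    if(lon_ctr[idx] < 0.0) break;
  if(idx == grd_sz_nbr){*lon_min_min=0.0;*lon_max_max=360.0;}
  else{*lon_min_min=-180.0;*lon_max_max=180.0;}
} /* !dtc_lon_brnch */
int main(void)
{
  const double lon_ctr[4]={359.5,0.5,180.0,90.0};
  double lon_min_min,lon_max_max;
  dtc_lon_brnch(lon_ctr,4L,&lon_min_min,&lon_max_max);
  (void)fprintf(stdout,"branch = [%g, %g]\n",lon_min_min,lon_max_max); /* Prints [0, 360] since no center is negative */
  return 0;
} /* !main */
#endif /* !0 */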
const long lat_nbr_hlf=lat_nbr/2L; // [nbr] Half number of latitudes (e.g., lat_nbr_hlf=32 for lat_nbr=64 and 65) if(flg_grd_2D){ if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){ if(nco_dbg_lvl_get() >= nco_dbg_std && flg_dgn_bnd) (void)fprintf(stdout,"%s: INFO %s will diagnose cell boundaries from cell centers...\n",nco_prg_nm_get(),fnc_nm); /* Derive interfaces (ntf) and bounds (bnd) from midpoints approximation applied to center data NB: Simplistically derived interfaces (ntf) only valid on some rectangular grids (not on Gaussian grids) These inferred-from-midpoint interfaces/bounds are overwritten in next block once lat grid is known */ if(flg_s2n) lat_ntf[0L]=lat_ctr[0L]-0.5*(lat_ctr[1L]-lat_ctr[0L]); else lat_ntf[0L]=lat_ctr[0L]+0.5*(lat_ctr[0L]-lat_ctr[1L]); if(lat_ntf[0L] < -90.0) lat_ntf[0L]=-90.0; /* NB: lat_ntf[0] can be same as lat_ctr[0] for cap grid */ if(lat_ntf[0L] > 90.0) lat_ntf[0L]=90.0; for(lat_idx=0L;lat_idx<lat_nbr-1L;lat_idx++) lat_ntf[lat_idx+1L]=0.5*(lat_ctr[lat_idx]+lat_ctr[lat_idx+1L]); if(flg_s2n) lat_ntf[lat_nbr]=lat_ctr[lat_nbr-1L]+0.5*(lat_ctr[lat_nbr-1L]-lat_ctr[lat_nbr-2L]); else lat_ntf[lat_nbr]=lat_ctr[lat_nbr-1L]-0.5*(lat_ctr[lat_nbr-2L]-lat_ctr[lat_nbr-1L]); if(lat_ntf[lat_nbr] > 90.0) lat_ntf[lat_nbr]=90.0; /* NB: lat_ntf[lat_nbr] can be same as lat_ctr[lat_nbr-1] for cap grid */ if(lat_ntf[lat_nbr] < -90.0) lat_ntf[lat_nbr]=-90.0; /* NB: lat_ntf[lat_nbr] can be same as lat_ctr[lat_nbr-1] for cap grid */ if(flg_s2n) lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); /* fabs() ensures positive-definite span for N->S grids */ lon_ntf[0L]=lon_ctr[0L]-0.5*(lon_ctr[1L]-lon_ctr[0L]); for(lon_idx=0;lon_idx<lon_nbr-1L;lon_idx++) lon_ntf[lon_idx+1L]=0.5*(lon_ctr[lon_idx]+lon_ctr[lon_idx+1L]); lon_ntf[lon_nbr]=lon_ctr[lon_nbr-1L]+0.5*(lon_ctr[lon_nbr-1L]-lon_ctr[lon_nbr-2L]); lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L]; for(idx=0;idx<lon_nbr;idx++){ lon_bnd[2L*idx]=lon_ntf[idx]; lon_bnd[2L*idx+1L]=lon_ntf[idx+1L]; } /* !idx */ for(idx=0;idx<lat_nbr;idx++){ lat_bnd[2L*idx]=lat_ntf[idx]; lat_bnd[2L*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ }else{ /* !(lat_bnd_id && lon_bnd_id) */ /* Derive interfaces (ntf) from bounds (bnd) data on disk */ for(idx=0;idx<lon_nbr;idx++) lon_ntf[idx]=lon_bnd[2L*idx]; lon_ntf[lon_nbr]=lon_bnd[2L*lon_nbr-1L]; for(idx=0;idx<lat_nbr;idx++) lat_ntf[idx]=lat_bnd[2L*idx]; lat_ntf[lat_nbr]=lat_bnd[2L*lat_nbr-1L]; lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); /* fabs() ensures positive-definite span for N->S grids */ lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L]; } /* !(lat_bnd_id && lon_bnd_id) */ } /* !flg_grd_2D */ if(flg_grd_2D){ /* Diagnose type of two-dimensional input grid by testing second latitude center against formulae */ double lat_ctr_tst_eqa; double lat_ctr_tst_fv; if(flg_s2n) lat_ctr_tst_eqa=lat_ntf[0L]+lat_spn*1.5/lat_nbr; else lat_ctr_tst_eqa=lat_ntf[0L]-lat_spn*1.5/lat_nbr; if(flg_s2n) lat_ctr_tst_fv=lat_ntf[0L]+lat_spn/(lat_nbr-1L); else lat_ctr_tst_fv=lat_ntf[0L]-lat_spn/(lat_nbr-1L); double lat_ctr_tst_gss; /* In diagnosing grids, agreement with input to single-precision is "good enough for government work" Hence some comparisons cast from double to float before comparison 20150526: T42 grid from SCRIP and related maps are only accurate to ~eight digits 20150611: map_ne120np4_to_fv801x1600_bilin.150418.nc has yc_b[1600]=-89.775000006 not expected exact value lat_ctr[1]=-89.775000000000006 20170521: T62 grid from NCEP-NCAR Reanalysis 1 worse than single precision, has yc_[192]=-86.6531 not expected exact value 
lat_ctr[1]=-86.6531671712612, relative difference is 7.86021e-07 20191008: T62 grid from NCEP-NCAR Reanalysis 2 worse than single precision, has yc_[92]=-86.6531 not expected exact value lat_ctr[1]=-86.6531671712612, relative difference is 7.86021e-07 */ if(nco_dbg_lvl_get() >= nco_dbg_scl && !flg_s2n) (void)fprintf(stderr,"%s: INFO %s reports that grid inferral has detected a 2D grid that runs from north-to-south, not south-to-north. Support for creating/inferring 2D N-to-S grids was added in NCO 4.7.7 (September, 2018) and should work fine.\nHINT: If present command fails, report problem to developers and then re-try inferring grid after reversing input dataset's latitude coordinate (with, e.g., ncpdq -a time,-lat,lon in.nc out.nc)\n",nco_prg_nm_get(),fnc_nm); if((float)lat_ctr[1L] == (float)lat_ctr_tst_eqa) lat_typ=nco_grd_lat_eqa; if((float)lat_ctr[1L] == (float)lat_ctr_tst_fv) lat_typ=nco_grd_lat_fv; double *lat_sin=NULL_CEWI; // [frc] Sine of Gaussian latitudes double precision double *wgt_Gss=NULL; // [frc] Gaussian weights double precision if(lat_typ == nco_grd_lat_nil){ /* Check for Gaussian grid */ lat_sin=(double *)nco_malloc(lat_nbr*sizeof(double)); wgt_Gss=(double *)nco_malloc(lat_nbr*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr,flg_s2n,lat_sin,wgt_Gss); lat_ctr_tst_gss=rdn2dgr*asin(lat_sin[1L]); /* Gaussian weights on output grid will be double-precision accurate Grid itself is kept as user-specified so area diagnosed by ESMF_RegridWeightGen may be slightly inconsistent with weights */ const double eps_rlt_cnv_gss=1.0e-6; // Convergence criterion (1.0e-7 fails for NCEP NCAR Reanalysis 1!) if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG %s reports lat_ctr[1]=%g, lat_ctr_tst_gss=%g, fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss))=%g\n",nco_prg_nm_get(),fnc_nm,lat_ctr[1],lat_ctr_tst_gss,fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss))); if(fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss)) < eps_rlt_cnv_gss) lat_typ=nco_grd_lat_gss; } /* !Gaussian */ if(lat_typ == nco_grd_lat_nil){ /* If still of unknown type, this 2D grid may be weird This occurs, e.g., with POP3 destination grid Change gridtype from nil (which means not-yet-set) to unknown (which means none of the others matched) */ lat_typ=nco_grd_lat_unk; } /* !nil */ /* Currently grd_lat_typ and grd_2D_typ are equivalent, though that may be relaxed in future */ if(lat_typ == nco_grd_lat_unk) grd_typ=nco_grd_2D_unk; else if(lat_typ == nco_grd_lat_gss) grd_typ=nco_grd_2D_gss; else if(lat_typ == nco_grd_lat_fv) grd_typ=nco_grd_2D_fv; else if(lat_typ == nco_grd_lat_eqa) grd_typ=nco_grd_2D_eqa; else assert(False); /* Diagnose latitude interfaces from gridcell centers (if boundaries not provided) */ if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){ //if(flg_s2n) lat_nrt=lat_ntf[lat_nbr]; else lat_nrt=lat_ntf[0L]; lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); switch(lat_typ){ case nco_grd_lat_fv: lat_ncr=lat_spn/(lat_nbr-1L); if(flg_s2n) lat_ntf[1L]=lat_ntf[0L]+0.5*lat_ncr; else lat_ntf[1L]=lat_ntf[0L]-0.5*lat_ncr; for(lat_idx=2;lat_idx<lat_nbr;lat_idx++) if(flg_s2n) lat_ntf[lat_idx]=lat_ntf[1L]+(lat_idx-1L)*lat_ncr; else lat_ntf[lat_idx]=lat_ntf[1L]-(lat_idx-1L)*lat_ncr; break; case nco_grd_lat_eqa: lat_ncr=lat_spn/lat_nbr; for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) if(flg_s2n) lat_ntf[lat_idx]=lat_ntf[0L]+lat_idx*lat_ncr; else lat_ntf[lat_idx]=lat_ntf[0L]-lat_idx*lat_ncr; break; case nco_grd_lat_gss: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); /* First 
guess for lat_ntf is midway between Gaussian abscissae */ for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=0.5*(lat_ctr[lat_idx-1L]+lat_ctr[lat_idx]); /* Iterate guess until area between interfaces matches Gaussian weight */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++){ double fofx_at_x0; /* [frc] Function to iterate evaluated at current guess */ double dfdx_at_x0; /* [frc] Derivative of equation evaluated at current guess */ // 20190531: Wuyin Lin reports this convergence criterion fails on ECMWF F640 grid // Probably because latitude iterations assume s2n grid and ECMWF is n2s // Possibly also because latitude coordinates are stored in single precision // Implement precision-dependent convergence criterion, e.g., 1.0e-15 and 1.0e-7 for double- and single-precision, respectively? const double eps_rlt_cnv=1.0e-15; // Convergence criterion (1.0e-16 pushes double precision to the brink) itr_cnt=0; lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; while(fabs(fofx_at_x0) > eps_rlt_cnv){ /* Newton-Raphson iteration: Let x=lat_ntf[lat_idx], y0=lat_ntf[lat_idx-1], gw = Gaussian weight (exact solution) f(x)=sin(dgr2rdn*x)-sin(dgr2rdn*y0)-gw=0 # s2n grid f(x)=sin(dgr2rdn*y0)-sin(dgr2rdn*x)-gw=0 # n2s grid dfdx(x)= dgr2rdn*cos(dgr2rdn*x) # s2n grid dfdx(x)=-dgr2rdn*cos(dgr2rdn*x) # n2s grid x_better=x0-f(x0)/f'(x0) */ dfdx_at_x0=dgr2rdn*cos(dgr2rdn*lat_ntf[lat_idx]); if(!flg_s2n) dfdx_at_x0=-dfdx_at_x0; lat_ntf[lat_idx]+=fofx_at_x0/dfdx_at_x0; /* NB: fofx_at_x0 is gw minus the current area, i.e., -f(x0), so Newton's x1=x0-f(x0)/f'(x0) becomes the += update used here */ lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; if(++itr_cnt > itr_nbr_max){ (void)fprintf(stdout,"%s: ERROR %s reports convergence residual still %g after %d iterations for lat_idx = %ld\n",nco_prg_nm_get(),fnc_nm,fabs(fofx_at_x0),itr_nbr_max,lat_idx); nco_exit(EXIT_FAILURE); } /* endif */ } /* !while */ } /* !lat_idx */ /* Use Gaussian grid symmetry to obtain same interfaces in both hemispheres (avoids cumulative rounding errors) */ if(lat_nbr%2){ /* lat_nbr is odd */ for(lat_idx=1L;lat_idx<=lat_nbr_hlf+1L;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx+1L]; }else{ /* lat_nbr is even */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx]; } /* !flg_lat_evn */ if(lat_sin) lat_sin=(double *)nco_free(lat_sin); break; case nco_grd_lat_unk: /* No generic formula exists so use interfaces already read or diagnosed as midpoints between centers */ break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ if(lat_typ == nco_grd_lat_gss){ /* 20170510: First approximations above to exterior interfaces for Gaussian grid are ~ +/-89 degrees Loops below recompute interior interfaces only Southern- and northern-most interfaces must be explicitly assigned Inferral test for Gaussian grid _assumes_ global grid Hence WLOG can assign [-90.0, 90.0] to Gaussian grid exterior boundaries */ if(flg_s2n) lat_ntf[0L]=-90.0; else lat_ntf[0L]=90.0; if(flg_s2n) lat_ntf[lat_nbr]=90.0; else lat_ntf[lat_nbr]=-90.0; } /* !nco_grd_lat_gss */ /* Now that final latitude interfaces are known for all grid-types, assign to boundaries, overwriting provisional values stored there earlier */ for(idx=0;idx<lat_nbr;idx++){ lat_bnd[2L*idx]=lat_ntf[idx]; lat_bnd[2L*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ } /* !(lat_bnd_id && lon_bnd_id) */ 
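/* Illustrative aside, not part of NCO: a stand-alone sketch of the Newton-Raphson iteration above, restricted to the south-to-north case. Because fofx is defined as gw minus the enclosed area, i.e., -f(x0), the update x1=x0-f(x0)/f'(x0) appears as a "+=" just as in the loop above. ntf_gss_nwt is a hypothetical name, not an NCO API; the block is fenced with #if 0 so it cannot alter this translation unit. */
#if 0
#include <math.h>
#include <stdio.h>
/* Solve sin(dgr2rdn*x)-sin(dgr2rdn*y0)-gw=0 for northern interface x [dgr],
   given southern interface y0 [dgr] and target Gaussian weight gw */
static double
ntf_gss_nwt(double y0,double gw,double x0)
{
  const double dgr2rdn=3.14159265358979323846/180.0;
  double fofx=gw-(sin(dgr2rdn*x0)-sin(dgr2rdn*y0)); /* -f(x0), as in loop above */
  int itr_cnt=0;
  while(fabs(fofx) > 1.0e-15 && ++itr_cnt < 100){
    double dfdx=dgr2rdn*cos(dgr2rdn*x0); /* f'(x0) for s2n grid */
    x0+=fofx/dfdx; /* x1=x0-f(x0)/f'(x0) with fofx=-f(x0) */
    fofx=gw-(sin(dgr2rdn*x0)-sin(dgr2rdn*y0));
  } /* !while */
  return x0;
} /* !ntf_gss_nwt */
int main(void)
{
  /* Toy case: interface north of -90 enclosing area 0.1 is asin(-0.9) ~ -64.158 dgr */
  (void)fprintf(stdout,"lat_ntf = %.12f\n",ntf_gss_nwt(-90.0,0.1,-66.0));
  return 0;
} /* !main */
#endif /* !0 */
/* Use centers and boundaries to diagnose latitude weights */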
switch(lat_typ){ case nco_grd_lat_eqa: case nco_grd_lat_fv: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); break; case nco_grd_lat_gss: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=wgt_Gss[lat_idx]; break; case nco_grd_lat_unk: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING %s reports unknown input latitude grid-type. Guessing that weights for grid of rectangles is OK.\n",nco_prg_nm_get(),fnc_nm); break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Diagnose type of longitude grid by testing second longitude center against formulae */ lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L]; lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; if(lon_typ == nco_grd_lon_nil){ if( (float)lon_ctr[0L] == 0.0f && (float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_Grn_ctr; else if((float)lon_ctr[0L] == -180.0f && (float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_180_ctr; else if((float)lon_ntf[0L] == 0.0f && (float)lon_ntf[1L] == (float)(lon_ntf[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_Grn_wst; else if((float)lon_ntf[0L] == -180.0f && (float)lon_ntf[1L] == (float)(lon_ntf[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_180_wst; else if((float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_bb; else lon_typ=nco_grd_lon_unk; } /* !lon_typ */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input 2D grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_2D_sng(grd_typ)); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input latitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lat_sng(lat_typ)); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input longitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lon_sng(lon_typ)); } /* !flg_grd_2D */ if(flg_grd_2D){ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0;idx<lat_nbr;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? "]\n" : ", "); } /* end loop over lat */ for(idx=0;idx<lon_nbr;idx++){ (void)fprintf(stdout,"lon[%li] = %g, vertices = ",idx,lon_ctr[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lon_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? 
"]\n" : ", "); } /* end loop over lon */ } /* endif dbg */ /* Fuzzy test of latitude weight normalization */ //const double eps_rlt_max=1.0e-14; /* [frc] Round-off error tolerance: Used 1.0e-14 until 20180904 */ const double eps_rlt_max=1.0e-12; /* [frc] Round-off error tolerance: Used 1.0e-12 since 20180904 */ lat_wgt_ttl=0.0; for(idx=0;idx<lat_nbr;idx++) lat_wgt_ttl+=lat_wgt[idx]; if(grd_typ == nco_grd_2D_fv || grd_typ == nco_grd_2D_eqa){ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd[2*(lat_nbr-1)+1L])-sin(dgr2rdn*lat_bnd[0L])); if(grd_typ != nco_grd_2D_unk && fabs(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc) > eps_rlt_max){ (void)fprintf(stdout,"%s: ERROR %s reports grid normalization does not meet precision tolerance eps_rlt_max = %20.15f\nlat_wgt_ttl = %20.15f, lat_wgt_ttl_xpc = %20.15f, lat_wgt_frc = %20.15f, eps_rlt = %20.15f\n",nco_prg_nm_get(),fnc_nm,eps_rlt_max,lat_wgt_ttl,lat_wgt_ttl_xpc,lat_wgt_ttl/lat_wgt_ttl_xpc,1.0-lat_wgt_ttl/lat_wgt_ttl_xpc); nco_exit(EXIT_FAILURE); } /* !imprecise */ } /* !nco_grd_lat_eqa, !nco_grd_lat_fv */ } /* !flg_grd_2D */ if(flg_grd_2D){ assert(grd_crn_nbr == 4); if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){ /* If interfaces were diagnosed from centers, copy corners from interfaces */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_ntf[lon_idx]; /* LL */ lon_crn[idx+1L]=lon_ntf[lon_idx+1L]; /* LR */ lon_crn[idx+2L]=lon_ntf[lon_idx+1L]; /* UR */ lon_crn[idx+3L]=lon_ntf[lon_idx]; /* UL */ } /* !lon_idx */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_ntf[lat_idx]; /* LL */ lat_crn[idx+1L]=lat_ntf[lat_idx]; /* LR */ lat_crn[idx+2L]=lat_ntf[lat_idx+1L]; /* UR */ lat_crn[idx+3L]=lat_ntf[lat_idx+1L]; /* UL */ } /* !lat_idx */ }else{ /* !lat_bnd_id */ /* If boundaries were provided in input dataset, copy corners from boundaries */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_bnd[2*lon_idx]; /* LL */ lon_crn[idx+1L]=lon_bnd[2*lon_idx+1L]; /* LR */ lon_crn[idx+2L]=lon_bnd[2*lon_idx+1L]; /* UR */ lon_crn[idx+3L]=lon_bnd[2*lon_idx]; /* UL */ } /* !lon_idx */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_bnd[2*lat_idx]; /* LL */ lat_crn[idx+1L]=lat_bnd[2*lat_idx]; /* LR */ lat_crn[idx+2L]=lat_bnd[2*lat_idx+1L]; /* UR */ lat_crn[idx+3L]=lat_bnd[2*lat_idx+1L]; /* UL */ } /* !lat_idx */ } /* !lat_bnd_id */ } /* !flg_grd_2D */ /* lat/lon_crn will not change anymore so stuff rectangular arrays into unrolled arrays */ if(flg_grd_1D){ for(idx=0;idx<grd_sz_nbr;idx++){ grd_ctr_lat[idx]=lat_ctr[idx]; grd_ctr_lon[idx]=lon_ctr[idx]; if(flg_wrt_crn){ for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; grd_crn_lat[idx2]=lat_crn[idx2]; grd_crn_lon[idx2]=lon_crn[idx2]; } /* !crn */ }else{ /* !flg_wrt_crn */ /* Defaults for ERWG when corners are unknown */ for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; grd_crn_lat[idx2]=0.0; grd_crn_lon[idx2]=0.0; } /* !crn */ } /* !flg_wrt_crn */ } /* !col */ } /* !flg_grd_1D */ if(flg_grd_2D){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]=lat_ctr[lat_idx]; grd_ctr_lon[idx]=lon_ctr[lon_idx]; for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; 
grd_crn_lon[idx2]=lon_crn[lon_idx2]; } /* !crn */ } /* !lon */ } /* !lat */ /* 20190613: Convert CW quadrilaterals to CCW quadrilaterals so TempestRemap accepts grids Default construction/inferral method orders corners CCW and CW for s2n and n2s grids, respectively */ if(!flg_s2n){ for(idx=0L;idx<grd_sz_nbr;idx++){ idx2=grd_crn_nbr*idx; flg_ccw=nco_ccw_chk(grd_crn_lat+idx2,grd_crn_lon+idx2,grd_crn_nbr,idx_ccw,rcr_lvl); } /* !idx */ } /* !flg_s2n */ } /* !flg_grd_2D */ /* Find span of all grids */ double lat_max; /* [dgr] Maximum latitude */ double lat_min; /* [dgr] Minimum latitude */ double lon_max; /* [dgr] Maximum longitude */ double lon_min; /* [dgr] Minimum longitude */ idx_ctr=0; if(has_mss_val_ctr){ /* Find first non-missing value center and thus corners */ for(idx_ctr=0;idx_ctr<grd_sz_nbr;idx_ctr++){ if(grd_ctr_lat[idx_ctr] != mss_val_ctr_dbl) break; } /* !grd_sz_nbr */ assert(idx_ctr != grd_sz_nbr); } /* !has_mss_val_ctr */ if(flg_wrt_crn){ /* Grids with corner boundaries supplied or inferred */ lon_max=grd_crn_lon[idx_ctr*grd_crn_nbr]; lat_max=grd_crn_lat[idx_ctr*grd_crn_nbr]; lon_min=grd_crn_lon[idx_ctr*grd_crn_nbr]; lat_min=grd_crn_lat[idx_ctr*grd_crn_nbr]; for(idx=1;idx<grd_sz_nbr*grd_crn_nbr;idx++){ idx_ctr=idx/grd_crn_nbr; if(has_mss_val_ctr) if(grd_ctr_lat[idx_ctr] == mss_val_ctr_dbl) continue; lat_max=(grd_crn_lat[idx] > lat_max) ? grd_crn_lat[idx] : lat_max; lon_max=(grd_crn_lon[idx] > lon_max) ? grd_crn_lon[idx] : lon_max; lat_min=(grd_crn_lat[idx] < lat_min) ? grd_crn_lat[idx] : lat_min; lon_min=(grd_crn_lon[idx] < lon_min) ? grd_crn_lon[idx] : lon_min; } /* !idx */ }else{ /* !flg_wrt_crn */ /* 20170424: Diagnose grid-extent when corners were not provided or inferred This is usually (always?) for 1d unstructured grids with only centers provided */ lon_max=grd_ctr_lon[idx_ctr]; lat_max=grd_ctr_lat[idx_ctr]; lon_min=grd_ctr_lon[idx_ctr]; lat_min=grd_ctr_lat[idx_ctr]; for(idx_ctr=1;idx_ctr<grd_sz_nbr;idx_ctr++){ if(has_mss_val_ctr) if(grd_ctr_lat[idx_ctr] == mss_val_ctr_dbl) continue; lat_max=(grd_ctr_lat[idx_ctr] > lat_max) ? grd_ctr_lat[idx_ctr] : lat_max; lon_max=(grd_ctr_lon[idx_ctr] > lon_max) ? grd_ctr_lon[idx_ctr] : lon_max; lat_min=(grd_ctr_lat[idx_ctr] < lat_min) ? grd_ctr_lat[idx_ctr] : lat_min; lon_min=(grd_ctr_lon[idx_ctr] < lon_min) ? 
grd_ctr_lon[idx_ctr] : lon_min; } /* !idx_ctr */ } /* flg_wrt_crn */ lat_spn=lat_max-lat_min; lon_spn=lon_max-lon_min; /* Use strict rules for rectangular grids, looser for spans that are inferred, or center-to-center not corner-to-corner */ if(flg_grd_2D){ if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; }else{ /* !flg_grd_2D */ if((float)lon_spn >= 340.0f && (float)lat_spn >= 170.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; } /* !flg_grd_2D */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports grid resolution %li x %li, spans %g x %g degrees: [%g <= lat <= %g], [%g <= lon <= %g]\n",nco_prg_nm_get(),fnc_nm,lat_nbr,lon_nbr,lat_spn,lon_spn,lat_min,lat_max,lon_min,lon_max); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input grid-extent: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_xtn_sng(nco_grd_xtn)); /* Write ERWG hints if filenames provided and grid is regional */ char *fl_hnt=NULL; char *fl_hnt_dst=NULL; char *fl_hnt_src=NULL; if(rgr->fl_hnt_dst) fl_hnt=fl_hnt_dst=rgr->fl_hnt_dst; if(rgr->fl_hnt_src) fl_hnt=fl_hnt_src=rgr->fl_hnt_src; if(nco_grd_xtn == nco_grd_xtn_rgn && fl_hnt){ const char *fl_mode="w"; FILE *fp_hnt; /* [fl] Hint file (for ERWG switches) file handle */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s writing ERWG weight-generation regional hint to file %s\n",nco_prg_nm_get(),fnc_nm,fl_hnt); /* Open output file */ if((fp_hnt=fopen(fl_hnt,fl_mode)) == NULL){ (void)fprintf(stderr,"%s: ERROR unable to open hint output file %s\n",nco_prg_nm_get(),fl_hnt); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: Opened hint file %s\n",nco_prg_nm_get(),fl_hnt); if(fl_hnt_src) (void)fprintf(fp_hnt,"--src_regional"); if(fl_hnt_dst) (void)fprintf(fp_hnt,"--dst_regional"); rcd=fclose(fp_hnt); if(rcd != 0){ (void)fprintf(stderr,"%s: ERROR unable to close hint output file %s\n",nco_prg_nm_get(),fl_hnt); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: Closed hint file %s\n",nco_prg_nm_get(),fl_hnt); } /* !nco_grd_xtn */ /* Diagnose area if necessary 20170510: ALM/CLM "area" is _FillValue=1.0e36f over ocean and total gridcell area in km2 (not multiplied by landfrac) elsewhere Writing this ALM/CLM "area" variable to gridfile, then using with ERWG --user_areas could be disastrous (depending on mask array and interpolation type) On the other hand CAM "area" variable is exactly what we want for gridfile Input areas are considered "untrustworthy" iff they have _and use_ missing value attribute Re-diagnose areas considered untrustworthy so output area array does not contain missing values */ if(flg_wrt_crn && has_mss_val_area){ const double mss_val_dbl=mss_val_area_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(area[idx] == mss_val_dbl) break; if(idx < grd_sz_nbr) use_mss_val_area=True; if(nco_dbg_lvl_get() >= nco_dbg_fl && use_mss_val_area) (void)fprintf(stdout,"%s: INFO %s reports input area field %s is considered untrustworthy because it uses missing values, will diagnose area from cell boundaries instead...\n",nco_prg_nm_get(),fnc_nm,area_nm_in); } /* !has_mss_val_area */ /* 20170511: There remain a handful of cases when input area should be diagnosed not copied These include using ncremap in SGS mode when inferred grids must use sensible area units Otherwise an inferred grid with area [km2] from ALM/CLM might be combined with
area [sr] from NCO This would bias ERWG --user_areas produced values by ~10^10 Setting flg_dgn_area ensures inferred area uses [sr] */ const nco_bool flg_dgn_area=rgr->flg_dgn_area; /* [flg] Diagnose rather than copy inferred area */ if(flg_wrt_crn && /* If bounds are available to compute area and ... */ (area_id == NC_MIN_INT || /* Area is not in input file ... */ use_mss_val_area || /* Area is untrustworthy */ flg_dgn_area)){ /* User/application explicitly requests diagnostic area */ /* Not absolutely necessary to diagnose area because ERWG will diagnose and output area itself _unless_ --user_areas option is given */ if(nco_dbg_lvl_get() >= nco_dbg_std && flg_dgn_area) (void)fprintf(stdout,"%s: INFO %s reports diagnosing area from cell boundaries...\n",nco_prg_nm_get(),fnc_nm); if(flg_grd_crv || flg_grd_1D){ /* Area of arbitrary unstructured or curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,grd_crn_lat,grd_crn_lon,grd_sz_nbr,grd_crn_nbr,area); }else if(flg_grd_2D){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) area[lat_idx*lon_nbr+lon_idx]=fabs(dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*(sin(dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(dgr2rdn*lat_bnd[2*lat_idx]))); /* fabs() ensures positive area in n2s grids */ } /* !flg_grd_2D */ } /* !area_id */ /* ERWG will fail unless grid file has mask variable Use nul-mask (all points included) whenever input mask variable not supplied/detected Define nul-mask true everywhere and overwrite with false below Input mask can be any type and output mask will always be NC_INT */ for(idx=0;idx<grd_sz_nbr;idx++) msk[idx]=1; if(msk_id != NC_MIN_INT){ /* Change missing-value-masked points to 0 integer mask for SCRIP grids (SCRIP has no missing value convention) Input mask can be any type and output mask will always be NC_INT Applications: ALM/CLM mask (landmask) is NC_FLOAT and defines though does not use NC_FLOAT missing value CICE mask (tmask/umask) is NC_FLOAT and defines and uses NC_FLOAT missing value RACMO mask is NC_FLOAT and defines though does not use NC_FLOAT missing value AMSR mask is NC_SHORT and has no missing value GHRSST mask is NC_BYTE and is a multi-valued surface-type flag with missing value == -1b */ if(msk_typ != NC_INT){ if(nco_dbg_lvl_get() == nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s mask variable \"%s\" has odd type = %s. Re-run with higher debugging level for more information.\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,nco_typ_sng(msk_typ)); if(nco_dbg_lvl_get() > nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s mask variable \"%s\" has odd type = %s. Regridding weight generators require a mask variable of type NC_INT to specify points to include/exclude as sources/destinations. Points where the mask variable is zero or the missing value will be excluded (ignored) in regridding, all other points will be included. When inferring gridfiles, NCO assumes the first variable with a \"mask\"-like name (\"mask\", \"Mask\", \"grid_imask\", \"landmask\", or \"tmask\"), or the variable designated by the \"--msk_[src/dst]=msk_nm\" option, is this mask. However the variable \"%s\" in this file is not type NC_INT and so may not be intended as a regridding mask, hence this oh so pleasant informational WARNING. To prevent NCO from interpreting \"%s\" as a regridding mask, specify \"--msk_src=none\" and/or \"--msk_dst=none\", as appropriate. To utilize some other variable as the mask variable, specify \"--msk_src=msk_nm\" and/or \"--msk_dst=msk_nm\", as appropriate. 
Mask treatment is subtle, and NCO tries to \"do the right thing\". Whether it does is often easiest to discern by visual inspection of the regridded results in a turn-key viewer like Panoply or ncview.\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,nco_typ_sng(msk_typ),msk_nm_in,msk_nm_in); } /* msk_typ */ switch(msk_typ){ case NC_FLOAT: if(has_mss_val_msk){ const float mss_val_flt=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.fp[idx] == mss_val_flt || msk_unn.fp[idx] == 0.0f) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.fp[idx] == 0.0f) msk[idx]=0; } /* !mss_val */ break; case NC_DOUBLE: if(has_mss_val_msk){ const double mss_val_dbl=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.dp[idx] == mss_val_dbl || msk_unn.dp[idx] == 0.0) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.dp[idx] == 0.0) msk[idx]=0; } /* !mss_val */ break; case NC_INT: if(has_mss_val_msk){ const int mss_val_int=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.ip[idx] == mss_val_int || msk_unn.ip[idx] == 0) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.ip[idx] == 0) msk[idx]=0; } /* !mss_val */ break; case NC_SHORT: /* http://stackoverflow.com/questions/208433/how-do-i-write-a-short-literal-in-c */ if(has_mss_val_msk){ const short mss_val_sht=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.sp[idx] == mss_val_sht || msk_unn.sp[idx] == ((short)0)) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.sp[idx] == ((short)0)) msk[idx]=0; /* 20160111: AMSR kludge fxm */ // for(idx=0;idx<grd_sz_nbr;idx++) if(msk[idx] == 1) msk[idx]=0; } /* !mss_val */ break; case NC_BYTE: if(has_mss_val_msk){ const nco_byte mss_val_byt=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.bp[idx] == mss_val_byt || msk_unn.bp[idx] == ((nco_byte)0)) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.bp[idx] == ((nco_byte)0)) msk[idx]=0; /* 20170811: GHRSST kludge? 
*/ } /* !mss_val */ break; default: (void)fprintf(stderr,"%s: ERROR %s mask variable \"%s\" has unsupported type = %s\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,nco_typ_sng(msk_typ)); nco_dfl_case_generic_err(); return NCO_ERR; break; } /* !msk_typ */ if(msk_unn.vp) msk_unn.vp=(void *)nco_free(msk_unn.vp); } /* !msk_id */ if(nco_dbg_lvl_get() >= nco_dbg_scl){ lat_wgt_ttl=0.0; area_ttl=0.0; if(flg_grd_2D){ (void)fprintf(stderr,"%s: INFO %s reports inferred rectangular latitude grid area diagnostics (lat_wgt_ttl and frc_lat_wgt should be valid):\n",nco_prg_nm_get(),fnc_nm); for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt_ttl+=lat_wgt[lat_idx]; }else{ (void)fprintf(stderr,"%s: INFO %s reports inferred unstructured or curvilinear latitude grid area diagnostics (ignore lat_wgt_ttl and frc_lat_wgt):\n",nco_prg_nm_get(),fnc_nm); } /* !flg_grd_2D */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) area_ttl+=area[lat_idx*lon_nbr+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_ttl,area_ttl/(4.0*M_PI)); assert(area_ttl > 0.0); /* Protect following assertion since area might be in, e.g., km2 (ELM, RACMO) */ if(flg_area_sr) assert(area_ttl <= 4.0*M_PI); const double eps_rlt_area=1.0e-12; /* [frc] Error tolerance for global area */ if(nco_grd_xtn == nco_grd_xtn_glb){ if(fabs(1.0-area_ttl/(4.0*M_PI)) > eps_rlt_area) (void)fprintf(stdout,"%s: WARNING %s reports area for inferred global grid differs from true global area (4*pi sr) by greater than allowed fraction %g\n",nco_prg_nm_get(),fnc_nm,eps_rlt_area); } /* !nco_grd_xtn_glb */ } /* !dbg */ /* Open grid file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Define dimensions */ /* 20151230 ERWG appears to require presence of corner arrays in grid file even when they are not used (e.g., bilinear) But ERWG will break when corner values are bad. Default is do not write bad corner values. Uncomment next line to write bad corner values. 
*/ /* flg_wrt_crn=True; */ if(flg_wrt_crn) rcd=nco_def_dim(out_id,grd_crn_nm,grd_crn_nbr,&dmn_id_grd_crn); rcd=nco_def_dim(out_id,grd_sz_nm,grd_sz_nbr,&dmn_id_grd_sz); rcd=nco_def_dim(out_id,grd_rnk_nm,grd_rnk_nbr,&dmn_id_grd_rnk); int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; /* Define variables */ (void)nco_def_var(out_id,dmn_sz_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_rnk,&dmn_sz_int_id); /* NB: Too small to deflate */ (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_sz,&msk_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lat_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lon_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lon_id,shuffle,deflate,dfl_lvl); if(flg_wrt_crn){ dmn_ids[0]=dmn_id_grd_sz; dmn_ids[1]=dmn_id_grd_crn; (void)nco_def_var(out_id,grd_crn_lat_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_crn_lon_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lon_id,shuffle,deflate,dfl_lvl); } /* !flg_wrt_crn */ /* Define attributes */ aed_sct aed_mtd; char *att_nm; if(strstr(rgr->grd_ttl,"None given")){ const char att_fmt[]="NCO inferred this grid from input file %s"; att_val=(char *)nco_malloc((strlen(att_fmt)+strlen(rgr->fl_in)+1L)*sizeof(char)); sprintf(att_val,att_fmt,rgr->fl_in); }else{ att_val=strdup(rgr->grd_ttl); } /* !grd_ttl */ rcd=nco_char_att_put(out_id,NULL,"title",att_val); rcd=nco_char_att_put(out_id,NULL,"Conventions","SCRIP"); const char usr_cpp[]=TKN2SNG(USER); /* [sng] Username from C pre-processor */ rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO"); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ)); rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ)); rcd=nco_char_att_put(out_id,dmn_sz_nm,"long_name","Size(s) of horizontal dimensions (in Fortran storage order for historical reasons)"); if(flg_area_sr){ rcd=nco_char_att_put(out_id,area_nm,"long_name","Solid Angle Subtended on Source Grid"); rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm,"units","steradian"); }else{ /* !flg_area_sr */ rcd=nco_char_att_put(out_id,area_nm,"long_name","Area on Source Grid"); // rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm,"units",area_unt); } /* !flg_area_sr */ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"standard_name","latitude"); if(ngl_unt){ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,unt_sng,ngl_unt); }else{ /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees_north"); else
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees"); } /* !ngl_unt */ rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"standard_name","longitude"); if(ngl_unt){ rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,unt_sng,ngl_unt); }else{ /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees"); } /* !ngl_unt */ if(flg_wrt_crn){ rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"long_name","Latitude of Grid Cell Vertices"); if(ngl_unt){ rcd=nco_char_att_put(out_id,grd_crn_lat_nm,unt_sng,ngl_unt); }else{ /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees"); } /* !ngl_unt */ rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"long_name","Longitude of Grid Cell Vertices"); if(ngl_unt){ rcd=nco_char_att_put(out_id,grd_crn_lon_nm,unt_sng,ngl_unt); }else{ /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees"); } /* !ngl_unt */ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"bounds",grd_crn_lat_nm); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"bounds",grd_crn_lon_nm); } /* !flg_wrt_crn */ rcd=nco_char_att_put(out_id,msk_nm,"long_name","Binary Integer Mask for Grid"); rcd=nco_char_att_put(out_id,msk_nm,"units","none"); /* Begin data mode */ (void)nco_enddef(out_id); /* Write variables */ dmn_srt[0]=0L; dmn_cnt[0]=grd_rnk_nbr; rcd=nco_put_vara(out_id,dmn_sz_int_id,dmn_srt,dmn_cnt,dmn_sz_int,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,msk_id,dmn_srt,dmn_cnt,msk,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ); if(flg_wrt_crn){ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lat_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lon_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ); } /* !flg_wrt_crn */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); fl_out=rgr->fl_ugrid; if(fl_out){ /* Test UGRID: Documentation: https://github.com/ugrid-conventions/ugrid-conventions Procedure: Create 1x1 skeleton file, infer UGRID and SCRIP grids from it ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr skl=${HOME}/skl_180x360.nc --rgr scrip=${HOME}/grd_180x360_SCRIP.nc --rgr latlon=180,360#lat_typ=eqa#lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr infer --rgr ugrid=${HOME}/grd_ugrid.nc --rgr scrip=${HOME}/grd_scrip.nc ~/skl_180x360.nc ~/foo.nc ncks --cdl -v mesh_node_y ~/grd_ugrid.nc ncks --cdl -v mesh_face_nodes,mesh_face_x,mesh_face_y -d nFaces,0 ~/grd_ugrid.nc ncks --cdl -v mesh_edge_nodes,mesh_edge_x,mesh_edge_y -d nEdges,0 ~/grd_ugrid.nc ncks --cdl -v grid_center_lat,grid_corner_lat -d
grid_size,0,,360 -d grid_corners,0,3 ~/grd_scrip.nc ncks --cdl -m -M ~/grd_ugrid.nc */ char *dgx_nm=NULL_CEWI; /* [sng] Name of edge_coordinates x variable */ char *dgy_nm=NULL_CEWI; /* [sng] Name of edge_coordinates y variable */ char *dg_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as edges */ char *dg_nd_nm=NULL_CEWI; /* [sng] Name of edge_node_connectivity variable */ char *fcx_nm=NULL_CEWI; /* [sng] Name of face_coordinates x variable */ char *fcy_nm=NULL_CEWI; /* [sng] Name of face_coordinates y variable */ char *fc_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as faces */ char *fc_nd_nm=NULL_CEWI; /* [sng] Name of face_node_connectivity variable */ char *msh_nm=NULL_CEWI; /* [sng] Name of mesh topology variable */ char *nd_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes */ char *ndx_nm=NULL_CEWI; /* [sng] Name of node_coordinates x variable */ char *ndy_nm=NULL_CEWI; /* [sng] Name of node_coordinates y variable */ char *npe_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes-per-edge */ char *npf_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes-per-face */ double *dgx=NULL_CEWI; /* [dgr] Characteristic longitude of edges */ double *dgy=NULL_CEWI; /* [dgr] Characteristic latitude of edges */ double *fcx=NULL_CEWI; /* [dgr] Characteristic longitude of faces */ double *fcy=NULL_CEWI; /* [dgr] Characteristic latitude of faces */ double *ndx=NULL_CEWI; /* [dgr] Longitude of nodes */ double *ndy=NULL_CEWI; /* [dgr] Latitude of nodes */ int *dg_nd; /* [idx] edge_node_connectivity variable */ int *fc_nd; /* [idx] face_node_connectivity variable */ int dg_nd_id=NC_MIN_INT; /* [id] edge_node_connectivity variable ID */ int dgx_id=NC_MIN_INT; /* [id] Characteristic longitude of edges variable ID */ int dgy_id=NC_MIN_INT; /* [id] Characteristic latitude of edges variable ID */ int dmn_id_dg=NC_MIN_INT; /* [id] Dimension ID for edges */ int dmn_id_fc=NC_MIN_INT; /* [id] Dimension ID for faces */ int dmn_id_nd=NC_MIN_INT; /* [id] Dimension ID for nodes */ int dmn_id_npe=NC_MIN_INT; /* [id] Dimension ID for nodes-per-edge */ int dmn_id_npf=NC_MIN_INT; /* [id] Dimension ID for nodes-per-face */ int fc_nd_id=NC_MIN_INT; /* [id] face_node_connectivity variable ID */ int fcx_id=NC_MIN_INT; /* [id] Characteristic longitude of faces variable ID */ int fcy_id=NC_MIN_INT; /* [id] Characteristic latitude of faces variable ID */ int msh_id=NC_MIN_INT; /* [id] Mesh topology variable ID */ int msh_val=42; /* [id] Mesh topology variable value from Douglas Adams' Hitchhiker's Guide */ int ndx_id=NC_MIN_INT; /* [id] Longitude of mesh nodes variable ID */ int ndy_id=NC_MIN_INT; /* [id] Latitude of mesh nodes variable ID */ const long fc_nbr=grd_sz_nbr; /* [nbr] Number of faces in mesh */ const long npe_nbr=2; /* [nbr] Number of nodes per edge */ const long npf_nbr=grd_crn_nbr; /* [nbr] Number of nodes per face */ long dg_idx; /* [idx] Counting index for edges */ long dg_nbr=(long)NC_MIN_INT64; /* [nbr] Number of edges in mesh */ long fc_idx; /* [idx] Counting index for faces */ long nd_idx; /* [idx] Counting index for nodes */ long nd_nbr=(long)NC_MIN_INT64; /* [nbr] Number of nodes in mesh */ long srt_idx=0; /* [idx] start_index (C/Fortran) for edge_nodes, face_nodes */ if(!dgx_nm) dgx_nm=(char *)strdup("mesh_edge_x"); if(!dgy_nm) dgy_nm=(char *)strdup("mesh_edge_y"); if(!dg_dmn_nm) dg_dmn_nm=(char *)strdup("nEdges"); if(!fcx_nm) fcx_nm=(char *)strdup("mesh_face_x"); if(!fcy_nm) fcy_nm=(char *)strdup("mesh_face_y"); if(!fc_dmn_nm) fc_dmn_nm=(char
*)strdup("nFaces"); if(!dg_nd_nm) dg_nd_nm=(char *)strdup("mesh_edge_nodes"); if(!fc_nd_nm) fc_nd_nm=(char *)strdup("mesh_face_nodes"); if(!msh_nm) msh_nm=(char *)strdup("mesh"); if(!nd_dmn_nm) nd_dmn_nm=(char *)strdup("nNodes"); if(!ndx_nm) ndx_nm=(char *)strdup("mesh_node_x"); if(!ndy_nm) ndy_nm=(char *)strdup("mesh_node_y"); if(!npe_dmn_nm) npe_dmn_nm=(char *)strdup("two"); if(!npf_dmn_nm) npf_dmn_nm=(char *)strdup("maxNodesPerFace"); if(flg_grd_1D){ (void)fprintf(stdout,"%s: ERROR %s UGRID output does not yet support 1D grids\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); }else if(flg_grd_2D){ /* Assume 2D grids are global and comprised of quadrilaterals */ switch(lat_typ){ case nco_grd_lat_fv: /* Currently all 2D grids are converted to the same UGRID representation fxm: Cap grids (e.g., FV) should eventually be written with a real cap, rather than as the "polar teeth" representation currently used. Polar teeth convention allows cap grid to be represented as rectangular on disk However, cap grids are better suited to non-rectangular UGRID meshes */ case nco_grd_lat_eqa: case nco_grd_lat_gss: /* Numbers of unique edges and nodes counted from South Pole (SP) to North Pole (NP) */ dg_nbr=lon_nbr*2+ /* SP: cells_per_lat*unique_edges_per_cell */ (lat_nbr-2)*lon_nbr*2+ /* Mid: lats*cells_per_lat*unique_edges_per_cell */ lon_nbr*1; /* NP: cells_per_lat*unique_edges_per_cell */ nd_nbr=1+lon_nbr*1+ /* SP: SP+cells_per_lat*unique_nodes_per_cell */ (lat_nbr-2)*lon_nbr*1+ /* Mid: lats*cells_per_lat*unique_nodes_per_cell */ 1; /* NP: NP */ break; case nco_grd_lat_unk: case nco_grd_lat_nil: default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ }else if(flg_grd_crv){ (void)fprintf(stdout,"%s: ERROR %s UGRID output does not yet support curvilinear grids\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !flg_grd */ dg_nd=(int *)nco_malloc(dg_nbr*npe_nbr*nco_typ_lng(NC_INT)); dgx=(double *)nco_malloc(dg_nbr*nco_typ_lng(crd_typ)); dgy=(double *)nco_malloc(dg_nbr*nco_typ_lng(crd_typ)); fc_nd=(int *)nco_malloc(fc_nbr*npf_nbr*nco_typ_lng(NC_INT)); fcx=(double *)nco_malloc(fc_nbr*nco_typ_lng(crd_typ)); fcy=(double *)nco_malloc(fc_nbr*nco_typ_lng(crd_typ)); ndx=(double *)nco_malloc(nd_nbr*nco_typ_lng(crd_typ)); ndy=(double *)nco_malloc(nd_nbr*nco_typ_lng(crd_typ)); const long int idx_fst_crn_ll=0; const long int idx_fst_crn_lr=1; const long int idx_fst_crn_ur=2; const long int idx_fst_crn_ul=3; /* Node Ordering: Each interior face requires one new node Node 0 at SP New latitude row moves next node North Add nodes to run West->East */ /* SP */ ndx[0]=lon_crn[0]; /* Longitude degenerate at SP, NP, keep same longitude as corner array */ ndy[0]=lat_crn[0]; /* Mid */ for(nd_idx=1;nd_idx<nd_nbr-1L;nd_idx++){ fc_idx=nd_idx-1L; lat_idx=fc_idx/lon_nbr; lon_idx=fc_idx%lon_nbr; ndx[nd_idx]=lon_crn[lon_idx*grd_crn_nbr+idx_fst_crn_ul]; ndy[nd_idx]=lat_crn[lat_idx*grd_crn_nbr+idx_fst_crn_ul]; } /* !nd_idx */ /* NP */ ndx[nd_nbr-1L]=lon_crn[(lon_nbr-1)*grd_crn_nbr+idx_fst_crn_ul]; ndy[nd_nbr-1L]=lat_crn[(lat_nbr-1)*grd_crn_nbr+idx_fst_crn_ul]; /* Edge Ordering: epf_nbr is number of distinct edges-per-face (incremental, for interior cells) Each additional interior rectangular gridcell requires two new edges: Edge 0 runs South->North for all cells Edge 1 runs West->East for all cells NP row requires only one new edge per face */ /* SP */ const int epf_nbr=2; /* [nbr] Number of distinct edges-per-face (incremental, for interior cells) */ for(fc_idx=0;fc_idx<lon_nbr;fc_idx++){ dg_idx=fc_idx*epf_nbr; /* Edge 
0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+fc_idx+1L; /* Edge 1 */ dg_nd[(dg_idx+1L)*npe_nbr+0L]=srt_idx+fc_idx+1L; dg_nd[(dg_idx+1L)*npe_nbr+1L]=srt_idx+fc_idx+2L; } /* !fc_idx */ /* Mid */ for(fc_idx=lon_nbr;fc_idx<(lat_nbr-1L)*lon_nbr;fc_idx++){ dg_idx=fc_idx*epf_nbr; /* Edge 0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx+fc_idx-lon_nbr+1L; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+fc_idx+1L; /* Edge 1 */ dg_nd[(dg_idx+1L)*npe_nbr+0L]=srt_idx+fc_idx+1L; dg_nd[(dg_idx+1L)*npe_nbr+1L]=srt_idx+fc_idx+2L; } /* !fc_idx */ /* NP */ for(fc_idx=fc_nbr-lon_nbr;fc_idx<fc_nbr;fc_idx++){ /* Only one new edge per face in last row, easiest to count backwards from last edge */ dg_idx=dg_nbr-(fc_nbr-fc_idx); /* NP faces require only one new edge, Edge 0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx+fc_idx-lon_nbr+1L; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+nd_nbr-1L; } /* !fc_idx */ /* SP */ for(fc_idx=0;fc_idx<lon_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+0L]=srt_idx+0L; fc_nd[fc_idx*npf_nbr+1L]=srt_idx+fc_idx+2L; /* NB: CCW */ fc_nd[fc_idx*npf_nbr+2L]=srt_idx+fc_idx+1L; /* NB: CCW */ fc_nd[fc_idx*npf_nbr+3L]=mss_val_int_out; } /* !fc_idx */ /* Mid */ for(fc_idx=lon_nbr;fc_idx<fc_nbr-lon_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+idx_fst_crn_ll]=srt_idx+fc_idx-lon_nbr+1L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_lr]=srt_idx+fc_idx-lon_nbr+2L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_ur]=srt_idx+fc_idx+2L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_ul]=srt_idx+fc_idx+1L; } /* !fc_idx */ /* NP */ for(fc_idx=fc_nbr-lon_nbr;fc_idx<fc_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+0L]=srt_idx+nd_nbr-(fc_nbr-fc_idx)-2L; fc_nd[fc_idx*npf_nbr+1L]=srt_idx+nd_nbr-(fc_nbr-fc_idx)-1L; fc_nd[fc_idx*npf_nbr+2L]=srt_idx+nd_nbr-1L; fc_nd[fc_idx*npf_nbr+3L]=mss_val_int_out; } /* !fc_idx */ /* Characteristic coordinates */ for(dg_idx=0;dg_idx<dg_nbr-1L;dg_idx++){ idx=dg_idx*npe_nbr; dgx[dg_idx]=0.5*(ndx[dg_nd[idx+0L]]+ndx[dg_nd[idx+1L]]); dgy[dg_idx]=0.5*(ndy[dg_nd[idx+0L]]+ndy[dg_nd[idx+1L]]); } /* !dg_idx */ /* Degenerate longitude at SP, NP, causes weird characteristic longitude unless special care taken */ for(fc_idx=0;fc_idx<fc_nbr-1L;fc_idx++){ idx=fc_idx*npf_nbr; if(fc_idx < lon_nbr){ fcx[fc_idx]=0.5*(ndx[fc_nd[idx+1L]]+ndx[fc_nd[idx+2L]]); }else if(fc_idx >= fc_nbr-lon_nbr-1){ fcx[fc_idx]=0.5*(ndx[fc_nd[idx+0L]]+ndx[fc_nd[idx+1L]]); }else if(fc_nd[idx+3L] != mss_val_int_out){ /* fxm for fcx use nco_lon_crn_avg_brnch() and 3-node version too */ fcx[fc_idx]=0.25*(ndx[fc_nd[idx+0L]]+ndx[fc_nd[idx+1L]]+ndx[fc_nd[idx+2L]]+ndx[fc_nd[idx+3L]]); }else{ abort(); } /* !fc_idx */ if(fc_nd[idx+3L] != mss_val_int_out) fcy[fc_idx]=0.25*(ndy[fc_nd[idx+0L]]+ndy[fc_nd[idx+1L]]+ndy[fc_nd[idx+2L]]+ndy[fc_nd[idx+3L]]); else fcy[fc_idx]=0.33*(ndy[fc_nd[idx+0L]]+ndy[fc_nd[idx+1L]]+ndy[fc_nd[idx+2L]]); } /* !fc_idx */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); rcd=nco_def_dim(out_id,dg_dmn_nm,dg_nbr,&dmn_id_dg); rcd=nco_def_dim(out_id,fc_dmn_nm,fc_nbr,&dmn_id_fc); rcd=nco_def_dim(out_id,nd_dmn_nm,nd_nbr,&dmn_id_nd); rcd=nco_def_dim(out_id,npe_dmn_nm,npe_nbr,&dmn_id_npe); rcd=nco_def_dim(out_id,npf_dmn_nm,npf_nbr,&dmn_id_npf); dmn_ids[0]=dmn_id_dg; dmn_ids[1]=dmn_id_npe; rcd=nco_def_var(out_id,dg_nd_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids,&dg_nd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dg_nd_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_fc; dmn_ids[1]=dmn_id_npf;
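/* Illustrative sanity check on the mesh bookkeeping above (example sizes assumed, not taken from this code): a global lon_nbr=4 x lat_nbr=3 grid gives dg_nbr=4*2+(3-2)*4*2+4*1=20 edges and nd_nbr=1+4*1+(3-2)*4*1+1=10 nodes, and with fc_nbr=12 faces these counts satisfy Euler's formula for the sphere, V-E+F=10-20+12=2 */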
rcd=nco_def_var(out_id,fc_nd_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids,&fc_nd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fc_nd_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,msh_nm,(nc_type)NC_INT,dmn_nbr_0D,(int *)NULL,&msh_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msh_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,ndx_nm,crd_typ,dmn_nbr_1D,&dmn_id_nd,&ndx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ndx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,ndy_nm,crd_typ,dmn_nbr_1D,&dmn_id_nd,&ndy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ndy_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,dgx_nm,crd_typ,dmn_nbr_1D,&dmn_id_dg,&dgx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dgx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,dgy_nm,crd_typ,dmn_nbr_1D,&dmn_id_dg,&dgy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dgy_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,fcx_nm,crd_typ,dmn_nbr_1D,&dmn_id_fc,&fcx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fcx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,fcy_nm,crd_typ,dmn_nbr_1D,&dmn_id_fc,&fcy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fcy_id,shuffle,deflate,dfl_lvl); if(strstr(rgr->grd_ttl,"None given")){ const char att_fmt[]="NCO constructed this UGRID grid from scratch"; att_val=(char *)nco_malloc((strlen(att_fmt)+strlen(rgr->fl_in)+1L)*sizeof(char)); sprintf(att_val,att_fmt); }else{ att_val=strdup(rgr->grd_ttl); } /* !grd_ttl */ rcd=nco_char_att_put(out_id,NULL,"title",att_val); rcd=nco_char_att_put(out_id,NULL,"Conventions","CF-1.6, UGRID-1.0"); rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO"); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,msh_nm,"cf_role","mesh_topology"); rcd=nco_char_att_put(out_id,msh_nm,"standard_name","mesh_topology"); rcd=nco_char_att_put(out_id,msh_nm,"long_name","Topology data"); att_nm=strdup("topology_dimension"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=msh_nm; aed_mtd.id=msh_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&val_two; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,msh_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); aed_mtd.sz=strlen(ndx_nm)+strlen(ndy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",ndx_nm,ndy_nm); rcd=nco_char_att_put(out_id,msh_nm,"node_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"face_node_connectivity",fc_nd_nm); aed_mtd.sz=strlen(fcx_nm)+strlen(fcy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",fcx_nm,fcy_nm); rcd=nco_char_att_put(out_id,msh_nm,"face_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"face_dimension",fc_dmn_nm); rcd=nco_char_att_put(out_id,msh_nm,"edge_node_connectivity",dg_nd_nm); aed_mtd.sz=strlen(dgx_nm)+strlen(dgy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",dgx_nm,dgy_nm); rcd=nco_char_att_put(out_id,msh_nm,"edge_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"edge_dimension",dg_dmn_nm); rcd=nco_char_att_put(out_id,ndx_nm,"standard_name","longitude"); rcd=nco_char_att_put(out_id,ndx_nm,"long_name","Longitude of mesh nodes"); rcd=nco_char_att_put(out_id,ndx_nm,"units","degrees_east"); rcd=nco_char_att_put(out_id,ndy_nm,"standard_name","latitude"); rcd=nco_char_att_put(out_id,ndy_nm,"long_name","Latitude of mesh nodes"); 
rcd=nco_char_att_put(out_id,ndy_nm,"units","degrees_north"); rcd=nco_char_att_put(out_id,dg_nd_nm,"cf_role","edge_node_connectivity"); rcd=nco_char_att_put(out_id,dg_nd_nm,"long_name","Maps every edge to the two nodes that it connects"); att_nm=strdup("start_index"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=dg_nd_nm; aed_mtd.id=dg_nd_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&val_zero; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,dg_nd_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,fc_nd_nm,"cf_role","face_node_connectivity"); rcd=nco_char_att_put(out_id,fc_nd_nm,"long_name","Maps every face to its corner nodes"); att_nm=strdup("start_index"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=fc_nd_nm; aed_mtd.id=fc_nd_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&val_zero; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,fc_nd_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); att_nm=strdup("_FillValue"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=fc_nd_nm; aed_mtd.id=fc_nd_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&mss_val_int_out; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,fc_nd_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,dgx_nm,"standard_name","longitude"); rcd=nco_char_att_put(out_id,dgx_nm,"long_name","Characteristic longitude of 2D mesh face"); rcd=nco_char_att_put(out_id,dgx_nm,"units","degrees_east"); rcd=nco_char_att_put(out_id,dgy_nm,"standard_name","latitude"); rcd=nco_char_att_put(out_id,dgy_nm,"long_name","Characteristic latitude of 2D mesh face"); rcd=nco_char_att_put(out_id,dgy_nm,"units","degrees_north"); rcd=nco_char_att_put(out_id,fcx_nm,"standard_name","longitude"); rcd=nco_char_att_put(out_id,fcx_nm,"long_name","Characteristic longitude of 2D mesh edge"); rcd=nco_char_att_put(out_id,fcx_nm,"units","degrees_east"); rcd=nco_char_att_put(out_id,fcy_nm,"standard_name","latitude"); rcd=nco_char_att_put(out_id,fcy_nm,"long_name","Characteristic latitude of 2D mesh edge"); rcd=nco_char_att_put(out_id,fcy_nm,"units","degrees_north"); /* Begin data mode */ (void)nco_enddef(out_id); (void)nco_put_vara(out_id,msh_id,dmn_srt,dmn_cnt,&msh_val,(nc_type)NC_INT); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=dg_nbr; dmn_cnt[1]=epf_nbr; (void)nco_put_vara(out_id,dg_nd_id,dmn_srt,dmn_cnt,dg_nd,(nc_type)NC_INT); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=fc_nbr; dmn_cnt[1]=npf_nbr; (void)nco_put_vara(out_id,fc_nd_id,dmn_srt,dmn_cnt,fc_nd,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=nd_nbr; (void)nco_put_vara(out_id,ndx_id,dmn_srt,dmn_cnt,ndx,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=nd_nbr; (void)nco_put_vara(out_id,ndy_id,dmn_srt,dmn_cnt,ndy,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=dg_nbr; (void)nco_put_vara(out_id,dgx_id,dmn_srt,dmn_cnt,dgx,crd_typ); (void)nco_put_vara(out_id,dgy_id,dmn_srt,dmn_cnt,dgy,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=fc_nbr; (void)nco_put_vara(out_id,fcx_id,dmn_srt,dmn_cnt,fcx,crd_typ); (void)nco_put_vara(out_id,fcy_id,dmn_srt,dmn_cnt,fcy,crd_typ); /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); /* Free memory associated with output file */ if(dgx) dgx=(double *)nco_free(dgx); if(dgy) dgy=(double *)nco_free(dgy); if(dg_nd) dg_nd=(int *)nco_free(dg_nd); if(fcx) fcx=(double *)nco_free(fcx); if(fcy) fcy=(double *)nco_free(fcy); if(fc_nd) fc_nd=(int *)nco_free(fc_nd); if(ndx) ndx=(double *)nco_free(ndx); if(ndy) ndy=(double *)nco_free(ndy); /* Free strings */ if(dgx_nm) dgx_nm=(char *)nco_free(dgx_nm); if(dgy_nm) 
dgy_nm=(char *)nco_free(dgy_nm); if(dg_dmn_nm) dg_dmn_nm=(char *)nco_free(dg_dmn_nm); if(dg_nd_nm) dg_nd_nm=(char *)nco_free(dg_nd_nm); if(fcx_nm) fcx_nm=(char *)nco_free(fcx_nm); if(fcy_nm) fcy_nm=(char *)nco_free(fcy_nm); if(fc_dmn_nm) fc_dmn_nm=(char *)nco_free(fc_dmn_nm); if(fc_nd_nm) fc_nd_nm=(char *)nco_free(fc_nd_nm); if(msh_nm) msh_nm=(char *)nco_free(msh_nm); if(nd_dmn_nm) nd_dmn_nm=(char *)nco_free(nd_dmn_nm); if(ndx_nm) ndx_nm=(char *)nco_free(ndx_nm); if(ndy_nm) ndy_nm=(char *)nco_free(ndy_nm); if(npe_dmn_nm) npe_dmn_nm=(char *)nco_free(npe_dmn_nm); if(npf_dmn_nm) npf_dmn_nm=(char *)nco_free(npf_dmn_nm); } /* !fl_ugrid */ /* Free memory associated with input file */ if(dmn_sz_int) dmn_sz_int=(int *)nco_free(dmn_sz_int); if(msk) msk=(int *)nco_free(msk); if(area) area=(double *)nco_free(area); if(grd_ctr_lat) grd_ctr_lat=(double *)nco_free(grd_ctr_lat); if(grd_ctr_lon) grd_ctr_lon=(double *)nco_free(grd_ctr_lon); if(grd_crn_lat) grd_crn_lat=(double *)nco_free(grd_crn_lat); if(grd_crn_lon) grd_crn_lon=(double *)nco_free(grd_crn_lon); if(lat_bnd) lat_bnd=(double *)nco_free(lat_bnd); if(lat_crn) lat_crn=(double *)nco_free(lat_crn); if(lat_ctr) lat_ctr=(double *)nco_free(lat_ctr); if(lat_ntf) lat_ntf=(double *)nco_free(lat_ntf); if(lat_wgt) lat_wgt=(double *)nco_free(lat_wgt); if(lon_bnd) lon_bnd=(double *)nco_free(lon_bnd); if(lon_crn) lon_crn=(double *)nco_free(lon_crn); if(lon_ctr) lon_ctr=(double *)nco_free(lon_ctr); if(lon_ntf) lon_ntf=(double *)nco_free(lon_ntf); if(vrt_cll) vrt_cll=(int *)nco_free(vrt_cll); if(vrt_lat) vrt_lat=(double *)nco_free(vrt_lat); if(vrt_lon) vrt_lon=(double *)nco_free(vrt_lon); /* Free strings */ if(area_nm_in) area_nm_in=(char *)nco_free(area_nm_in); if(area_unt) area_unt=(char *)nco_free(area_unt); if(bnd_dmn_nm) bnd_dmn_nm=(char *)nco_free(bnd_dmn_nm); if(col_dmn_nm) col_dmn_nm=(char *)nco_free(col_dmn_nm); if(lat_bnd_nm) lat_bnd_nm=(char *)nco_free(lat_bnd_nm); if(lat_dmn_nm) lat_dmn_nm=(char *)nco_free(lat_dmn_nm); if(lat_nm_in) lat_nm_in=(char *)nco_free(lat_nm_in); if(lon_bnd_nm) lon_bnd_nm=(char *)nco_free(lon_bnd_nm); if(lon_dmn_nm) lon_dmn_nm=(char *)nco_free(lon_dmn_nm); if(lon_nm_in) lon_nm_in=(char *)nco_free(lon_nm_in); if(msk_nm_in) msk_nm_in=(char *)nco_free(msk_nm_in); if(ngl_unt) ngl_unt=(char *)nco_free(ngl_unt); if(vrt_cll_nm) vrt_cll_nm=(char *)nco_free(vrt_cll_nm); if(vrt_lat_nm) vrt_lat_nm=(char *)nco_free(vrt_lat_nm); if(vrt_lon_nm) vrt_lon_nm=(char *)nco_free(vrt_lon_nm); return rcd; } /* !nco_grd_nfr() */ double /* O [dgr] Longitude difference (lon_r-lon_l) */ nco_lon_dff_brnch_dgr /* [fnc] Subtract longitudes with branch-cut rules */ (double lon_r, /* I [dgr] Longitude on right of gridcell (subtractor) */ double lon_l) /* I [dgr] Longitude on left of gridcell (subtractee) */ { /* Purpose: Return difference of two longitudes in degrees Assume longitudes are within 180 degrees of each other Default orientation is monotonically increasing longitude from left to right */ const char fnc_nm[]="nco_lon_dff_brnch_dgr()"; const double lon_dff=lon_r-lon_l; /* [dgr] Longitude difference (lon_r-lon_l) */ if(lon_dff >= 180.0){ (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff); return lon_dff-360.0; }else if(lon_dff <= -180.0){ return lon_dff+360.0; } /* !lon_dff */ return lon_dff; } /* !nco_lon_dff_brnch_dgr() */ double /* O [rdn] Longitude difference (lon_r-lon_l) */ nco_lon_dff_brnch_rdn /* [fnc] Subtract longitudes with branch-cut rules */ (double lon_r, /*
I [rdn] Longitude on right of gridcell (subtractor) */ double lon_l) /* I [rdn] Longitude on left of gridcell (subtractee) */ { /* Purpose: Return difference of two longitudes in radians Assume longitudes are within pi radians of each other Default orientation is monotonically increasing longitude from left to right */ const char fnc_nm[]="nco_lon_dff_brnch_rdn()"; const double lon_dff=lon_r-lon_l; /* [rdn] Longitude difference (lon_r-lon_l) */ //nco_bool dbg_prn=False; /* [flg] Print warning when longitude difference is suspicious */ /* longitudes on different branch cuts are expected when computing polygon area, so warn only if requested with high debugging level */ if(lon_dff >= M_PI){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff); return lon_dff-M_PI-M_PI; }else if(lon_dff <= -M_PI){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff); return lon_dff+M_PI+M_PI; } /* !lon_dff */ return lon_dff; } /* !nco_lon_dff_brnch_rdn() */ double /* O [dgr] Longitude average */ nco_lon_crn_avg_brnch /* [fnc] Average quadrilateral longitude with branch-cut rules */ (double lon_ll, /* I [dgr] Longitude at lower left of gridcell */ double lon_lr, /* I [dgr] Longitude at lower right of gridcell */ double lon_ur, /* I [dgr] Longitude at upper right of gridcell */ double lon_ul) /* I [dgr] Longitude at upper left of gridcell */ { /* Purpose: Return average of four corner longitudes of quadrilateral Assume longitudes are within 180 degrees of each other Default orientation is monotonically increasing longitude from left to right WLOG, adjust all longitudes to be on same branch as lon_ll */ const char fnc_nm[]="nco_lon_crn_avg_brnch()"; double lon_dff; /* [dgr] Longitude difference */ lon_dff=lon_lr-lon_ll; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_lr, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_lr,lon_ll,lon_dff); lon_lr-=360.0; }else if(lon_dff <= -180.0){ lon_lr+=360.0; } /* !lon_dff */ lon_dff=lon_ur-lon_ll; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_ur, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_ur,lon_ll,lon_dff); lon_ur-=360.0; }else if(lon_dff <= -180.0){ lon_ur+=360.0; } /* !lon_dff */ lon_dff=lon_ul-lon_ll; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_ul, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_ul,lon_ll,lon_dff); lon_ul-=360.0; }else if(lon_dff <= -180.0){ lon_ul+=360.0; } /* !lon_dff */ return 0.25*(lon_ll+lon_lr+lon_ur+lon_ul); } /* !nco_lon_crn_avg_brnch() */ double /* O [dgr] Longitude average */ nco_lon_ply_avg_brnch_dgr /* [fnc] Average polygon longitude with branch-cut rules */ (double *lon_crn, /* I [dgr] Longitude of gridcell corners */ long lon_nbr) /* I [nbr] Number of vertices in polygon */ { /* Purpose: Return average longitude of polygon vertices, i.e., centroid longitude Assume longitudes are within 180 degrees of one another Default orientation is monotonically increasing longitude from left to right WLOG, adjust all longitudes to be on same branch as lon_ll */ // const char fnc_nm[]="nco_lon_ply_avg_brnch()"; double lon_dff; /* [dgr] Longitude difference */ double lon_avg; /* [dgr] Longitude average */ int lon_idx; /*
[idx] Polygon vertex index */ assert(lon_nbr != 0); lon_avg=lon_crn[0]; for(lon_idx=1;lon_idx<lon_nbr;lon_idx++){ lon_avg+=lon_crn[lon_idx]; lon_dff=lon_crn[lon_idx]-lon_crn[0]; if(lon_dff >= 180.0){ lon_avg-=360.0; }else if(lon_dff <= -180.0){ lon_avg+=360.0; } /* !lon_dff */ } /* !lon_idx */ return lon_avg/lon_nbr; } /* !nco_lon_ply_avg_brnch() */ nco_bool /* O [flg] Input corners were CCW */ nco_ccw_chk /* [fnc] Convert quadrilateral gridcell corners to CCW orientation */ (double * const crn_lat, /* [dgr] Latitude corners of gridcell */ double * const crn_lon, /* [dgr] Longitude corners of gridcell */ const int crn_nbr, /* [nbr] Number of corners per gridcell */ int idx_ccw, /* [idx] Index of starting vertex for CCW check (Point A = tail side AB) */ const int rcr_lvl) /* [nbr] Recursion level */ { /* Purpose: Determine whether corner vertices are oriented CCW If not, alter order so they are returned in CCW order Function can call itself, and rcr_lvl indicates recursion level: rcr_lvl=1: Called by host code, i.e., nco_grd_nfr() rcr_lvl=2: Called by itself, i.e., nco_ccw_chk() Assumptions: Quadrilateral vertices are already corrected to obey branch-cut rules, i.e., all vertices are on "same side" of dateline or Greenwich as appropriate Algorithm: Start crn_idx=0, i.e., quadrilateral LL corner Vector A runs from crn_idx=0 to crn_idx=1, i.e., quadrilateral LL->LR Vector B runs from crn_idx=1 to crn_idx=2, i.e., quadrilateral LR->UR Compute cross-product C = A x B C is normal to plane containing A and B Dot-product of C with radial vector to head A = tail B is positive if A and B are CCW if(ABC is CCW){ if(CDA is CCW) Done else Copy D:=A (make CDA degenerate, triangularize quadrilateral) endif }else(ABC is not CCW){ Assume entire quadrilateral is CW Take mirror image of quadrilateral by switching B with D If(new ABC is CCW){ If(CDA is CCW) Done else Copy D:=A (make CDA degenerate, triangularize quadrilateral) endif }else{ Fail (return False, meaning point should be masked) } All cases return True (i.e., CCW) from rcr_lvl=1 except last Last case returns False, and calling code should mask such an aberrant point */ const char fnc_nm[]="nco_ccw_chk()"; /* MSVC compiler chokes unless array size is compile-time constant */ const int CRN_NBR_MSVC=4; double sin_lat[CRN_NBR_MSVC]; double sin_lon[CRN_NBR_MSVC]; double cos_lat[CRN_NBR_MSVC]; double cos_lon[CRN_NBR_MSVC]; double A_tail_x,A_tail_y,A_tail_z; double A_head_x,A_head_y,A_head_z; double A_x,A_y,A_z; double B_tail_x,B_tail_y,B_tail_z; double B_head_x,B_head_y,B_head_z; double B_x,B_y,B_z; double C_x,C_y,C_z; double R_x,R_y,R_z; double lat_rdn; double lon_rdn; double dot_prd; int crn_idx; /* [idx] Corner idx */ int A_tail_idx,A_head_idx; int B_tail_idx,B_head_idx; nco_bool flg_ccw; /* [flg] Input is CCW */ assert(crn_nbr == CRN_NBR_MSVC); for(crn_idx=0;crn_idx<crn_nbr;crn_idx++){ lat_rdn=crn_lat[crn_idx]*M_PI/180.0; lon_rdn=crn_lon[crn_idx]*M_PI/180.0; sin_lat[crn_idx]=sin(lat_rdn); cos_lat[crn_idx]=cos(lat_rdn); sin_lon[crn_idx]=sin(lon_rdn); cos_lon[crn_idx]=cos(lon_rdn); } /* !crn_idx */ /* Calls from host code (i.e., nco_grd_nfr()) start at lower-left of quadrilateral ABCD = Point A = vertex 0 Calls from self can start from quadrilateral Point A or C To check triangle CDA, start at upper-right of quadrilateral ABCD = Point C = vertex 2 */ A_tail_idx=idx_ccw; A_head_idx=B_tail_idx=(A_tail_idx+1)%crn_nbr; B_head_idx=(B_tail_idx+1)%crn_nbr; A_tail_x=cos_lat[A_tail_idx]*cos_lon[A_tail_idx]; A_tail_y=cos_lat[A_tail_idx]*sin_lon[A_tail_idx];
A_tail_z=sin_lat[A_tail_idx]; A_head_x=B_tail_x=R_x=cos_lat[A_head_idx]*cos_lon[A_head_idx]; A_head_y=B_tail_y=R_y=cos_lat[A_head_idx]*sin_lon[A_head_idx]; A_head_z=B_tail_z=R_z=sin_lat[A_head_idx]; B_head_x=cos_lat[B_head_idx]*cos_lon[B_head_idx]; B_head_y=cos_lat[B_head_idx]*sin_lon[B_head_idx]; B_head_z=sin_lat[B_head_idx]; A_x=A_head_x-A_tail_x; A_y=A_head_y-A_tail_y; A_z=A_head_z-A_tail_z; B_x=B_head_x-B_tail_x; B_y=B_head_y-B_tail_y; B_z=B_head_z-B_tail_z; /* Cross-Product C = A x B */ C_x=A_y*B_z-B_y*A_z; C_y=-A_x*B_z+B_x*A_z; C_z=A_x*B_y-B_x*A_y; /* Dot-Product R dot C */ dot_prd=C_x*R_x+C_y*R_y+C_z*R_z; if(dot_prd > 0.0) flg_ccw=True; else flg_ccw=False; if(flg_ccw && crn_nbr == 4 && rcr_lvl == 1){ /* Original ABC is CCW, now check CDA */ idx_ccw=2; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1); if(!flg_ccw){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports triangle ABC is CCW but triangle CDA is not in quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Setting D:=A to triangularize quadrilateral.\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd); /* Triangularize quadrilateral D:=A */ /* 20210411: From 2016 until today, nco_ccw_chk() overwrote fourth (UL) with first (LL) corner right here even when flg_ccw was True :( */ crn_lat[3]=crn_lat[0]; crn_lon[3]=crn_lon[0]; return True; } /* !flg_ccw */ }else if(!flg_ccw && crn_nbr == 4 && rcr_lvl == 1){ /* Original ABC is not CCW 20160124: Simplistic fix: reverse gridpoint order This only works for quadrilaterals without degenerate points */ double crn_tmp; if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: INFO %s reports triangle ABC is non-CCW in quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Mirror-imaging...\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd); crn_tmp=crn_lat[1]; crn_lat[1]=crn_lat[3]; crn_lat[3]=crn_tmp; crn_tmp=crn_lon[1]; crn_lon[1]=crn_lon[3]; crn_lon[3]=crn_tmp; /* Check new triangle ABC */ idx_ccw=0; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1); if(flg_ccw){ /* Inverted ABC is CCW, now check CDA */ idx_ccw=2; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1); if(flg_ccw){ return True; }else{ if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: INFO %s reports triangle ABC is CCW after inversion, but triangle CDA is not at quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Setting D:=A to triangularize quadrilateral.\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd); /* Triangularize quadrilateral D:=A */ crn_lat[3]=crn_lat[0]; crn_lon[3]=crn_lon[0]; return True; } /* flg_ccw */ }else{ /* Original and Inverted ABC are not CCW */ if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports triangle ABC remains non-CCW after first inversion\n",nco_prg_nm_get(),fnc_nm); return False; } /* !flg_ccw */ } /* flg_ccw */ return flg_ccw; } /* !nco_ccw_chk() */
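/* Minimal standalone sketch (hypothetical helper, not part of NCO) of the cross-product orientation test that nco_ccw_chk() performs on each triangle: map three (lat,lon) vertices to Cartesian unit vectors, form C = A x B from the two edge vectors, and call the triangle CCW when C points radially outward, i.e., when its dot-product with the radial vector at the shared vertex (head of A = tail of B) is positive */
#include <math.h>
static int /* O [flg] Three vertices are CCW */
ccw_chk_sketch /* [fnc] Cross-product orientation test for one spherical triangle */
(const double *lat_dgr, /* I [dgr] Latitudes of three vertices */
 const double *lon_dgr) /* I [dgr] Longitudes of three vertices */
{
  const double dgr2rdn=M_PI/180.0;
  double x[3],y[3],z[3];
  int idx;
  for(idx=0;idx<3;idx++){
    x[idx]=cos(lat_dgr[idx]*dgr2rdn)*cos(lon_dgr[idx]*dgr2rdn);
    y[idx]=cos(lat_dgr[idx]*dgr2rdn)*sin(lon_dgr[idx]*dgr2rdn);
    z[idx]=sin(lat_dgr[idx]*dgr2rdn);
  } /* !idx */
  /* Edge vectors: A runs vertex 0 -> 1, B runs vertex 1 -> 2 */
  const double A_x=x[1]-x[0],A_y=y[1]-y[0],A_z=z[1]-z[0];
  const double B_x=x[2]-x[1],B_y=y[2]-y[1],B_z=z[2]-z[1];
  /* C = A x B, then project C onto radial vector at shared vertex 1 */
  const double C_x=A_y*B_z-B_y*A_z;
  const double C_y=-A_x*B_z+B_x*A_z;
  const double C_z=A_x*B_y-B_x*A_y;
  return (C_x*x[1]+C_y*y[1]+C_z*z[1] > 0.0); /* Positive projection = CCW */
} /* !ccw_chk_sketch() */
/* Example: lat={0,0,10}, lon={0,10,10} (LL, LR, UR of a quadrilateral) returns 1 (CCW); swapping the first two vertices reverses the orientation and returns 0 */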
#include "nco_rgr.h" /* Regridding */ extern double min_dbl(double a, double b); extern double max_dbl(double a, double b); inline double min_dbl(double a, double b){return (a < b) ? a : b;} inline double max_dbl(double a, double b){return (a > b) ? a : b;} int /* O [enm] Return code */ nco_rgr_ctl /* [fnc] Control regridding logic */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Control regridding logic */ int rcd=NCO_NOERR; const char fnc_nm[]="nco_rgr_ctl()"; nco_bool flg_grd=False; /* [flg] Create SCRIP-format grid file */ nco_bool flg_map=False; /* [flg] Create ESMF-format mapfile */ nco_bool flg_nfr=False; /* [flg] Infer SCRIP-format grid file */ nco_bool flg_smf=False; /* [flg] ESMF regridding (unused) */ nco_bool flg_s1d=False; /* [flg] Unpack sparse-1D CLM/ELM variables */ nco_bool flg_tps=False; /* [flg] Tempest regridding (unused) */ nco_bool flg_vrt=False; /* [flg] Interpolate to new vertical grid */ nco_bool flg_wgt=False; /* [flg] Regrid with external weights */ /* Main control branching occurs here Branching complexity and utility will increase as regridding features are added */ if(rgr->flg_grd) flg_grd=True; if(rgr->flg_grd_src && rgr->flg_grd_dst && rgr->flg_wgt) flg_map=True; if(rgr->flg_nfr) flg_nfr=True; if(rgr->flg_wgt && !(rgr->flg_grd_src && rgr->flg_grd_dst)) flg_wgt=True; if(rgr->flg_s1d) flg_s1d=True; if(rgr->fl_vrt) flg_vrt=True; assert(!flg_smf); assert(!flg_tps); /* Create SCRIP-format grid file */ if(flg_grd) rcd=nco_grd_mk(rgr); /* Create ESMF-format map file */ if(flg_map) rcd=nco_map_mk(rgr); /* Infer SCRIP-format grid file from data file */ if(flg_nfr) rcd=nco_grd_nfr(rgr); /* Interpolate data file to new vertical grid */ if(flg_vrt) rcd=nco_ntp_vrt(rgr,trv_tbl); /* Unpack sparse-1D CLM/ELM variables into full file */ if(flg_s1d) rcd=nco_s1d_unpack(rgr,trv_tbl); /* Regrid data horizontally using weights from mapping file */ if(flg_wgt) rcd=nco_rgr_wgt(rgr,trv_tbl); /* Regrid using ESMF library 20150701: On-line weight generation with ESMF never worked well and was abandoned */ if(flg_smf){ #ifdef ENABLE_ESMF (void)fprintf(stderr,"%s: %s calling nco_rgr_esmf() to generate and apply regridding map\n",nco_prg_nm_get(),fnc_nm); rcd=nco_rgr_esmf(rgr); /* Close output and free dynamic memory */ (void)nco_fl_out_cls(rgr->fl_out,rgr->fl_out_tmp,rgr->out_id); #else /* !ENABLE_ESMF */ (void)fprintf(stderr,"%s: ERROR %s reports attempt to use ESMF regridding without built-in support. 
Re-configure with --enable_esmf.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); #endif /* !ENABLE_ESMF */ } /* !flg_smf */ /* Regrid using TempestRemap regridding 20180314: Weight generation with Tempest is implemented off-line via ncremap, not internally on-line However, do not deprecate this since TempestRemap2 has a library that could be accessed on-line */ if(flg_tps) rcd=nco_rgr_tps(rgr); return rcd; } /* end nco_rgr_ctl() */ rgr_sct * /* O [sct] Pointer to free'd regridding structure */ nco_rgr_free /* [fnc] Deallocate regridding structure */ (rgr_sct *rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Free all dynamic memory in regridding structure */ /* free() standalone command-line arguments */ if(rgr->cmd_ln) rgr->cmd_ln=(char *)nco_free(rgr->cmd_ln); if(rgr->grd_ttl) rgr->grd_ttl=(char *)nco_free(rgr->grd_ttl); if(rgr->fl_grd_src) rgr->fl_grd_src=(char *)nco_free(rgr->fl_grd_src); if(rgr->fl_grd_dst) rgr->fl_grd_dst=(char *)nco_free(rgr->fl_grd_dst); if(rgr->fl_hrz) rgr->fl_hrz=(char *)nco_free(rgr->fl_hrz); if(rgr->fl_in) rgr->fl_in=(char *)nco_free(rgr->fl_in); if(rgr->fl_map) rgr->fl_map=(char *)nco_free(rgr->fl_map); if(rgr->fl_msh) rgr->fl_msh=(char *)nco_free(rgr->fl_msh); if(rgr->fl_out) rgr->fl_out=(char *)nco_free(rgr->fl_out); if(rgr->fl_out_tmp) rgr->fl_out_tmp=(char *)nco_free(rgr->fl_out_tmp); if(rgr->fl_vrt) rgr->fl_vrt=(char *)nco_free(rgr->fl_vrt); if(rgr->var_nm) rgr->var_nm=(char *)nco_free(rgr->var_nm); if(rgr->xtn_var) rgr->xtn_var=(char **)nco_sng_lst_free(rgr->xtn_var,rgr->xtn_nbr); /* free() strings associated with grid properties */ if(rgr->fl_grd) rgr->fl_grd=(char *)nco_free(rgr->fl_grd); if(rgr->fl_hnt_dst) rgr->fl_hnt_dst=(char *)nco_free(rgr->fl_hnt_dst); if(rgr->fl_hnt_src) rgr->fl_hnt_src=(char *)nco_free(rgr->fl_hnt_src); if(rgr->fl_skl) rgr->fl_skl=(char *)nco_free(rgr->fl_skl); if(rgr->fl_ugrid) rgr->fl_ugrid=(char *)nco_free(rgr->fl_ugrid); /* Tempest */ if(rgr->drc_tps) rgr->drc_tps=(char *)nco_free(rgr->drc_tps); /* free() memory used to construct KVMs */ if(rgr->rgr_nbr > 0) rgr->rgr_arg=nco_sng_lst_free(rgr->rgr_arg,rgr->rgr_nbr); /* free() memory copied from KVMs */ if(rgr->area_nm) rgr->area_nm=(char *)nco_free(rgr->area_nm); if(rgr->bnd_nm) rgr->bnd_nm=(char *)nco_free(rgr->bnd_nm); if(rgr->bnd_tm_nm) rgr->bnd_tm_nm=(char *)nco_free(rgr->bnd_tm_nm); if(rgr->col_nm_in) rgr->col_nm_in=(char *)nco_free(rgr->col_nm_in); if(rgr->col_nm_out) rgr->col_nm_out=(char *)nco_free(rgr->col_nm_out); if(rgr->frc_nm) rgr->frc_nm=(char *)nco_free(rgr->frc_nm); if(rgr->ilev_nm_in) rgr->ilev_nm_in=(char *)nco_free(rgr->ilev_nm_in); if(rgr->ilev_nm_out) rgr->ilev_nm_out=(char *)nco_free(rgr->ilev_nm_out); if(rgr->lat_bnd_nm) rgr->lat_bnd_nm=(char *)nco_free(rgr->lat_bnd_nm); if(rgr->lat_nm_in) rgr->lat_nm_in=(char *)nco_free(rgr->lat_nm_in); if(rgr->lat_nm_out) rgr->lat_nm_out=(char *)nco_free(rgr->lat_nm_out); if(rgr->lat_vrt_nm) rgr->lat_vrt_nm=(char *)nco_free(rgr->lat_vrt_nm); if(rgr->lat_wgt_nm) rgr->lat_wgt_nm=(char *)nco_free(rgr->lat_wgt_nm); if(rgr->lev_nm_in) rgr->lev_nm_in=(char *)nco_free(rgr->lev_nm_in); if(rgr->lev_nm_out) rgr->lev_nm_out=(char *)nco_free(rgr->lev_nm_out); if(rgr->lon_bnd_nm) rgr->lon_bnd_nm=(char *)nco_free(rgr->lon_bnd_nm); if(rgr->lon_nm_in) rgr->lon_nm_in=(char *)nco_free(rgr->lon_nm_in); if(rgr->lon_nm_out) rgr->lon_nm_out=(char *)nco_free(rgr->lon_nm_out); if(rgr->lon_vrt_nm) rgr->lon_vrt_nm=(char *)nco_free(rgr->lon_vrt_nm); if(rgr->msk_nm) rgr->msk_nm=(char *)nco_free(rgr->msk_nm); if(rgr->plev_nm_in) 
rgr->plev_nm_in=(char *)nco_free(rgr->plev_nm_in);
  if(rgr->vrt_nm) rgr->vrt_nm=(char *)nco_free(rgr->vrt_nm);
  /* Lastly, free() regrid structure itself */
  if(rgr) rgr=(rgr_sct *)nco_free(rgr);

  return rgr;
} /* end nco_rgr_free() */

rgr_sct * /* O [sct] Regridding structure */
nco_rgr_ini /* [fnc] Initialize regridding structure */
(const char * const cmd_ln, /* I [sng] Command-line */
 const int in_id, /* I [id] Input netCDF file ID */
 char **rgr_arg, /* [sng] Regridding arguments */
 const int rgr_arg_nbr, /* [nbr] Number of regridding arguments */
 char * const rgr_in, /* I [sng] File containing fields to be regridded */
 char * const rgr_out, /* I [sng] File containing regridded fields */
 char * const rgr_grd_src, /* I [sng] File containing input grid */
 char * const rgr_grd_dst, /* I [sng] File containing destination grid */
 char * const rgr_hrz, /* I [sng] File containing horizontal coordinate grid */
 char * const rgr_map, /* I [sng] File containing mapping weights from source to destination grid */
 char * const rgr_var, /* I [sng] Variable for special regridding treatment */
 char * const rgr_vrt, /* I [sng] File containing vertical coordinate grid */
 const double wgt_vld_thr, /* I [frc] Weight threshold for valid destination value */
 char **xtn_var, /* I [sng] Extensive variables */
 const int xtn_nbr) /* I [nbr] Number of extensive variables */
{
  /* Purpose: Initialize regridding structure */
  const char fnc_nm[]="nco_rgr_ini()";
  rgr_sct *rgr;

  /* Allocate */
  rgr=(rgr_sct *)nco_malloc(sizeof(rgr_sct));

  /* Initialize variables directly or indirectly set via command-line (except for key-value arguments) */
  rgr->cmd_ln=strdup(cmd_ln); /* [sng] Command-line */
  rgr->flg_usr_rqs=False; /* [flg] User requested regridding */
  rgr->out_id=int_CEWI; /* [id] Output netCDF file ID */
  rgr->in_id=in_id; /* [id] Input netCDF file ID */
  rgr->rgr_arg=rgr_arg; /* [sng] Regridding arguments */
  rgr->rgr_nbr=rgr_arg_nbr; /* [nbr] Number of regridding arguments */
  rgr->drc_tps=NULL; /* [sng] Directory where Tempest grids, meshes, and weights are stored */
  rgr->flg_grd_src= rgr_grd_src ? True : False; /* [flg] User-specified input grid */
  rgr->fl_grd_src=rgr_grd_src; /* [sng] File containing input grid */
  rgr->flg_grd_dst= rgr_grd_dst ? True : False; /* [flg] User-specified destination grid */
  rgr->fl_grd_dst=rgr_grd_dst; /* [sng] File containing destination grid */
  rgr->fl_in=rgr_in; /* [sng] File containing fields to be regridded */
  rgr->fl_out=rgr_out; /* [sng] File containing regridded fields */
  rgr->fl_out_tmp=NULL_CEWI; /* [sng] Temporary file containing regridded fields */
  rgr->flg_wgt= rgr_map ? True : False; /* [flg] User-specified mapping weights */
  rgr->fl_map=rgr_map; /* [sng] File containing mapping weights from source to destination grid */
  rgr->fl_hrz=rgr_hrz; /* [sng] File containing horizontal coordinate grid (for S1D) */
  rgr->fl_vrt=rgr_vrt; /* [sng] File containing vertical coordinate grid */
  rgr->var_nm=rgr_var; /* [sng] Variable for special regridding treatment */
  rgr->xtn_var=xtn_var; /* [sng] Extensive variables */
  rgr->xtn_nbr=xtn_nbr; /* [nbr] Number of extensive variables */

  /* Did user explicitly request regridding?
*/ if(rgr_arg_nbr > 0 || rgr_grd_src != NULL || rgr_grd_dst != NULL || rgr_map != NULL || rgr_vrt != NULL) rgr->flg_usr_rqs=True; /* Initialize arguments after copying */ if(!rgr->fl_out) rgr->fl_out=(char *)strdup("/data/zender/rgr/rgr_out.nc"); if(!rgr->fl_grd_dst) rgr->fl_grd_dst=(char *)strdup("/data/zender/scrip/grids/remap_grid_T42.nc"); // if(!rgr->var_nm) rgr->var_nm=(char *)strdup("ORO"); if(nco_dbg_lvl_get() >= nco_dbg_crr){ (void)fprintf(stderr,"%s: INFO %s reports ",nco_prg_nm_get(),fnc_nm); (void)fprintf(stderr,"flg_usr_rqs = %d, ",rgr->flg_usr_rqs); (void)fprintf(stderr,"rgr_nbr = %d, ",rgr->rgr_nbr); (void)fprintf(stderr,"fl_grd_src = %s, ",rgr->fl_grd_src ? rgr->fl_grd_src : "NULL"); (void)fprintf(stderr,"fl_grd_dst = %s, ",rgr->fl_grd_dst ? rgr->fl_grd_dst : "NULL"); (void)fprintf(stderr,"fl_hrz = %s, ",rgr->fl_hrz ? rgr->fl_hrz : "NULL"); (void)fprintf(stderr,"fl_in = %s, ",rgr->fl_in ? rgr->fl_in : "NULL"); (void)fprintf(stderr,"fl_out = %s, ",rgr->fl_out ? rgr->fl_out : "NULL"); (void)fprintf(stderr,"fl_out_tmp = %s, ",rgr->fl_out_tmp ? rgr->fl_out_tmp : "NULL"); (void)fprintf(stderr,"fl_map = %s, ",rgr->fl_map ? rgr->fl_map : "NULL"); (void)fprintf(stderr,"fl_vrt = %s, ",rgr->fl_vrt ? rgr->fl_vrt : "NULL"); (void)fprintf(stderr,"\n"); } /* endif dbg */ /* Flags */ if(wgt_vld_thr == NC_MIN_DOUBLE){ rgr->flg_rnr=False; }else if(wgt_vld_thr >= 0.0 && wgt_vld_thr <= 1.0){ /* NB: Weight thresholds of 0.0 or nearly zero can lead to underflow or divide-by-zero errors */ // const double wgt_vld_thr_min=1.0e-10; /* [frc] Minimum weight threshold for valid destination value */ rgr->flg_rnr=True; rgr->wgt_vld_thr=wgt_vld_thr; }else{ (void)fprintf(stderr,"%s: ERROR weight threshold must be in [0.0,1.0] and user supplied wgt_vld_thr = %g\n",nco_prg_nm_get(),wgt_vld_thr); nco_exit(EXIT_FAILURE); } /* endif */ /* Parse extended kvm options */ char *sng_fnl=NULL; int cnv_nbr; /* [nbr] Number of elements converted by sscanf() */ int rgr_var_idx; /* [idx] Index over rgr_lst (i.e., all names explicitly specified in all "--rgr var1[,var2]=val" options) */ int rgr_var_nbr=0; kvm_sct *rgr_lst=NULL; /* [sct] List of all regrid specifications */ if(rgr_arg_nbr > 0){ /* Join arguments together */ sng_fnl=nco_join_sng(rgr_arg,rgr_arg_nbr); rgr_lst=nco_arg_mlt_prs(sng_fnl); if(sng_fnl) sng_fnl=(char *)nco_free(sng_fnl); /* Count number of keys */ for(rgr_var_idx=0;(rgr_lst+rgr_var_idx)->key;rgr_var_idx++,rgr_var_nbr++);/* !rgr_var_idx */ } /* !rgr_arg_nbr */ /* NULL-initialize key-value properties required for string variables */ rgr->area_nm=NULL; /* [sng] Name of variable containing gridcell area */ rgr->bnd_nm=NULL; /* [sng] Name of dimension to employ for spatial bounds */ rgr->bnd_tm_nm=NULL; /* [sng] Name of dimension to employ for temporal bounds */ rgr->col_nm_in=NULL; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ rgr->col_nm_out=NULL; /* [sng] Name of horizontal spatial output dimension on unstructured grid */ rgr->frc_nm=NULL; /* [sng] Name of variable containing gridcell fraction */ rgr->ilev_nm_in=NULL; /* [sng] Name of input dimension to recognize as vertical dimension at layer interfaces */ rgr->ilev_nm_out=NULL; /* [sng] Name of output vertical dimension at layer interfaces */ rgr->lat_bnd_nm=NULL; /* [sng] Name of rectangular boundary variable for latitude */ rgr->lat_dmn_nm=NULL; /* [sng] Name of latitude dimension in inferred grid */ rgr->lat_nm_in=NULL; /* [sng] Name of input dimension to recognize as latitude */ rgr->lat_nm_out=NULL; 
/* [sng] Name of output dimension for latitude */
  rgr->lat_vrt_nm=NULL; /* [sng] Name of non-rectangular boundary variable for latitude */
  rgr->lat_wgt_nm=NULL; /* [sng] Name of variable containing latitude weights */
  rgr->lev_nm_in=NULL; /* [sng] Name of input dimension to recognize as vertical dimension at layer midpoints */
  rgr->lev_nm_out=NULL; /* [sng] Name of output vertical dimension at layer midpoints */
  rgr->lon_bnd_nm=NULL; /* [sng] Name of rectangular boundary variable for longitude */
  rgr->lon_dmn_nm=NULL; /* [sng] Name of longitude dimension in inferred grid */
  rgr->lon_nm_in=NULL; /* [sng] Name of dimension to recognize as longitude */
  rgr->lon_nm_out=NULL; /* [sng] Name of output dimension for longitude */
  rgr->lon_vrt_nm=NULL; /* [sng] Name of non-rectangular boundary variable for longitude */
  rgr->msk_nm=NULL; /* [sng] Name of variable containing destination mask */
  rgr->plev_nm_in=NULL; /* [sng] Name of input variable to recognize as pure-pressure coordinate */
  rgr->sgs_frc_nm=NULL; /* [sng] Name of variable containing sub-gridscale fraction */
  rgr->sgs_msk_nm=NULL; /* [sng] Name of variable containing sub-gridscale mask */
  rgr->vrt_nm=NULL; /* [sng] Name of dimension to employ for vertices */

  /* Initialize key-value properties used in grid and weight generation */
  rgr->area_mth=1; /* [enm] Method to compute grid cell area */
  rgr->edg_typ=nco_edg_nil; /* [enm] Edge/Arc-type for triangle edges */
  rgr->fl_grd=NULL; /* [sng] Name of SCRIP grid file to create */
  rgr->fl_hnt_dst=NULL; /* [sng] ERWG hint destination */
  rgr->fl_hnt_src=NULL; /* [sng] ERWG hint source */
  rgr->fl_msh=NULL; /* [sng] Name of SCRIP intersection mesh file to create */
  rgr->fl_skl=NULL; /* [sng] Name of skeleton data file to create */
  rgr->fl_ugrid=NULL; /* [sng] Name of UGRID grid file to create */
  rgr->flg_add_fll=False; /* [flg] Add _FillValue to fields with empty destination cells */
  rgr->flg_area_out=True; /* [flg] Add area to output */
  rgr->flg_cf_units=False; /* [flg] Generate CF-compliant (breaks ERWG 7.1.0r-) units fields in SCRIP-format grid files */
  rgr->flg_cll_msr=True; /* [flg] Add cell_measures attribute */
  rgr->flg_crv=False; /* [flg] Use curvilinear coordinates */
  rgr->flg_dgn_area=False; /* [flg] Diagnose rather than copy inferred area */
  rgr->flg_dgn_bnd=False; /* [flg] Diagnose rather than copy inferred bounds */
  rgr->flg_erwg_units=True; /* [flg] Generate ERWG 7.1.0r-compliant SCRIP-format grid files */
  rgr->flg_grd=False; /* [flg] Create SCRIP-format grid file */
  rgr->flg_msk_apl=False; /* [flg] Apply msk_out to variables after regridding */
  rgr->flg_msk_out=False; /* [flg] Add mask to output */
  rgr->flg_nfr=False; /* [flg] Infer SCRIP-format grid file */
  rgr->flg_s1d=False; /* [flg] Unpack sparse-1D CLM/ELM variables */
  rgr->flg_stg=True; /* [flg] Write staggered grid with FV output */
  rgr->grd_ttl=strdup("None given (supply with --rgr grd_ttl=\"Grid Title\")"); /* [sng] Grid title */
  rgr->grd_typ=nco_grd_2D_eqa; /* [enm] Grid type */
  rgr->idx_dbg=0; /* [idx] Index of gridcell for debugging */
  rgr->lat_drc=nco_grd_lat_drc_s2n; /* [enm] Latitude grid direction */
  rgr->lat_typ=nco_grd_lat_eqa; /* [enm] Latitude grid type */
  rgr->lon_typ=nco_grd_lon_Grn_ctr; /* [enm] Longitude grid type */
  rgr->lat_nbr=180; /* [nbr] Number of latitudes in destination grid */
  rgr->lon_nbr=360; /* [nbr] Number of longitudes in destination grid */
  rgr->lat_crv=0.0; /* [dgr] Latitudinal curvilinearity */
  rgr->lon_crv=0.0; /* [dgr] Longitudinal curvilinearity */
  rgr->lat_sth=NC_MAX_DOUBLE; /* [dgr] Latitude of southern edge of
grid */
  rgr->lon_wst=NC_MAX_DOUBLE; /* [dgr] Longitude of western edge of grid */
  rgr->lat_nrt=NC_MAX_DOUBLE; /* [dgr] Latitude of northern edge of grid */
  rgr->lon_est=NC_MAX_DOUBLE; /* [dgr] Longitude of eastern edge of grid */
  rgr->msk_var=NULL; /* [sng] Mask-template variable */
  rgr->ply_tri_mth=nco_ply_tri_mth_csz; /* [enm] Polygon-to-triangle decomposition method */
  rgr->sgs_nrm=1.0; /* [frc] Sub-gridscale normalization */
  rgr->tst=0L; /* [enm] Generic key for testing (undocumented) */
  rgr->ntp_mth=nco_ntp_log; /* [enm] Interpolation method */
  rgr->xtr_mth=nco_xtr_fll_ngh; /* [enm] Extrapolation method */
  rgr->xtr_nsp=8; /* [nbr] Extrapolation number of source points */
  rgr->xtr_xpn=2.0; /* [frc] Exponent of distance in extrapolation (absolute value) */
  rgr->wgt_typ=nco_wgt_con; /* [enm] Weight generation method */

  /* Parse key-value properties */
  char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */
  for(rgr_var_idx=0;rgr_var_idx<rgr_var_nbr;rgr_var_idx++){
    if(!strcmp(rgr_lst[rgr_var_idx].key,"grid") || !strcasecmp(rgr_lst[rgr_var_idx].key,"scrip")){
      rgr->fl_grd=(char *)strdup(rgr_lst[rgr_var_idx].val);
      rgr->flg_grd=True;
      continue;
    } /* !grid */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"hnt_dst") || !strcmp(rgr_lst[rgr_var_idx].key,"fl_hnt_dst")){
      rgr->fl_hnt_dst=(char *)strdup(rgr_lst[rgr_var_idx].val);
      continue;
    } /* !hnt_dst */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"hnt_src") || !strcmp(rgr_lst[rgr_var_idx].key,"fl_hnt_src")){
      rgr->fl_hnt_src=(char *)strdup(rgr_lst[rgr_var_idx].val);
      continue;
    } /* !hnt_src */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_var") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_var") || !strcmp(rgr_lst[rgr_var_idx].key,"mask") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_variable")){
      rgr->msk_var=(char *)strdup(rgr_lst[rgr_var_idx].val);
      rgr->flg_msk_out=True;
      continue;
    } /* !msk_var */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"msh") || !strcmp(rgr_lst[rgr_var_idx].key,"mesh")){
      rgr->fl_msh=(char *)strdup(rgr_lst[rgr_var_idx].val);
      continue;
    } /* !msh */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"skl")){
      rgr->fl_skl=(char *)strdup(rgr_lst[rgr_var_idx].val);
      rgr->flg_grd=True;
      continue;
    } /* !skl */
    if(!strcasecmp(rgr_lst[rgr_var_idx].key,"ugrid")){
      rgr->fl_ugrid=(char *)strdup(rgr_lst[rgr_var_idx].val);
      rgr->flg_nfr=True;
      continue;
    } /* !ugrid */
    if(!strcasecmp(rgr_lst[rgr_var_idx].key,"fl_hrz") || !strcasecmp(rgr_lst[rgr_var_idx].key,"hrz")){
      rgr->fl_hrz=(char *)strdup(rgr_lst[rgr_var_idx].val);
      continue;
    } /* !hrz */
    if(!strcasecmp(rgr_lst[rgr_var_idx].key,"fl_vrt") || !strcasecmp(rgr_lst[rgr_var_idx].key,"vrt")){
      rgr->fl_vrt=(char *)strdup(rgr_lst[rgr_var_idx].val);
      continue;
    } /* !vrt */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"no_area") || !strcmp(rgr_lst[rgr_var_idx].key,"no_area_out")){
      rgr->flg_area_out=False;
      continue;
    } /* !area */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"no_msk") || !strcmp(rgr_lst[rgr_var_idx].key,"no_msk_out") || !strcmp(rgr_lst[rgr_var_idx].key,"no_mask") || !strcmp(rgr_lst[rgr_var_idx].key,"no_mask_out")){
      rgr->flg_msk_out=False;
      continue;
    } /* !msk */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_apl") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_apply")){
      rgr->flg_msk_apl=True;
      /* Ensure masked fields regridded with TR maps have _FillValue to guarantee BFB arithmetic with masked fields regridded with other maps that adhere to SCRIP/ESMF mask rules */
      rgr->flg_add_fll=True;
      continue;
    } /* !msk_apl */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_out") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_out")){
      rgr->flg_msk_out=True;
      continue;
    } /* !mask */
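    /* Usage illustration (not from NCO documentation; filenames are hypothetical):
       the key-value pairs dispatched in this loop originate from repeated "--rgr key=val"
       command-line options that nco_join_sng() concatenates and nco_arg_mlt_prs() splits
       into kvm_sct entries, e.g.,
         ncks --rgr grid=grd.nc --rgr grd_ttl="Grid Title" --rgr latlon=180,360 in.nc out.nc
       Here "grid" stores the SCRIP grid-file name and sets flg_grd (handled above), while
       "grd_ttl" and "latlon" are claimed by their if-blocks below */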
if(!strcmp(rgr_lst[rgr_var_idx].key,"add_fll") || !strcmp(rgr_lst[rgr_var_idx].key,"add_fill_value") || !strcmp(rgr_lst[rgr_var_idx].key,"fll_mpt") || !strcmp(rgr_lst[rgr_var_idx].key,"fill_empty")){ rgr->flg_add_fll=True; continue; } /* !add_fll */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_measures") || !strcmp(rgr_lst[rgr_var_idx].key,"cll_msr")){ rgr->flg_cll_msr=True; continue; } /* !cell_measures */ if(!strcmp(rgr_lst[rgr_var_idx].key,"no_cell_measures") || !strcmp(rgr_lst[rgr_var_idx].key,"no_cll_msr")){ rgr->flg_cll_msr=False; continue; } /* !cell_measures */ if(!strcmp(rgr_lst[rgr_var_idx].key,"curvilinear") || !strcmp(rgr_lst[rgr_var_idx].key,"crv")){ rgr->flg_crv=True; continue; } /* !curvilinear */ if(!strcmp(rgr_lst[rgr_var_idx].key,"diagnose_area") || !strcmp(rgr_lst[rgr_var_idx].key,"dgn_area")){ rgr->flg_dgn_area=True; continue; } /* !diagnose_area */ if(!strcmp(rgr_lst[rgr_var_idx].key,"diagnose_bounds") || !strcmp(rgr_lst[rgr_var_idx].key,"dgn_bnd")){ rgr->flg_dgn_bnd=True; continue; } /* !diagnose_bounds */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cf_units") || !strcmp(rgr_lst[rgr_var_idx].key,"CF_units")){ rgr->flg_cf_units=True; rgr->flg_erwg_units=False; continue; } /* !erwg_units */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_area_quad")){ rgr->area_mth=2; continue; } /* !area_nco */ if(!strcmp(rgr_lst[rgr_var_idx].key,"cell_area_nco")){ rgr->area_mth=1; continue; } /* !area_nco */ if(!strcmp(rgr_lst[rgr_var_idx].key,"edg_typ") || !strcmp(rgr_lst[rgr_var_idx].key,"tri_arc") || !strcmp(rgr_lst[rgr_var_idx].key,"vrt_cnc")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"grt_crc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"gtc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"great_circle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"geodesic") || !strcasecmp(rgr_lst[rgr_var_idx].val,"orthodrome")){ rgr->edg_typ=nco_edg_gtc; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"sml_crc") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ltr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"small_circle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"latitude_triangle") || !strcasecmp(rgr_lst[rgr_var_idx].val,"true")){ rgr->edg_typ=nco_edg_smc; (void)fprintf(stderr,"%s: WARNING Requested to run with small-circle edges. This option has not yet been tested and validated. 
Use only at your own risk.\n",nco_prg_nm_get());
      }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"crt") || !strcasecmp(rgr_lst[rgr_var_idx].val,"cartesian") || !strcasecmp(rgr_lst[rgr_var_idx].val,"planar") || !strcasecmp(rgr_lst[rgr_var_idx].val,"flat")){
        rgr->edg_typ=nco_edg_crt;
      }else{
        (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
        abort();
      } /* !val */
      continue;
    } /* !edg_typ */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"erwg_units") || !strcmp(rgr_lst[rgr_var_idx].key,"esmf_units") || !strcmp(rgr_lst[rgr_var_idx].key,"degrees")){
      rgr->flg_cf_units=False;
      rgr->flg_erwg_units=True;
      continue;
    } /* !erwg_units */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"infer") || !strcmp(rgr_lst[rgr_var_idx].key,"nfr")){
      rgr->flg_nfr=True;
      continue;
    } /* !infer */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"no_stagger") || !strcmp(rgr_lst[rgr_var_idx].key,"no_stg")){
      rgr->flg_stg=False;
      continue;
    } /* !stagger */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"grd_ttl") || !strcmp(rgr_lst[rgr_var_idx].key,"ttl")){
      if(rgr->grd_ttl) rgr->grd_ttl=(char *)nco_free(rgr->grd_ttl);
      rgr->grd_ttl=(char *)strdup(rgr_lst[rgr_var_idx].val);
      /* 20180828 Replace unquoted tildes with spaces (like LaTeX, NCL) so ncremap users can put tildes in place of spaces in ttl
         20180905 Reverted this since quoting command in ncremap is superior solution */
      if(False){
        size_t ttl_lng=strlen(rgr->grd_ttl);
        for(size_t ttl_idx=0L;ttl_idx<ttl_lng;ttl_idx++)
          if(rgr->grd_ttl[ttl_idx] == '~'){
            if(ttl_idx == 0L) rgr->grd_ttl[ttl_idx]=' '; // Always convert tilde to space if first character
            else if(rgr->grd_ttl[ttl_idx-1L] != '\\') rgr->grd_ttl[ttl_idx]=' '; // Convert tilde in other locations unless backslash-quoted
          } /* !tilde */
      } /* !0 */
      continue;
    } /* !grd_ttl */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"idx_dbg")){
      rgr->idx_dbg=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
      if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd);
      continue;
    } /* !idx_dbg */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"latlon")){
      cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%ld,%ld",&rgr->lat_nbr,&rgr->lon_nbr);
      assert(cnv_nbr == 2);
      continue;
    } /* !latlon */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"lonlat")){
      cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%ld,%ld",&rgr->lon_nbr,&rgr->lat_nbr);
      assert(cnv_nbr == 2);
      continue;
    } /* !lonlat */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nbr")){
      rgr->lat_nbr=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
      if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd);
      continue;
    } /* !lat_nbr */
    if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nbr")){
      rgr->lon_nbr=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
      if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd);
      continue;
    } /* !lon_nbr */
    if(!strcasecmp(rgr_lst[rgr_var_idx].key,"snwe")){
      cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%lf,%lf,%lf,%lf",&rgr->lat_sth,&rgr->lat_nrt,&rgr->lon_wst,&rgr->lon_est);
      if(cnv_nbr != 4) (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val);
      assert(cnv_nbr == 4);
      if(cnv_nbr != 4) abort(); /* CEWI Use cnv_nbr at least once outside of assert() to avoid gcc 4.8.2 set-but-not-used warning */
      continue;
    } /* !snwe */
    if(!strcasecmp(rgr_lst[rgr_var_idx].key,"wesn")){
cnv_nbr=sscanf(rgr_lst[rgr_var_idx].val,"%lf,%lf,%lf,%lf",&rgr->lon_wst,&rgr->lon_est,&rgr->lat_sth,&rgr->lat_nrt); assert(cnv_nbr == 4); continue; } /* !wesn */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_crv")){ rgr->lat_crv=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !lat_crv */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_crv")){ rgr->lon_crv=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !lon_crv */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_sth")){ rgr->lat_sth=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); // rgr->lat_typ=nco_grd_lat_bb; continue; } /* !lat_sth */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_wst")){ rgr->lon_wst=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); rgr->lon_typ=nco_grd_lon_bb; continue; } /* !lon_wst */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nrt")){ rgr->lat_nrt=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); //rgr->lat_typ=nco_grd_lat_bb; continue; } /* !lat_nrt */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_est")){ rgr->lon_est=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); rgr->lon_typ=nco_grd_lon_bb; continue; } /* !lon_est */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_drc")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"s2n") || !strcasecmp(rgr_lst[rgr_var_idx].val,"south2north") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ston") || !strcasecmp(rgr_lst[rgr_var_idx].val,"southnorth")){ rgr->lat_drc=nco_grd_lat_drc_s2n; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"n2s") || !strcasecmp(rgr_lst[rgr_var_idx].val,"north2south") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ntos") || !strcasecmp(rgr_lst[rgr_var_idx].val,"northsouth")){ rgr->lat_drc=nco_grd_lat_drc_n2s; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !lat_drc */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_typ")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"cap") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fv") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fix") || !strcasecmp(rgr_lst[rgr_var_idx].val,"yarmulke")){ rgr->lat_typ=nco_grd_lat_fv; rgr->grd_typ=nco_grd_2D_fv; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"eqa") || !strcasecmp(rgr_lst[rgr_var_idx].val,"rgl") || !strcasecmp(rgr_lst[rgr_var_idx].val,"unf") || !strcasecmp(rgr_lst[rgr_var_idx].val,"uni")){ rgr->lat_typ=nco_grd_lat_eqa; rgr->grd_typ=nco_grd_2D_eqa; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"gss")){ rgr->lat_typ=nco_grd_lat_gss; rgr->grd_typ=nco_grd_2D_gss; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !lat_typ */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_typ")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"180_wst") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wst_180")) rgr->lon_typ=nco_grd_lon_180_wst; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"180_ctr") || 
!strcasecmp(rgr_lst[rgr_var_idx].val,"ctr_180")) rgr->lon_typ=nco_grd_lon_180_ctr; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"Grn_wst") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wst_Grn")) rgr->lon_typ=nco_grd_lon_Grn_wst; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"Grn_ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ctr_Grn")) rgr->lon_typ=nco_grd_lon_Grn_ctr; else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !lon_typ */ if(!strcmp(rgr_lst[rgr_var_idx].key,"area_nm")){ rgr->area_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !area_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"bnd_nm")){ rgr->bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"bnd_tm_nm")){ rgr->bnd_tm_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !bnd_tm_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"col_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"col_nm")){ rgr->col_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !col_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"col_nm_out")){ rgr->col_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !col_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"frc_nm")){ rgr->frc_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !frc_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm")){ rgr->ilev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !ilev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ilev_nm_out")){ rgr->ilev_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !ilev_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_bnd_nm")){ rgr->lat_bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_dmn_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"lat_dmn")){ rgr->lat_dmn_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_dmn_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lat_nm")){ rgr->lat_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_nm_out")){ rgr->lat_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_vrt_nm")){ rgr->lat_vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lat_wgt_nm")){ rgr->lat_wgt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lat_wgt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lev_nm")){ rgr->lev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lev_nm_out")){ rgr->lev_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lev_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_bnd_nm")){ rgr->lon_bnd_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_bnd_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_dmn_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"lon_dmn")){ rgr->lon_dmn_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_dmn_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"lon_nm")){ rgr->lon_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_nm_in 
*/ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_nm_out")){ rgr->lon_nm_out=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_nm_out */ if(!strcmp(rgr_lst[rgr_var_idx].key,"lon_vrt_nm")){ rgr->lon_vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !lon_vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"plev_nm_in") || !strcmp(rgr_lst[rgr_var_idx].key,"plev_nm")){ rgr->plev_nm_in=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !plev_nm_in */ if(!strcmp(rgr_lst[rgr_var_idx].key,"ply_tri")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"csz")){ rgr->ply_tri_mth=nco_ply_tri_mth_csz; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"ctr") || !strcasecmp(rgr_lst[rgr_var_idx].val,"centroid") || !strcasecmp(rgr_lst[rgr_var_idx].val,"snl") || !strcasecmp(rgr_lst[rgr_var_idx].val,"mat")){ rgr->ply_tri_mth=nco_ply_tri_mth_ctr; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !ply_tri */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_frc_nm")){ rgr->sgs_frc_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !sgs_frc */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_msk_nm")){ rgr->sgs_msk_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !sgs_msk */ if(!strcmp(rgr_lst[rgr_var_idx].key,"sgs_nrm")){ rgr->sgs_nrm=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !sgs_nrm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"tst")){ rgr->tst=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !tst */ if(!strcmp(rgr_lst[rgr_var_idx].key,"msk_nm") || !strcmp(rgr_lst[rgr_var_idx].key,"mask_nm")){ rgr->msk_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); rgr->flg_msk_out=True; continue; } /* !msk_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"vrt_nm")){ rgr->vrt_nm=(char *)strdup(rgr_lst[rgr_var_idx].val); continue; } /* !vrt_nm */ if(!strcmp(rgr_lst[rgr_var_idx].key,"vrt_ntp") || !strcmp(rgr_lst[rgr_var_idx].key,"ntp_mth")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"lin") || !strcasecmp(rgr_lst[rgr_var_idx].val,"linear") || !strcasecmp(rgr_lst[rgr_var_idx].val,"lnr")){ rgr->ntp_mth=nco_ntp_lnr; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"log") || !strcasecmp(rgr_lst[rgr_var_idx].val,"logarithmic") || !strcasecmp(rgr_lst[rgr_var_idx].val,"lgr")){ rgr->ntp_mth=nco_ntp_log; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !ntp_mth */ if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_mth")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"nrs_ngh") || !strcasecmp(rgr_lst[rgr_var_idx].val,"ngh") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nearest_neighbor") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nn")){ rgr->xtr_mth=nco_xtr_fll_ngh; }else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"mss_val") || !strcasecmp(rgr_lst[rgr_var_idx].val,"msv") || !strcasecmp(rgr_lst[rgr_var_idx].val,"fll_val") || !strcasecmp(rgr_lst[rgr_var_idx].val,"missing_value")){ rgr->xtr_mth=nco_xtr_fll_msv; }else{ (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), 
aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !xtr_mth */ if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_nsp") || !strcmp(rgr_lst[rgr_var_idx].key,"xtr_nbr_src_pnt") || !strcmp(rgr_lst[rgr_var_idx].key,"number_source_points") || !strcmp(rgr_lst[rgr_var_idx].key,"extrapolation_number_source_points")){ rgr->xtr_nsp=strtol(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtol",sng_cnv_rcd); continue; } /* !xtr_nsp */ if(!strcmp(rgr_lst[rgr_var_idx].key,"xtr_xpn") || !strcmp(rgr_lst[rgr_var_idx].key,"extrapolation_exponent") || !strcmp(rgr_lst[rgr_var_idx].key,"exponent_of_distance_in_extrapolation")){ rgr->xtr_xpn=strtod(rgr_lst[rgr_var_idx].val,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(rgr_lst[rgr_var_idx].val,"strtod",sng_cnv_rcd); continue; } /* !xtr_xpn */ if(!strcmp(rgr_lst[rgr_var_idx].key,"wgt_typ") || !strcmp(rgr_lst[rgr_var_idx].key,"weight_type")){ if(!strcasecmp(rgr_lst[rgr_var_idx].val,"con") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_con") || !strcasecmp(rgr_lst[rgr_var_idx].val,"conservative") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_con")) rgr->wgt_typ=nco_wgt_con; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"idw") || !strcasecmp(rgr_lst[rgr_var_idx].val,"dwe") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_idw") || !strcasecmp(rgr_lst[rgr_var_idx].val,"distance_weighted") || !strcasecmp(rgr_lst[rgr_var_idx].val,"inverse_distance") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_idw")) rgr->wgt_typ=nco_wgt_idw; else if(!strcasecmp(rgr_lst[rgr_var_idx].val,"bln") || !strcasecmp(rgr_lst[rgr_var_idx].val,"nco_bln") || !strcasecmp(rgr_lst[rgr_var_idx].val,"bilinear") || !strcasecmp(rgr_lst[rgr_var_idx].val,"wgt_bln")) rgr->wgt_typ=nco_wgt_bln; else { (void)fprintf(stderr,"%s: ERROR %s unable to parse \"%s\" option value \"%s\" (possible typo in value?), aborting...\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key,rgr_lst[rgr_var_idx].val); abort(); } /* !val */ continue; } /* !wgt_typ */ (void)fprintf(stderr,"%s: ERROR %s reports unrecognized key-value option to --rgr switch: %s\n",nco_prg_nm_get(),fnc_nm,rgr_lst[rgr_var_idx].key); nco_exit(EXIT_FAILURE); } /* !rgr_var_idx */ /* Eliminate sticky wickets: Give nfr precedence over grd */ if(rgr->flg_nfr && rgr->flg_grd) rgr->flg_grd=False; /* Revert to defaults for any names not specified on command-line */ if(!rgr->area_nm) rgr->area_nm=(char *)strdup("area"); /* [sng] Name of variable containing gridcell area */ if(!rgr->bnd_nm) rgr->bnd_nm=(char *)strdup("nvertices"); /* [sng] Name of dimension to employ for spatial bounds */ /* NB: CESM uses nbnd and ilev for temporal and vertical bounds, respectively (CESM outputs no horizontal spatial bounds). NCO defaults to nbnd for all bounds with two endpoints. 
*/ if(!rgr->bnd_tm_nm) rgr->bnd_tm_nm=(char *)strdup("nbnd"); /* [sng] Name of dimension to employ for temporal bounds */ if(!rgr->col_nm_in) rgr->col_nm_in=(char *)strdup("ncol"); /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ if(!rgr->frc_nm) rgr->frc_nm=(char *)strdup("frac_b"); /* [sng] Name of variable containing gridcell fraction */ if(!rgr->ilev_nm_in) rgr->ilev_nm_in=(char *)strdup("ilev"); /* [sng] Name of input dimension to recognize as vertical dimension at layer interfaces */ if(!rgr->lat_bnd_nm) rgr->lat_bnd_nm=(char *)strdup("lat_bnds"); /* [sng] Name of rectangular boundary variable for latitude */ if(!rgr->lat_nm_in) rgr->lat_nm_in=(char *)strdup("lat"); /* [sng] Name of input dimension to recognize as latitude */ if(!rgr->lev_nm_in) rgr->lev_nm_in=(char *)strdup("lev"); /* [sng] Name of input dimension to recognize as vertical dimension at layer midpoints */ if(!rgr->lat_vrt_nm) rgr->lat_vrt_nm=(char *)strdup("lat_vertices"); /* [sng] Name of non-rectangular boundary variable for latitude */ if(!rgr->lat_wgt_nm) rgr->lat_wgt_nm=(char *)strdup("gw"); /* [sng] Name of variable containing latitude weights */ if(!rgr->lon_bnd_nm) rgr->lon_bnd_nm=(char *)strdup("lon_bnds"); /* [sng] Name of rectangular boundary variable for longitude */ if(!rgr->lon_nm_in) rgr->lon_nm_in=(char *)strdup("lon"); /* [sng] Name of dimension to recognize as longitude */ if(!rgr->lon_vrt_nm) rgr->lon_vrt_nm=(char *)strdup("lon_vertices"); /* [sng] Name of non-rectangular boundary variable for longitude */ if(!rgr->msk_nm) rgr->msk_nm=(char *)strdup("mask_b"); /* [sng] Name of variable containing destination mask */ if(!rgr->vrt_nm) rgr->vrt_nm=(char *)strdup("nv"); /* [sng] Name of dimension to employ for vertices */ if(!rgr->plev_nm_in) rgr->plev_nm_in=(char *)strdup("plev"); /* [sng] Name of variable to recognize as pure pressure coordinate */ /* Derived from defaults and command-line arguments */ // On second thought, do not strdup() these here. 
This way, NULL means user never specified lon/lat-out names // if(!rgr->col_nm_out) rgr->col_nm_out=(char *)strdup("ncol"); /* [sng] Name of dimension to output as horizontal spatial dimension on unstructured grid */ // if(!rgr->lat_nm_out) rgr->lat_nm_out=(char *)strdup("lat"); /* [sng] Name of dimension to output as latitude */ // if(!rgr->lon_nm_out) rgr->lon_nm_out=(char *)strdup("lon"); /* [sng] Name of dimension to output as longitude */ // if(!rgr->lat_nm_out) rgr->lat_nm_out=(char *)strdup(rgr_lat_nm_in); /* [sng] Name of output dimension for latitude */ // if(!rgr->lon_nm_out) rgr->lon_nm_out=(char *)strdup(rgr_lon_nm_in); /* [sng] Name of output dimension for longitude */ /* Free kvms */ if(rgr_lst) rgr_lst=nco_kvm_lst_free(rgr_lst,rgr_var_nbr); return rgr; } /* end nco_rgr_ini() */ int /* O [enm] Return code */ nco_ntp_vrt /* [fnc] Interpolate vertically */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Interpolate fields to new vertical grid specified in a vertical file */ const char fnc_nm[]="nco_ntp_vrt()"; /* [sng] Function name */ char *fl_tpl; /* [sng] Template file (vertical grid file) */ char *fl_pth_lcl=NULL; int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int in_id; /* I [id] Input netCDF file ID */ int tpl_id; /* [id] Input netCDF file ID (for vertical grid template) */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int dmn_idx; /* [idx] Dimension index */ int rec_idx; /* [idx] Record dimension index */ nco_bool FL_RTR_RMT_LCN; nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s obtaining vertical grid from %s\n",nco_prg_nm_get(),fnc_nm,rgr->fl_vrt); /* Duplicate (because nco_fl_mk_lcl() free()'s its fl_in) */ fl_tpl=(char *)strdup(rgr->fl_vrt); /* Make sure file is on local system and is readable or die trying */ fl_tpl=nco_fl_mk_lcl(fl_tpl,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_tpl,md_open,&bfr_sz_hnt,&tpl_id); /* Formula-terms for hybrid pressure vertical grid on unstructured CAM/EAM horizontal grid: prs_mdp[time,lev,col]=P0*hyam[lev] +PS[time,col]*hybm[lev] prs_ntf[time,lev,col]=P0*hyai[ilev]+PS[time,col]*hybi[ilev] */ /* Formula-terms for hybrid pressure vertical grid on ECMWF RLL horizontal grid: prs_mdp[time,lev,lat,lon]=hyam[lev] +exp(lnsp[time,lat,lon])*hybm[lev] prs_ntf[time,lev,lat,lon]=hyai[ilev]+exp(lnsp[time,lat,lon])*hybi[ilev] */ /* For simplicity and code re-use, all single-variable (not hybrid-variable) coordinate systems adopt "lev" semantics This includes pure pressure coordinates and eventually will include sigma, depth, and height coordinates Only hybrid coordinates will refer to the "ilev" levels and indices All single coordinate systems will refer to "lev" levels and indices */ int dpt_id; /* [id] Ocean depth ID */ 
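/* Worked example of the hybrid-coordinate formulae above (illustrative values only):
   with P0 = 100000 Pa, hyam[k] = 0.1, hybm[k] = 0.8, and PS = 98000 Pa,
   prs_mdp = 100000*0.1 + 98000*0.8 = 10000 + 78400 = 88400 Pa (= 884 hPa)
   Near the surface hybm ~ 1 and hyam ~ 0 so levels track terrain;
   aloft hybm ~ 0 so levels asymptote to pure-pressure surfaces */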
int hyai_id=NC_MIN_INT; /* [id] Hybrid A coefficient at layer interfaces ID */ int hyam_id=NC_MIN_INT; /* [id] Hybrid A coefficient at layer midpoints ID */ int hybi_id=NC_MIN_INT; /* [id] Hybrid B coefficient at layer interfaces ID */ int hybm_id=NC_MIN_INT; /* [id] Hybrid B coefficient at layer midpoints ID */ int ilev_id=NC_MIN_INT; /* [id] Interface pressure ID */ int lev_id=NC_MIN_INT; /* [id] Midpoint pressure ID */ int p0_id=NC_MIN_INT; /* [id] Reference pressure ID */ int ps_id=NC_MIN_INT; /* [id] Surface pressure ID */ int plev_id; /* [id] Air pressure ID */ nco_bool flg_grd_hyb_cameam=False; /* [flg] Hybrid coordinate vertical grid uses CAM/EAM conventions */ nco_bool flg_grd_hyb_ecmwf=False; /* [flg] Hybrid coordinate vertical grid uses ECMWF conventions */ nco_bool flg_grd_in_dpt=False; /* [flg] Input depth coordinate vertical grid */ nco_bool flg_grd_in_hyb=False; /* [flg] Input hybrid coordinate vertical grid */ nco_bool flg_grd_in_prs=False; /* [flg] Input pressure coordinate vertical grid */ nco_bool flg_grd_out_dpt=False; /* [flg] Output depth coordinate vertical grid */ nco_bool flg_grd_out_hyb=False; /* [flg] Output hybrid coordinate vertical grid */ nco_bool flg_grd_out_prs=False; /* [flg] Output pressure coordinate vertical grid */ nco_bool flg_vrt_tm=False; /* [flg] Output depends on time-varying vertical grid */ nco_grd_vrt_typ_enm nco_vrt_grd_in=nco_vrt_grd_nil; /* [enm] Vertical grid type for input grid */ nco_grd_vrt_typ_enm nco_vrt_grd_out=nco_vrt_grd_nil; /* [enm] Vertical grid type for output grid */ nco_ntp_typ_enm ntp_mth=rgr->ntp_mth; /* [enm] Interpolation method */ nco_xtr_typ_enm xtr_mth=rgr->xtr_mth; /* [enm] Extrapolation method */ /* Determine output grid type */ if((rcd=nco_inq_varid_flg(tpl_id,"hyai",&hyai_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_hyb; /* EAM */ flg_grd_out_hyb=True; }else if((rcd=nco_inq_varid_flg(tpl_id,"plev",&plev_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_prs; /* NCEP */ flg_grd_out_prs=True; }else if((rcd=nco_inq_varid_flg(tpl_id,"depth",&dpt_id)) == NC_NOERR){ nco_vrt_grd_out=nco_vrt_grd_dpt; /* MPAS */ flg_grd_out_dpt=True; }else{ /* !hyai */ (void)fprintf(stdout,"%s: ERROR %s Unable to locate hybrid-sigma/pressure or pure-pressure vertical grid coordinate information in vertical grid file\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT ensure vertical grid coordinate file contains a valid vertical grid coordinate\n",nco_prg_nm_get()); return NCO_ERR; } /* !hyai */ if(flg_grd_out_hyb){ rcd=nco_inq_varid(tpl_id,"hyai",&hyai_id); rcd=nco_inq_varid(tpl_id,"hyam",&hyam_id); rcd=nco_inq_varid(tpl_id,"hybi",&hybi_id); rcd=nco_inq_varid(tpl_id,"hybm",&hybm_id); rcd=nco_inq_varid(tpl_id,"P0",&p0_id); rcd=nco_inq_varid_flg(tpl_id,"ilev",&ilev_id); rcd=nco_inq_varid_flg(tpl_id,"lev",&lev_id); rcd=nco_inq_varid_flg(tpl_id,"PS",&ps_id); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd=nco_inq_varid(tpl_id,"plev",&lev_id); } /* !flg_grd_out_prs */ if(flg_grd_out_dpt){ rcd=nco_inq_varid(tpl_id,"depth",&lev_id); } /* !flg_grd_out_dpt */ const int hyai_id_tpl=hyai_id; /* [id] Hybrid A coefficient at layer interfaces ID */ const int hyam_id_tpl=hyam_id; /* [id] Hybrid A coefficient at layer midpoints ID */ const int hybi_id_tpl=hybi_id; /* [id] Hybrid B coefficient at layer interfaces ID */ const int hybm_id_tpl=hybm_id; /* [id] Hybrid B coefficient at layer midpoints ID */ const int p0_id_tpl=p0_id; /* [id] Reference pressure ID */ const int ilev_id_tpl=ilev_id; /* [id] Interface pressure ID */ const int 
lev_id_tpl=lev_id; /* [id] Midpoint pressure ID */
  const int ps_id_tpl=ps_id; /* [id] Surface pressure ID */

  char *ilev_nm_in=NULL; /* [sng] Interface level name */
  char *lev_nm_in;
  char *ilev_nm_out;
  char *lev_nm_out;
  char *plev_nm_in; /* [sng] Pure-pressure coordinate name */
  char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */
  int *dmn_ids_in=NULL; /* [nbr] Input file dimension IDs */
  int *dmn_ids_out=NULL; /* [nbr] Output file dimension IDs */
  int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */
  int dmn_nbr_ps; /* [nbr] Number of dimensions in PS variable */
  int dmn_nbr_in; /* [nbr] Number of dimensions in input file */
  int dmn_nbr_out; /* [nbr] Number of dimensions in output file */
  int dmn_id_ilev_out=NC_MIN_INT; /* [id] Dimension ID for interface level in output file */
  int dmn_id_lev_out=NC_MIN_INT; /* [id] Dimension ID for midpoint level in output file */
  int dmn_id_ilev_in=NC_MIN_INT; /* [id] Dimension ID for interface level in file to be interpolated */
  int dmn_id_lev_in=NC_MIN_INT; /* [id] Dimension ID for midpoint level in file to be interpolated */
  int dmn_id_tm_in=NC_MIN_INT; /* [id] Dimension ID for time in file to be interpolated */
  int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */
  int dmn_idx_tm_in=NC_MIN_INT; /* [idx] Index of record coordinate in input hybrid coordinate PS field */
  long *dmn_cnt_in=NULL;
  long *dmn_cnt_out=NULL;
  long *dmn_srt=NULL;
  long ilev_nbr_in;
  long lev_nbr_in;
  long ilev_nbr_out;
  long lev_nbr_out;
  long tm_idx=0L; /* [idx] Current timestep */
  long tm_nbr=1L; /* [nbr] Number of timesteps in vertical grid */
  long tm_nbr_in=1L; /* [nbr] Number of timesteps in input vertical grid definition */
  long tm_nbr_out=1L; /* [nbr] Number of timesteps in output vertical grid definition */
  size_t grd_idx; /* [idx] Gridcell index */
  size_t grd_sz_in=1L; /* [nbr] Number of elements in single layer of input grid */
  size_t grd_sz_out=1L; /* [nbr] Number of elements in single layer of output grid */
  size_t idx_fst; /* [idx] Index-offset to current surface pressure timeslice */

  if(flg_grd_out_hyb){
    /* Interrogate hyai/hyam to obtain ilev/lev dimensions */
    rcd=nco_inq_vardimid(tpl_id,hyai_id,&dmn_id_ilev_out);
    rcd=nco_inq_vardimid(tpl_id,hyam_id,&dmn_id_lev_out);
    rcd=nco_inq_dimlen(tpl_id,dmn_id_ilev_out,&ilev_nbr_out);
    rcd=nco_inq_dimlen(tpl_id,dmn_id_lev_out,&lev_nbr_out);
    rcd=nco_inq_dimname(tpl_id,dmn_id_ilev_out,dmn_nm);
    ilev_nm_out=strdup(dmn_nm);
    rcd=nco_inq_dimname(tpl_id,dmn_id_lev_out,dmn_nm);
    lev_nm_out=strdup(dmn_nm);
    /* Interrogate PS, if any, for horizontal dimensions */
    if(ps_id_tpl != NC_MIN_INT){
      rcd=nco_inq_varndims(tpl_id,ps_id,&dmn_nbr_ps);
      dmn_nbr_out=dmn_nbr_ps;
      dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int));
      dmn_cnt_out=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long));
      dmn_srt=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long));
      rcd=nco_inq_vardimid(tpl_id,ps_id,dmn_ids_out);
      rcd=nco_inq_unlimdims(tpl_id,&dmn_nbr_rec,(int *)NULL);
      if(dmn_nbr_rec > 0){
        dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int));
        rcd=nco_inq_unlimdims(tpl_id,&dmn_nbr_rec,dmn_ids_rec);
      } /* !dmn_nbr_rec */
      for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){
        rcd=nco_inq_dimlen(tpl_id,dmn_ids_out[dmn_idx],dmn_cnt_out+dmn_idx);
        /* 20190330: Allow possibility that PS has time dimension > 1
           We want horizontal not temporal dimensions to contribute to grd_sz
           Temporal dimension is usually unlimited
           Only multiply grd_sz by fixed (non-unlimited) dimension sizes
           Corner-case exception when PS spatial dimension on unstructured grid is unlimited */
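        /* Worked example of this dimension triage (hypothetical sizes): for a template PS
           dimensioned (time,lat,lon) = (12,180,360) with "time" unlimited, the record
           dimension is skipped when accumulating grd_sz_out (= 180*360 = 64800) and instead
           sets tm_nbr_out = 12 and flg_vrt_tm = True, so output pressures are reconstructed
           separately for each of the twelve timesteps */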
for(rec_idx=0;rec_idx<dmn_nbr_rec;rec_idx++)
          if(dmn_ids_out[dmn_idx] == dmn_ids_rec[rec_idx]) break;
        if(rec_idx == dmn_nbr_rec || dmn_nbr_out == 1) grd_sz_out*=dmn_cnt_out[dmn_idx];
        if(rec_idx != dmn_nbr_rec && dmn_nbr_out > 1 && dmn_cnt_out[dmn_idx] > 1L){
          tm_nbr_out=dmn_cnt_out[dmn_idx];
          if(tm_nbr_out > 1L) flg_vrt_tm=True;
        } /* tm_nbr_out > 1 */
        dmn_srt[dmn_idx]=0L;
      } /* !dmn_idx */
      if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec);
    } /* !ps_id_tpl */
  } /* !flg_grd_out_hyb */
  if(flg_grd_out_prs){
    /* Interrogate plev to obtain plev dimensions */
    rcd=nco_inq_vardimid(tpl_id,lev_id,&dmn_id_lev_out);
    rcd=nco_inq_dimlen(tpl_id,dmn_id_lev_out,&lev_nbr_out);
    rcd=nco_inq_dimname(tpl_id,dmn_id_lev_out,dmn_nm);
    ilev_nbr_out=lev_nbr_out;
  } /* !flg_grd_out_prs */

  double *hyai_out=NULL; /* [frc] Hybrid A coefficient at layer interfaces on output grid */
  double *hyam_out=NULL; /* [frc] Hybrid A coefficient at layer midpoints on output grid */
  double *hybi_out=NULL; /* [frc] Hybrid B coefficient at layer interfaces on output grid */
  double *hybm_out=NULL; /* [frc] Hybrid B coefficient at layer midpoints on output grid */
  double *ilev_out=NULL; /* [hPa] Interface pressure on output grid */
  double *lev_out=NULL; /* [hPa] Midpoint pressure on output grid */
  double *ps_out=NULL; /* [Pa] Surface pressure on output grid */
  double *prs_mdp_out=NULL; /* [Pa] Midpoint pressure on output grid */
  double *prs_ntf_out=NULL; /* [Pa] Interface pressure on output grid */
  double p0_out; /* [Pa] Reference pressure on output grid */
  long ilev_idx; /* [idx] Interface level index */
  long lev_idx; /* [idx] Level index */
  const nc_type crd_typ_out=NC_DOUBLE;
  nc_type var_typ_rgr; /* [enm] Variable type used during regridding */
  var_typ_rgr=NC_DOUBLE; /* NB: Perform interpolation in double precision */
  if(flg_grd_out_hyb){
    hyai_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr));
    hyam_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr));
    hybi_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr));
    hybm_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr));
    ilev_out=(double *)nco_malloc(ilev_nbr_out*nco_typ_lng(var_typ_rgr));
    lev_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr));
    rcd=nco_get_var(tpl_id,hyai_id,hyai_out,crd_typ_out);
    rcd=nco_get_var(tpl_id,hyam_id,hyam_out,crd_typ_out);
    rcd=nco_get_var(tpl_id,hybi_id,hybi_out,crd_typ_out);
    rcd=nco_get_var(tpl_id,hybm_id,hybm_out,crd_typ_out);
    rcd=nco_get_var(tpl_id,p0_id,&p0_out,crd_typ_out);
    if(ilev_id_tpl != NC_MIN_INT){
      rcd=nco_get_var(tpl_id,ilev_id,ilev_out,crd_typ_out);
    }else{
      /* p0 is in Pa but ilev traditionally given in hPa */
      for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) ilev_out[ilev_idx]=p0_out*(hyai_out[ilev_idx]+hybi_out[ilev_idx])/100.0;
    } /* !ilev_id_tpl */
    if(lev_id_tpl != NC_MIN_INT){
      rcd=nco_get_var(tpl_id,lev_id,lev_out,crd_typ_out);
    }else{
      /* p0 is in Pa but lev traditionally given in hPa */
      for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) lev_out[lev_idx]=p0_out*(hyam_out[lev_idx]+hybm_out[lev_idx])/100.0;
    } /* !lev_id_tpl */
  } /* !flg_grd_out_hyb */
  if(flg_grd_out_prs){
    lev_out=(double *)nco_malloc(lev_nbr_out*nco_typ_lng(var_typ_rgr));
    rcd=nco_get_var(tpl_id,lev_id,lev_out,crd_typ_out);
  } /* !flg_grd_out_prs */

  /* For vertical interpolation (unlike horizontal regridding), the destination grid is known a priori
     Straightforwardly copying all variables and attributes that define the grid from fl_tpl to output would work in theory, but would not allow dynamic identification and relabeling of names */
  /*
if(flg_grd_out_hyb){
     const int vrt_grd_lst_nbr=8;
     const char *vrt_grd_lst[]={"/hyai","/hyam","/hybi","/hybm","/ilev","/lev","/P0","/PS"};
   }
   if(flg_grd_out_prs){
     const int vrt_grd_lst_nbr=1;
     const char *vrt_grd_lst[]={"/plev"};
   } */

  /* Above this line, fl_tpl and tpl_id refer to vertical coordinate file (i.e., template file)
     Below this line, fl_in and in_id refer to input file to be vertically regridded
     Do not close template file until all grid variables have been copied
     For maximum efficiency, do this after defining all interpolated variables in output
     That way no file needs to exit define mode or enter data mode more than once
     However this requires keeping template file, input data file, and output file simultaneously open */
  in_id=rgr->in_id;
  out_id=rgr->out_id;

  /* Determine input grid type */
  if(rgr->plev_nm_in) plev_nm_in=rgr->plev_nm_in;
  if((rcd=nco_inq_varid_flg(in_id,"hyai",&hyai_id)) == NC_NOERR){
    nco_vrt_grd_in=nco_vrt_grd_hyb; /* EAM */
    flg_grd_in_hyb=True;
  }else if((rcd=nco_inq_varid_flg(in_id,plev_nm_in,&plev_id)) == NC_NOERR){
    nco_vrt_grd_in=nco_vrt_grd_prs; /* NCEP */
    flg_grd_in_prs=True;
  }else if((rcd=nco_inq_varid_flg(in_id,"depth",&dpt_id)) == NC_NOERR){
    nco_vrt_grd_in=nco_vrt_grd_dpt; /* MPAS */
    flg_grd_in_dpt=True;
  }else{ /* !hyai */
    (void)fprintf(stdout,"%s: ERROR %s Unable to locate hybrid-sigma/pressure or pure-pressure vertical grid coordinate information in input file\n",nco_prg_nm_get(),fnc_nm);
    (void)fprintf(stdout,"%s: HINT only invoke vertical interpolation on files that contain variables with vertical dimensions, and with known vertical coordinate variable names. These default to \"hyai\" for hybrid, \"plev\" for pressure, \"depth\" for depth. See http://nco.sf.net/nco.html#lev_nm for options to change these names at run-time, e.g., \"--rgr plev_nm=vrt_nm\"\n",nco_prg_nm_get());
    return NCO_ERR;
  } /* !hyai */

  /* Sanity checks: One type of input and one type of output grid detected */
  assert(!(flg_grd_in_hyb && flg_grd_in_prs));
  assert(!(flg_grd_in_hyb && flg_grd_in_dpt));
  assert(!(flg_grd_in_prs && flg_grd_in_dpt));
  assert(flg_grd_in_hyb || flg_grd_in_prs || flg_grd_in_dpt);
  assert(!(flg_grd_out_hyb && flg_grd_out_prs));
  assert(!(flg_grd_out_hyb && flg_grd_out_dpt));
  assert(!(flg_grd_out_prs && flg_grd_out_dpt));
  assert(flg_grd_out_hyb || flg_grd_out_prs || flg_grd_out_dpt);
  if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG Input grid flags : flg_grd_in_hyb = %d, flg_grd_in_prs = %d, flg_grd_in_dpt = %d\n",nco_prg_nm_get(),flg_grd_in_hyb,flg_grd_in_prs,flg_grd_in_dpt);
  if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG Output grid flags: flg_grd_out_hyb = %d, flg_grd_out_prs = %d, flg_grd_out_dpt = %d\n",nco_prg_nm_get(),flg_grd_out_hyb,flg_grd_out_prs,flg_grd_out_dpt);

  /* 20191219: This block is not used, deprecate it? Or use once new coordinates like altitude, depth supported?
*/ nco_vrt_ntp_typ_enm nco_vrt_ntp_typ=nco_ntp_nil; /* Vertical interpolation type */ if(nco_vrt_grd_in == nco_vrt_grd_hyb && nco_vrt_grd_out == nco_vrt_grd_hyb) nco_vrt_ntp_typ=nco_ntp_hyb_to_hyb; if(nco_vrt_grd_in == nco_vrt_grd_hyb && nco_vrt_grd_out == nco_vrt_grd_prs) nco_vrt_ntp_typ=nco_ntp_hyb_to_prs; if(nco_vrt_grd_in == nco_vrt_grd_prs && nco_vrt_grd_out == nco_vrt_grd_hyb) nco_vrt_ntp_typ=nco_ntp_prs_to_hyb; if(nco_vrt_grd_in == nco_vrt_grd_prs && nco_vrt_grd_out == nco_vrt_grd_prs) nco_vrt_ntp_typ=nco_ntp_prs_to_prs; assert(nco_vrt_ntp_typ != nco_ntp_nil); /* Variables on input grid, i.e., on grid in data file to be interpolated */ if(flg_grd_in_hyb){ rcd=nco_inq_varid(in_id,"hyai",&hyai_id); rcd=nco_inq_varid(in_id,"hyam",&hyam_id); rcd=nco_inq_varid(in_id,"hybi",&hybi_id); rcd=nco_inq_varid(in_id,"hybm",&hybm_id); /* 20190602: ECMWF hybrid vertical grid parameters and dimensions differ from CAM/EAM: ECMWF defines vertical dimensions "nhym" and "nhyi" specifically for hy[ab][im] and uses "lev" and "lev_2" for all other variables, whereas CAM/EAM uses same dimensions "lev" and "ilev" for all vertical variables including hybrid coefficients ECMWF provides "hya?" as a constant in Pa and "hyb?" as a dimensionless coefficient of PS, whereas CAM/EAM provides "hya?" and "hyb?" both as dimensionless coefficients of P0 and PS ECMWF provides "lev" and "lev_2" with midpoint and surface pressure indices (not values), respectively, whereas CAM/EAM provides "lev" and "ilev" coordinate values in hPa ECMWF provides dimensionless "lnsp" for log(surface pressure) whereas CAM/EAM provides "PS" for surface pressure in Pa ECMWF "lnsp" has degenerate level dimension "lev_2" whereas CAM/EAM "PS" has no "ilev" dimension ECMWF uses hya? instead of reference pressure whereas CAM/EAM provides "P0" in hPa */ if((rcd=nco_inq_varid_flg(in_id,"lnsp",&ps_id)) == NC_NOERR) flg_grd_hyb_ecmwf=True; else if((rcd=nco_inq_varid_flg(in_id,"PS",&ps_id)) == NC_NOERR) flg_grd_hyb_cameam=True; else{ (void)fprintf(stderr,"%s: ERROR %s Unable to find surface pressure variable required for hybrid grid in input file\n",nco_prg_nm_get(),fnc_nm); abort(); } /* !rcd */ if(flg_grd_hyb_cameam){ rcd=nco_inq_varid(in_id,"P0",&p0_id); ilev_id=NC_MIN_INT; lev_id=NC_MIN_INT; if(ilev_id_tpl == NC_MIN_INT) rcd=nco_inq_varid_flg(in_id,"ilev",&ilev_id); if(lev_id_tpl == NC_MIN_INT) rcd=nco_inq_varid_flg(in_id,"lev",&lev_id); } /* !flg_grd_hyb_cameam */ /* 20190603: We require ECMWF IFS input to have a "lev" coordinate so we can use "lev" dimension not "nhyb" */ if(flg_grd_hyb_ecmwf) rcd=nco_inq_varid(in_id,"lev",&lev_id); } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ rcd=nco_inq_varid(in_id,plev_nm_in,&lev_id); if((rcd=nco_inq_varid_flg(in_id,"PS",&ps_id)) == NC_NOERR){ /* Output file creation procedure discriminates between input surface pressure dimensioned as CAM/EAM vs. ECMWF */ flg_grd_hyb_cameam=True; if(flg_grd_out_hyb && (ps_id_tpl == NC_MIN_INT)) (void)fprintf(stderr,"%s: INFO %s detects variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in pure-pressure input data file. PS will be copied directly from pure-pressure grid input dataset to, and used to construct the pressures of, the output hybrid-coordinate data file.\n",nco_prg_nm_get(),fnc_nm); if(flg_grd_out_hyb && (ps_id_tpl != NC_MIN_INT)) (void)fprintf(stderr,"%s: INFO %s detects variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in both vertical-grid file and pure-pressure input data file. 
The vertical grid-file takes precedence. PS will be copied directly from vertical-grid file to, and used to construct the pressures of, the output hybrid-coordinate data file. PS in input pure-pressure file will be ignored.\n",nco_prg_nm_get(),fnc_nm); }else{ if(flg_grd_out_hyb && (ps_id_tpl == NC_MIN_INT)){ (void)fprintf(stderr,"%s: ERROR %s does not find variable PS (canonical name for spatially varying surface pressure field in hybrid grids) in pure-pressure input data file or in vertical grid-file for hybrid-pressure output. PS must be present in at least one of these files in order to construct the output hybrid-coordinate pressures.\nHINT: Append a valid PS to the input data file or vertical grid-file.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !ps_id_tpl */ } /* !ps_id */ } /* !flg_grd_in_prs */ if(flg_grd_in_dpt){ rcd=nco_inq_varid(in_id,"depth",&lev_id); } /* !flg_grd_in_dpt */ const int ilev_id_in=ilev_id; /* [id] Interface pressure ID */ const int lev_id_in=lev_id; /* [id] Midpoint pressure ID */ const int ps_id_in=ps_id; /* [id] Surface pressure ID */ /* Identify all record-dimensions in input file */ rcd=nco_inq_unlimdims(in_id,&dmn_nbr_rec,(int *)NULL); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ if(flg_grd_in_hyb){ /* Get hybrid vertical information first */ rcd=nco_inq_varndims(in_id,ps_id,&dmn_nbr_in); rcd=nco_inq_vardimid(in_id,hyai_id,&dmn_id_ilev_in); if(flg_grd_hyb_cameam) rcd=nco_inq_vardimid(in_id,hyam_id,&dmn_id_lev_in); if(flg_grd_hyb_ecmwf) rcd=nco_inq_vardimid(in_id,lev_id,&dmn_id_lev_in); rcd=nco_inq_dimlen(in_id,dmn_id_ilev_in,&ilev_nbr_in); rcd=nco_inq_dimlen(in_id,dmn_id_lev_in,&lev_nbr_in); rcd=nco_inq_dimname(in_id,dmn_id_ilev_in,dmn_nm); ilev_nm_in=strdup(dmn_nm); rcd=nco_inq_dimname(in_id,dmn_id_lev_in,dmn_nm); lev_nm_in=strdup(dmn_nm); } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ /* Interrogate plev to obtain plev dimensions */ rcd=nco_inq_vardimid(in_id,lev_id,&dmn_id_lev_in); rcd=nco_inq_dimlen(in_id,dmn_id_lev_in,&lev_nbr_in); rcd=nco_inq_dimname(in_id,dmn_id_lev_in,dmn_nm); lev_nm_in=strdup(dmn_nm); /* Define horizontal grid if no PS is provided (i.e., pure-pressure to pure-pressure interpolation) */ if(!flg_grd_out_hyb){ /* Problem: What is horizontal grid size of pressure grid file? Algorithm: Examine first multi-dimensional variable that includes plev dimension Assume horizontal dimensions vary more rapidly than (i.e., follow) plev Compute horizontal grid size accordingly Set output horizontal size to input horizontal size */ int var_nbr; /* [nbr] Number of variables in file */ int var_idx; /* [idx] Index over variables in file */ rcd=nco_inq(in_id,&dmn_nbr_in,&var_nbr,(int *)NULL,(int *)NULL); dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_cnt_in=(long *)nco_malloc(dmn_nbr_in*sizeof(long)); for(var_idx=0;var_idx<var_nbr;var_idx++){ rcd=nco_inq_varndims(in_id,var_idx,&dmn_nbr_in); rcd=nco_inq_vardimid(in_id,var_idx,dmn_ids_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++) if(dmn_ids_in[dmn_idx] == dmn_id_lev_in) break; /* Does current variable have lev dimension? */ if(dmn_idx < dmn_nbr_in){ /* Yes. Do any dimensions vary more rapidly than lev? */ if(dmn_idx < dmn_nbr_in-1){ /* Yes. 
Assume remaining dimensions are horizontal spatial dimensions */ char var_nm[NC_MAX_NAME+1L]; (void)nc_inq_varname(in_id,var_idx,var_nm); for(int dmn_idx_hrz=dmn_idx+1;dmn_idx_hrz<dmn_nbr_in;dmn_idx_hrz++){ rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx_hrz],dmn_cnt_in+dmn_idx_hrz); grd_sz_in*=dmn_cnt_in[dmn_idx_hrz]; } /* !dmn_idx_hrz */ break; } /* !dmn_idx */ } /* !dmn_idx */ } /* !var_idx */ assert(var_idx != var_nbr); grd_sz_out=grd_sz_in; } /* !flg_grd_out_hyb */ } /* !flg_grd_in_prs */ double *hyai_in=NULL; /* [frc] Hybrid A coefficient at layer interfaces on input grid */ double *hyam_in=NULL; /* [frc] Hybrid A coefficient at layer midpoints on input grid */ double *hybi_in=NULL; /* [frc] Hybrid B coefficient at layer interfaces on input grid */ double *hybm_in=NULL; /* [frc] Hybrid B coefficient at layer midpoints on input grid */ double *lev_in=NULL; /* [Pa] Air pressure on input grid */ double *prs_mdp_in=NULL; /* [Pa] Midpoint pressure on input grid */ double *prs_ntf_in=NULL; /* [Pa] Interface pressure on input grid */ double *ps_in=NULL; /* [Pa] Surface pressure on input grid */ double p0_in; /* [Pa] Reference pressure on input grid */ if(flg_grd_in_hyb){ hyai_in=(double *)nco_malloc(ilev_nbr_in*nco_typ_lng(var_typ_rgr)); hyam_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); hybi_in=(double *)nco_malloc(ilev_nbr_in*nco_typ_lng(var_typ_rgr)); hybm_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(in_id,hyai_id,hyai_in,crd_typ_out); rcd=nco_get_var(in_id,hyam_id,hyam_in,crd_typ_out); rcd=nco_get_var(in_id,hybi_id,hybi_in,crd_typ_out); rcd=nco_get_var(in_id,hybm_id,hybm_in,crd_typ_out); if(flg_grd_hyb_cameam) rcd=nco_get_var(in_id,p0_id,&p0_in,crd_typ_out); /* ECMWF distributes IFS forecasts with lnsp = log(surface pressure) */ if(flg_grd_hyb_ecmwf){ /* Decompose ECMWF hya? convention into CAM/EAM-like product of P0 and hya? 
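For example, with P0 = 100000 Pa the conversion below is hyai_CAM(k) = hyai_ECMWF(k)/P0, so the CAM/EAM reconstruction p(k) = P0*hyai(k) + PS*hybi(k) reproduces the native ECMWF form p(k) = hyai_ECMWF(k) + PS*hybi(k) 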
*/ p0_in=100000.0; /* NB: Interface arrays hold one more element than midpoint arrays so normalize each with its own loop bound */ for(size_t idx=0;idx<ilev_nbr_in;idx++) hyai_in[idx]/=p0_in; for(size_t idx=0;idx<lev_nbr_in;idx++) hyam_in[idx]/=p0_in; /* !idx */ } /* flg_grd_hyb_ecmwf */ } /* !flg_grd_in_hyb */ if(flg_grd_in_prs){ lev_in=(double *)nco_malloc(lev_nbr_in*nco_typ_lng(var_typ_rgr)); rcd=nco_get_var(in_id,lev_id,lev_in,crd_typ_out); } /* !flg_grd_in_prs */ /* Always obtain surface pressure if input or output grid is hybrid */ if(flg_grd_in_hyb || flg_grd_out_hyb){ /* Copy horizontal grid information from input file LHS variables were set above if PS is in template file */ if(ps_id_tpl == NC_MIN_INT){ /* NB: dmn_nbr_in/out in this block refer only to horizontal dimensions necessary to define PS */ rcd=nco_inq_varndims(in_id,ps_id,&dmn_nbr_in); /* This is harmlessly repeated for hybrid input files */ dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_cnt_in=(long *)nco_malloc((dmn_nbr_in+1)*sizeof(long)); if(!dmn_srt) dmn_srt=(long *)nco_malloc((dmn_nbr_in+1)*sizeof(long)); /* NB: Only allocate dmn_srt once */ rcd=nco_inq_vardimid(in_id,ps_id,dmn_ids_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_in+dmn_idx); /* 20190330: Allow possibility that PS has time dimension > 1 We want horizontal not temporal dimensions to contribute to grd_sz Temporal dimension is usually unlimited Only multiply grd_sz by fixed (non-unlimited) dimension sizes Corner-case exception when PS spatial dimension on unstructured grid is unlimited */ for(rec_idx=0;rec_idx<dmn_nbr_rec;rec_idx++) if(dmn_ids_in[dmn_idx] == dmn_ids_rec[rec_idx]) break; if(rec_idx == dmn_nbr_rec || dmn_nbr_in == 1) grd_sz_in*=dmn_cnt_in[dmn_idx]; if(rec_idx != dmn_nbr_rec && dmn_nbr_in > 1 && dmn_cnt_in[dmn_idx] > 1L){ dmn_id_tm_in=dmn_ids_in[dmn_idx]; dmn_idx_tm_in=dmn_idx; tm_nbr_in=dmn_cnt_in[dmn_idx_tm_in]; if(tm_nbr_in > 1L) flg_vrt_tm=True; } /* tm_nbr_in > 1 */ dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ /* Given all input PS information, define output PS information */ dmn_nbr_ps=dmn_nbr_out=dmn_nbr_in; dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_cnt_out=(long *)nco_malloc((dmn_nbr_out+1)*sizeof(long)); /* fxm: next line works for hyb_in and is buggy for prs_in */ memcpy(dmn_ids_out,dmn_ids_in,dmn_nbr_in*sizeof(int)); memcpy(dmn_cnt_out,dmn_cnt_in,dmn_nbr_in*sizeof(long)); grd_sz_out=grd_sz_in; tm_nbr_out=tm_nbr_in; }else{ /* !ps_id_tpl */ /* 20200825: We have already defined grd_sz_out if PS is in template file We have already defined grd_sz_in and grd_sz_out := grd_sz_in when PS not in template file We have already defined grd_sz_in if input file is pure-pressure However, we have not yet defined grd_sz_in if input file is hybrid Expectation is that grd_sz_in (from input file) = grd_sz_out (from template file) An independent check on this would examine dimension sizes in input file Such a check would immediately flag horizontal mismatches between vertical file and input file The check could not rely on PS being present in input file The check could/should examine the first horizontal variable in input file This would require a lot of code, so we just assume it is true */ grd_sz_in=grd_sz_out; } /* !ps_id_tpl */ /* Timestep sequencing NB: tm_nbr_??? 
variables count timesteps in vertical grid definitions These are not necessarily the same as the number of timesteps in either file Time-invariant hybrid or pure-pressure coordinates are valid vertical grids for timeseries Usually hybrid grids have as many timesteps in the grids as in the timeseries Usually pressure grids are time-invariant (as of 20190511 time-varying pure pressure grids are still not supported) This implementation interpolates timeseries to/from time-invariant vertical grids in one OpenMP call! */ if(tm_nbr_in > 1L || tm_nbr_out > 1L){ if(tm_nbr_in > tm_nbr_out) assert((float)tm_nbr_in/(float)tm_nbr_out == tm_nbr_in/tm_nbr_out); else assert((float)tm_nbr_out/(float)tm_nbr_in == tm_nbr_out/tm_nbr_in); } /* !tm_nbr_in */ tm_nbr=tm_nbr_in > tm_nbr_out ? tm_nbr_in : tm_nbr_out; /* Sanity checks */ if(grd_sz_in != grd_sz_out || tm_nbr_in != tm_nbr_out) (void)fprintf(stdout,"%s: ERROR %s reports that temporal or horizontal spatial dimensions differ: grd_sz_in = %ld != %ld = grd_sz_out, and/or tm_nbr_in = %ld != %ld = tm_nbr_out\n",nco_prg_nm_get(),fnc_nm,grd_sz_in,grd_sz_out,tm_nbr_in,tm_nbr_out); assert(grd_sz_in == grd_sz_out); assert(tm_nbr_in == tm_nbr_out); ps_in=(double *)nco_malloc_dbg(tm_nbr_in*grd_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() ps_in value buffer"); /* Surface pressure comes from either hybrid vertical grid-files, hybrid data files, or pressure data files that provide surface pressure */ if(flg_grd_in_hyb || (flg_grd_in_prs && ps_id_tpl == NC_MIN_INT)) rcd=nco_get_var(in_id,ps_id,ps_in,crd_typ_out); /* ECMWF distributes IFS forecasts with lnsp = log(surface pressure) */ if(flg_grd_hyb_ecmwf){ /* Convert ECMWF-provided log(surface_pressure) to surface_pressure */ const size_t ps_sz_in=tm_nbr_in*grd_sz_in; /* [nbr] Number of elements in ps_in */ for(size_t idx=0;idx<ps_sz_in;idx++) ps_in[idx]=exp(ps_in[idx]); } /* flg_grd_hyb_ecmwf */ /* Finally have enough information to allocate output pressure grid */ ps_out=(double *)nco_malloc_dbg(tm_nbr_out*grd_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() ps_out value buffer"); /* Get PS from output horizontal grid, if available, otherwise copy from input horizontal grid */ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_get_var(tpl_id,ps_id_tpl,ps_out,crd_typ_out); /* NB: Here we read from tpl_id one last time */ }else{ memcpy(ps_out,ps_in,tm_nbr_in*grd_sz_in*nco_typ_lng(var_typ_rgr)); } /* !ps_id_tpl */ } /* ! 
*/ /* Compare input and output surface pressure fields to determine whether subterranean extrapolation required */ nco_bool flg_add_msv_att; /* [flg] Extrapolation requires _FillValue */ flg_add_msv_att=False; /* Extrapolation type xtr_fll_msv may cause need to create _FillValue attributes */ if(xtr_mth == nco_xtr_fll_msv){ const size_t ps_sz=tm_nbr*grd_sz_in; // [nbr] Size of surface-pressure field double *prs_max_in=NULL; /* [Pa] Maximum midpoint pressure on input grid */ double *prs_max_out=NULL; /* [Pa] Maximum midpoint pressure on output grid */ double *prs_min_in=NULL; /* [Pa] Minimum midpoint pressure on input grid */ double *prs_min_out=NULL; /* [Pa] Minimum midpoint pressure on output grid */ long idx_lev_max; // [idx] Index of midpoint level with greatest pressure long idx_lev_min; // [idx] Index of midpoint level with lowest pressure size_t idx; // [idx] Counting index prs_max_in=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_max_in value buffer"); prs_max_out=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_max_out value buffer"); prs_min_in=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_min_in value buffer"); prs_min_out=(double *)nco_malloc_dbg(ps_sz*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_min_out value buffer"); if(flg_grd_in_hyb){ // fxm: assumes hybrid grid has least/greatest pressure at top/bottom level idx_lev_max=lev_nbr_in-1; idx_lev_min=0L; for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ idx_fst=tm_idx*grd_sz_in; for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++){ prs_max_in[grd_idx+idx_fst]=p0_in*hyam_in[idx_lev_max]+ps_in[idx_fst+grd_idx]*hybm_in[idx_lev_max]; prs_min_in[grd_idx+idx_fst]=p0_in*hyam_in[idx_lev_min]+ps_in[idx_fst+grd_idx]*hybm_in[idx_lev_min]; } /* !grd_idx */ } /* !tm_idx */ } /* !flg_grd_in_hyb */ if(flg_grd_out_hyb){ // fxm: assumes hybrid grid has least/greatest pressure at top/bottom level idx_lev_max=lev_nbr_out-1; idx_lev_min=0L; for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ idx_fst=tm_idx*grd_sz_out; for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++){ prs_max_out[grd_idx+idx_fst]=p0_out*hyam_out[idx_lev_max]+ps_out[idx_fst+grd_idx]*hybm_out[idx_lev_max]; prs_min_out[grd_idx+idx_fst]=p0_out*hyam_out[idx_lev_min]+ps_out[idx_fst+grd_idx]*hybm_out[idx_lev_min]; } /* !grd_idx */ } /* !tm_idx */ } /* !flg_grd_out_hyb */ if(flg_grd_in_prs){ double lev_in_max; double lev_in_min; if(lev_in[0] < lev_in[1]) lev_in_max=lev_in[lev_nbr_in-1]; else lev_in_max=lev_in[0]; if(lev_in[0] < lev_in[1]) lev_in_min=lev_in[0]; else lev_in_min=lev_in[lev_nbr_in-1]; for(size_t idx_in=0;idx_in<ps_sz;idx_in++) prs_max_in[idx_in]=lev_in_max; for(size_t idx_in=0;idx_in<ps_sz;idx_in++) prs_min_in[idx_in]=lev_in_min; } /* !flg_grd_in_prs */ if(flg_grd_out_prs){ double lev_out_max; double lev_out_min; if(lev_out[0] < lev_out[1]) lev_out_max=lev_out[lev_nbr_out-1]; else lev_out_max=lev_out[0]; if(lev_out[0] < lev_out[1]) lev_out_min=lev_out[0]; else lev_out_min=lev_out[lev_nbr_out-1]; for(size_t idx_out=0;idx_out<ps_sz;idx_out++) prs_max_out[idx_out]=lev_out_max; for(size_t idx_out=0;idx_out<ps_sz;idx_out++) prs_min_out[idx_out]=lev_out_min; } /* !flg_grd_out_prs */ for(idx=0;idx<ps_sz;idx++) if(prs_max_out[idx] > prs_max_in[idx]) break; if(idx < ps_sz) flg_add_msv_att=True; for(idx=0;idx<ps_sz;idx++) if(prs_min_out[idx] < prs_min_in[idx]) break; if(idx < ps_sz) flg_add_msv_att=True; if(flg_add_msv_att && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s 
reports at least one point in at least one output level requires extrapolation (not interpolation). Will ensure that all interpolated fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm); if(prs_max_in) prs_max_in=(double *)nco_free(prs_max_in); if(prs_max_out) prs_max_out=(double *)nco_free(prs_max_out); if(prs_min_in) prs_min_in=(double *)nco_free(prs_min_in); if(prs_min_out) prs_min_out=(double *)nco_free(prs_min_out); } /* !xtr_mth */ /* Lay-out regridded file */ //(void)fprintf(stdout,"%s: DEBUG quark1 dmn_nbr_out = %d, dmn_nbr_ps = %d\n",nco_prg_nm_get(),dmn_nbr_out,dmn_nbr_ps); /* Use explicitly specified output names, if any, otherwise use template names (either explicitly specified or discovered by fuzzing) */ if(rgr->lev_nm_out) lev_nm_out=rgr->lev_nm_out; if(rgr->ilev_nm_out){ if(flg_grd_out_hyb) ilev_nm_out=rgr->ilev_nm_out; if(flg_grd_out_prs) lev_nm_out=rgr->ilev_nm_out; } /* !ilev_nm_out */ if(flg_grd_out_prs){ /* Unless user explicitly specifies output name, use same name as input */ if(!rgr->lev_nm_out) lev_nm_out=(char *)strdup(plev_nm_in); /* Hybrid-sigma/pressure interface variables, if any, must also be output to pure-pressure files on lev grid */ ilev_nm_out=(char *)strdup(lev_nm_out); } /* !flg_grd_out_prs */ /* Define new vertical dimensions before all else */ if(flg_grd_out_hyb){ rcd=nco_def_dim(out_id,ilev_nm_out,ilev_nbr_out,&dmn_id_ilev_out); rcd=nco_def_dim(out_id,lev_nm_out,lev_nbr_out,&dmn_id_lev_out); /* Horizontal dimensions necessary to define PS variable */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ if(ps_id_tpl != NC_MIN_INT){ rcd=nco_inq_dimname(tpl_id,dmn_ids_out[dmn_idx],dmn_nm); }else{ rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm); rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_out+dmn_idx); } /* !ps_id_tpl */ if(flg_grd_hyb_cameam) rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx); /* 20190602: ECMWF IFS PS variable has degenerate vertical dimension (lev_2). 
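The degenerate dimension carries no information about PS itself, so the output PS retains only the horizontal dimensions. 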
Avoid re-definition */ if(flg_grd_hyb_ecmwf) if(strcmp(dmn_nm,ilev_nm_out)) if(strcmp(dmn_nm,lev_nm_out)) rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx); } /* !dmn_idx */ } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd=nco_def_dim(out_id,lev_nm_out,lev_nbr_out,&dmn_id_lev_out); } /* !flg_grd_out_prs */ /* Do not extract grid variables (that are also extensive variables) like ilev, lev, hyai, hyam, hybi, hybm */ /* Exception list source: CAM: hyai, hyam, hybi, hybm, ilev, lev, P0, PS EAM: hyai, hyam, hybi, hybm, ilev, lev, P0, PS ECMWF: hyai, hyam, hybi, hybm, lev, lnsp NCEP: plev */ const int var_xcl_lst_nbr=10; /* [nbr] Number of objects on exclusion list */ const char *var_xcl_lst[]={"/hyai","/hyam","/hybi","/hybm","/ilev","/lev","/P0","/plev","/PS","/lnsp"}; int var_cpy_nbr=0; /* [nbr] Number of copied variables */ int var_rgr_nbr=0; /* [nbr] Number of regridded variables */ int var_xcl_nbr=0; /* [nbr] Number of deleted variables */ int var_crt_nbr=0; /* [nbr] Number of created variables */ long idx; /* [idx] Generic index */ unsigned int idx_tbl; /* [idx] Counter for traversal table */ const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */ for(idx=0;idx<var_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,var_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* !idx_tbl */ } /* !idx */ /* 20191001: Do not automatically define plev_nm_in in pressure-grid output files The variable named lev_nm_out in the input data file is always defined in the output file So if plev_nm_in == lev_nm_out it will be defined anyway */ if(flg_grd_in_prs && flg_grd_out_prs && strcmp(plev_nm_in,lev_nm_out)){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm,plev_nm_in)) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* !idx_tbl */ } /* !idx */ char *var_nm; /* [sng] Variable name */ int *dmn_id_in=NULL; /* [id] Dimension IDs */ int *dmn_id_out=NULL; /* [id] Dimension IDs */ int var_id_in; /* [id] Variable ID */ int var_id_out; /* [id] Variable ID */ nc_type var_typ_out; /* [enm] Variable type to write to disk */ nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */ int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; dfl_lvl=rgr->dfl_lvl; fl_out_fmt=rgr->fl_out_fmt; /* Define new coordinates and grid variables in regridded file */ const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables (scalars) */ const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ //const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ //const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ //const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ if(flg_grd_out_hyb){ 
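/* This block defines the hybrid coefficients (hyai/hyam, hybi/hybm), the vertical coordinates (ilev, lev), the reference pressure P0, and the surface pressure PS on the output grid, then copies their attributes from the template file, falling back to the input file for ilev, lev, and PS when the template lacks them */ 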
rcd+=nco_def_var(out_id,"hyai",crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&hyai_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hyai_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hyam",crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&hyam_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hyam_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hybi",crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&hybi_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hybi_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"hybm",crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&hybm_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,hybm_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,ilev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_ilev_out,&ilev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ilev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&lev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,"P0",crd_typ_out,dmn_nbr_0D,(int *)NULL,&p0_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,p0_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; // for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ // rcd=nco_inq_dimname(out_id,dmn_ids_out[dmn_idx],dmn_nm); // (void)fprintf(stdout,"%s: DEBUG quark5 dmn_nbr_out = %d, dmn_nbr_ps = %d, dmn_idx = %d, dmn_ids_out[%d] = %d, dmn_nm = %s\n",nco_prg_nm_get(),dmn_nbr_out,dmn_nbr_ps,dmn_idx,dmn_idx,dmn_ids_out[dmn_idx],dmn_nm); // } /* !dmn_idx */ if(flg_grd_hyb_cameam) rcd+=nco_def_var(out_id,"PS",crd_typ_out,dmn_nbr_ps,dmn_ids_out,&ps_id); if(flg_grd_hyb_ecmwf){ /* Remove degenerate ECMWF vertical dimension so that output PS has dmn_nbr_ps-1 not dmn_nbr_ps dimensions */ int dmn_nbr_out_ecmwf=0; for(dmn_idx=0;dmn_idx<dmn_nbr_ps;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm); if(strcmp(dmn_nm,ilev_nm_out) && strcmp(dmn_nm,lev_nm_out) && strcmp(dmn_nm,"lev_2")) rcd=nco_inq_dimid(out_id,dmn_nm,dmn_ids_out+dmn_nbr_out_ecmwf++); } /* !dmn_idx */ rcd+=nco_def_var(out_id,"PS",crd_typ_out,dmn_nbr_out_ecmwf,dmn_ids_out,&ps_id); } /* !flg_grd_hyb_ecmwf */ if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ps_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; (void)nco_att_cpy(tpl_id,out_id,hyai_id_tpl,hyai_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hyam_id_tpl,hyam_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hybi_id_tpl,hybi_id,PCK_ATT_CPY); (void)nco_att_cpy(tpl_id,out_id,hybm_id_tpl,hybm_id,PCK_ATT_CPY); if(p0_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,p0_id_tpl,p0_id,PCK_ATT_CPY); /* p0 not expected to be in ECMWF grids */ if(ilev_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,ilev_id_tpl,ilev_id,PCK_ATT_CPY); else if(ilev_id_in != NC_MIN_INT) (void)nco_att_cpy(in_id,out_id,ilev_id_in,ilev_id,PCK_ATT_CPY); if(lev_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,lev_id_tpl,lev_id,PCK_ATT_CPY); else if(lev_id_in != NC_MIN_INT) (void)nco_att_cpy(in_id,out_id,lev_id_in,lev_id,PCK_ATT_CPY); if(ps_id_tpl != NC_MIN_INT) (void)nco_att_cpy(tpl_id,out_id,ps_id_tpl,ps_id,PCK_ATT_CPY); else (void)nco_att_cpy(in_id,out_id,ps_id_in,ps_id,PCK_ATT_CPY); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ rcd+=nco_def_var(out_id,lev_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lev_out,&lev_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lev_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; (void)nco_att_cpy(tpl_id,out_id,lev_id_tpl,lev_id,PCK_ATT_CPY); 
dmn_id_ilev_out=dmn_id_lev_out; } /* !flg_grd_out_prs */ /* No further access to template file, close it */ nco_close(tpl_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_tpl); char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */ nco_bool has_ilev; /* [flg] Contains interface level dimension */ nco_bool has_lev; /* [flg] Contains midpoint level dimension */ nco_bool has_tm; /* [flg] Contains time dimension */ nco_bool need_prs_ntf=False; /* [flg] At least one variable to regrid is on interface levels */ nco_bool need_prs_mdp=False; /* [flg] At least one variable to regrid is on midpoint levels */ trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */ /* Define regridding flag for each variable */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn; has_ilev=False; has_lev=False; for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ /* Pre-determine flags necessary during next loop */ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* fxm: Generalize to include any variable containing coordinates with "standard_name" = "atmosphere_hybrid_sigma_pressure_coordinate" */ if(!has_ilev && ilev_nm_in) has_ilev=!strcmp(dmn_nm_cp,ilev_nm_in); if(!has_lev) has_lev=!strcmp(dmn_nm_cp,lev_nm_in); } /* end loop over dimensions */ /* Regrid variables that contain either vertical dimension */ if(has_ilev || has_lev){ trv_tbl->lst[idx_tbl].flg_rgr=True; var_rgr_nbr++; if(has_ilev) need_prs_ntf=True; if(has_lev) need_prs_mdp=True; } /* endif */ assert(!(has_ilev && has_lev)); /* Copy all variables that are not regridded or omitted */ if(!trv_tbl->lst[idx_tbl].flg_rgr) var_cpy_nbr++; } /* end nco_obj_typ_var */ } /* end idx_tbl */ if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit interpolation criteria. The vertical interpolator expects something to interpolate, and variables not interpolated are copied straight to output. HINT: If the name(s) of the input vertical grid dimensions (e.g., ilev and lev) do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"ilev\", \"lev\", and/or \"plev\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#regrid. For hybrid-pressure coordinate grids, ensure that the \"ilev\" and \"lev\" variable names are known with, e.g., \"ncks --rgr ilev_nm=interface_level --rgr lev_nm=midpoint_level\" or \"ncremap -R '--rgr ilev=interface_level --rgr lev=midpoint_level'\". For pure pressure grids, ensure the \"plev\" coordinate name is defined with, e.g., \"ncks --rgr plev_nm=pressure_level\" or \"ncremap -R '--rgr plev=pressure_level'\".\n",nco_prg_nm_get(),fnc_nm); if(nco_dbg_lvl_get() >= nco_dbg_fl){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Interpolate %s? %s\n",trv.nm,trv.flg_rgr ? 
"Yes" : "No"); } /* end idx_tbl */ } /* end dbg */ /* Pre-allocate dimension ID and cnt/srt space */ int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */ rcd+=nco_inq_ndims(in_id,&dmn_nbr_max); dmn_id_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); aed_sct aed_mtd_fll_val; char *att_nm_fll_val=strdup("_FillValue"); int flg_pck; /* [flg] Variable is packed on disk */ nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */ double mss_val_dbl; double mss_val_cmp_dbl; /* Missing value for comparison to double precision values */ float mss_val_flt; if(flg_add_msv_att){ aed_mtd_fll_val.att_nm=att_nm_fll_val; aed_mtd_fll_val.mode=aed_create; aed_mtd_fll_val.sz=1L; mss_val_dbl=NC_FILL_DOUBLE; mss_val_flt=NC_FILL_FLOAT; } /* !flg_add_msv_att */ /* Define interpolated and copied variables in output file */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ var_nm=trv.nm; /* Preserve input type in output type */ var_typ_out=trv.var_typ; dmn_nbr_in=trv.nbr_dmn; dmn_nbr_out=trv.nbr_dmn; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out); /* If variable has not been defined, define it */ if(rcd != NC_NOERR){ if(trv.flg_rgr){ /* Interpolate */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports variable \"%s\" is packed so results unpredictable. 
HINT: If regridded values seem weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); if(ilev_nm_in && !strcmp(dmn_nm,ilev_nm_in)){ /* Change ilev dimension */ dmn_id_out[dmn_idx]=dmn_id_ilev_out; dmn_cnt_out[dmn_idx]=ilev_nbr_out; }else if(!strcmp(dmn_nm,lev_nm_in)){ /* Change lev dimension */ dmn_id_out[dmn_idx]=dmn_id_lev_out; dmn_cnt_out[dmn_idx]=lev_nbr_out; }else{ /* Dimensions ilev/lev_nm_in have already been defined as ilev/lev_nm_out, replicate all other dimensions */ rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); } /* !ilev */ if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_out+dmn_idx); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ }else{ /* !flg_rgr */ /* Replicate non-interpolated variables */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_out+dmn_idx); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ } /* !flg_rgr */ rcd=nco_def_var(out_id,var_nm,var_typ_out,dmn_nbr_out,dmn_id_out,&var_id_out); /* Duplicate netCDF4 settings when possible */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){ /* Deflation */ if(dmn_nbr_out > 0){ int dfl_lvl_in; /* [enm] Deflate level [0..9] */ rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in); /* Copy original deflation settings */ if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in); /* Overwrite HDF Lempel-Ziv compression level, if requested */ if(dfl_lvl == 0) deflate=(int)False; else deflate=(int)True; /* Turn-off shuffle when uncompressing otherwise chunking requests may fail */ if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE; /* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */ if(dfl_lvl >= 0) shuffle=NC_SHUFFLE; if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl); } /* !dmn_nbr_out */ } /* !NC_FORMAT_NETCDF4 */ (void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY); /* Variables with subterranean levels and missing-value extrapolation must have _FillValue attribute */ if(flg_add_msv_att && trv.flg_rgr){ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); if(!has_mss_val){ nco_bool flg_att_chg; /* [flg] _FillValue attribute was written */ aed_mtd_fll_val.var_nm=var_nm; aed_mtd_fll_val.id=var_id_out; aed_mtd_fll_val.type=var_typ_out; if(var_typ_out == NC_FLOAT) aed_mtd_fll_val.val.fp=&mss_val_flt; else if(var_typ_out == NC_DOUBLE) aed_mtd_fll_val.val.dp=&mss_val_dbl; flg_att_chg=nco_aed_prc(out_id,var_id_out,aed_mtd_fll_val); if(!flg_att_chg && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: WARNING 
%s reports unsuccessful attempt to create _FillValue attribute for variable %s\n",nco_prg_nm_get(),fnc_nm,var_nm); } /* !has_mss_val */ } /* !flg_add_msv_att */ } /* !rcd */ } /* !var */ } /* !idx_tbl */ /* Free pre-allocated array space */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Begin data mode */ (void)nco_enddef(out_id); /* Copy all grid variables */ if(flg_grd_out_hyb){ (void)nco_put_var(out_id,hyai_id,hyai_out,crd_typ_out); (void)nco_put_var(out_id,hyam_id,hyam_out,crd_typ_out); (void)nco_put_var(out_id,hybi_id,hybi_out,crd_typ_out); (void)nco_put_var(out_id,hybm_id,hybm_out,crd_typ_out); (void)nco_put_var(out_id,ilev_id,ilev_out,crd_typ_out); (void)nco_put_var(out_id,lev_id,lev_out,crd_typ_out); (void)nco_put_var(out_id,p0_id,&p0_out,crd_typ_out); (void)nco_put_var(out_id,ps_id,ps_out,crd_typ_out); } /* !flg_grd_out_hyb */ if(flg_grd_out_prs){ (void)nco_put_var(out_id,lev_id,lev_out,crd_typ_out); } /* !flg_grd_out_prs */ nco_bool flg_ntp_log=True; /* [flg] Interpolate in log(vertical_coordinate) */ if(ntp_mth == nco_ntp_lnr) flg_ntp_log=False; size_t idx_in; /* [idx] Index into 3D input variables */ size_t idx_out; /* [idx] Index into 3D output variables */ size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */ /* Interpolate or copy variable values */ double *var_val_dbl_in=NULL; double *var_val_dbl_out=NULL; double *prs_ntp_in; /* [Pa] Interpolated pressure array on input grid */ double *prs_ntp_out; /* [Pa] Interpolated pressure array on output grid */ int lvl_idx_in; /* [idx] Level index on input grid */ int lvl_idx_out; /* [idx] Level index on output grid */ int lvl_nbr_in; /* [nbr] Number of levels for current interpolated variable on input grid */ int lvl_nbr_out; /* [nbr] Number of levels for current interpolated variable on output grid */ int thr_idx; /* [idx] Thread index */ size_t grd_nbr=grd_sz_in; /* [nbr] Horizontal grid size */ size_t idx_dbg=rgr->idx_dbg; /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped as shared in parallel clause */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ /* Repeating above documentation for the forgetful: NB: tm_nbr is max(timesteps) in vertical grid definitions, not number of records in either file This implementation interpolates timeseries to/from time-invariant vertical grids in one OpenMP call! 
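Parallelization sketch: the idx_tbl loop below distributes whole variables across threads; each thread reads through its own pre-opened input-file handle (trv_tbl->in_id_arr[thr_idx]) while writes to the shared output file are serialized inside an OpenMP critical section 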
*/ for(tm_idx=0;tm_idx<tm_nbr;tm_idx++){ /* Index-offset to current surface pressure timeslice */ idx_fst=tm_idx*grd_sz_in; if(need_prs_mdp){ /* Allocated and define midpoint pressures */ if(tm_idx == 0) prs_mdp_in=(double *)nco_malloc_dbg(grd_sz_in*lev_nbr_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_mdp_in value buffer"); if(tm_idx == 0) prs_mdp_out=(double *)nco_malloc_dbg(grd_sz_out*lev_nbr_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_mdp_out value buffer"); if(flg_grd_in_hyb) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_in;lev_idx++) prs_mdp_in[grd_idx+lev_idx*grd_sz_in]=p0_in*hyam_in[lev_idx]+ps_in[idx_fst+grd_idx]*hybm_in[lev_idx]; if(flg_grd_out_hyb) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) prs_mdp_out[grd_idx+lev_idx*grd_sz_out]=p0_out*hyam_out[lev_idx]+ps_out[idx_fst+grd_idx]*hybm_out[lev_idx]; if(flg_grd_in_prs) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_in;lev_idx++) prs_mdp_in[grd_idx+lev_idx*grd_sz_in]=lev_in[lev_idx]; if(flg_grd_out_prs) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(lev_idx=0;lev_idx<lev_nbr_out;lev_idx++) prs_mdp_out[grd_idx+lev_idx*grd_sz_out]=lev_out[lev_idx]; if(flg_ntp_log){ var_sz_in=grd_sz_in*lev_nbr_in; for(idx_in=0;idx_in<var_sz_in;idx_in++) prs_mdp_in[idx_in]=log(prs_mdp_in[idx_in]); var_sz_out=grd_sz_out*lev_nbr_out; for(idx_out=0;idx_out<var_sz_out;idx_out++) prs_mdp_out[idx_out]=log(prs_mdp_out[idx_out]); } /* !flg_ntp_log */ } /* !need_prs_mdp */ if(need_prs_ntf){ /* Allocate and define interface pressures */ if(tm_idx == 0) prs_ntf_in=(double *)nco_malloc_dbg(grd_sz_in*ilev_nbr_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_ntf_in value buffer"); if(tm_idx == 0) prs_ntf_out=(double *)nco_malloc_dbg(grd_sz_out*ilev_nbr_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() prs_ntf_out value buffer"); if(flg_grd_in_hyb) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_in;ilev_idx++) prs_ntf_in[grd_idx+ilev_idx*grd_sz_in]=p0_in*hyai_in[ilev_idx]+ps_in[idx_fst+grd_idx]*hybi_in[ilev_idx]; if(flg_grd_out_hyb) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) prs_ntf_out[grd_idx+ilev_idx*grd_sz_out]=p0_out*hyai_out[ilev_idx]+ps_out[idx_fst+grd_idx]*hybi_out[ilev_idx]; if(flg_grd_in_prs) for(grd_idx=0;grd_idx<grd_sz_in;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_in;ilev_idx++) prs_ntf_in[grd_idx+ilev_idx*grd_sz_in]=lev_in[ilev_idx]; if(flg_grd_out_prs) for(grd_idx=0;grd_idx<grd_sz_out;grd_idx++) for(ilev_idx=0;ilev_idx<ilev_nbr_out;ilev_idx++) prs_ntf_out[grd_idx+ilev_idx*grd_sz_out]=lev_out[ilev_idx]; if(flg_ntp_log){ var_sz_in=grd_sz_in*ilev_nbr_in; for(idx_in=0;idx_in<var_sz_in;idx_in++) prs_ntf_in[idx_in]=log(prs_ntf_in[idx_in]); var_sz_out=grd_sz_out*ilev_nbr_out; for(idx_out=0;idx_out<var_sz_out;idx_out++) prs_ntf_out[idx_out]=log(prs_ntf_out[idx_out]); } /* !flg_ntp_log */ } /* !need_prs_ntf */ /* Set firstprivate variables to initial values */ has_ilev=False; has_lev=False; has_tm=False; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"Interpolation progress: # means interpolated, ~ means copied\n"); #ifdef __GNUG__ # define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ ) # if GCC_LIB_VERSION < 490 # define GXX_OLD_OPENMP_SHARED_TREATMENT 1 # endif /* 480 */ # if GCC_LIB_VERSION >= 900 # define GXX_WITH_OPENMP5_GPU_SUPPORT 1 # endif /* 900 */ #endif /* !__GNUC__ */ #if defined( 
__INTEL_COMPILER) # pragma omp parallel for default(none) firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_cmp_dbl,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,fnc_nm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth) #else /* !__INTEL_COMPILER */ # ifdef GXX_OLD_OPENMP_SHARED_TREATMENT # pragma omp parallel for default(none) firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_cmp_dbl,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,fnc_nm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth) # else /* !old g++ */ # if defined(GXX_WITH_OPENMP5_GPU_SUPPORT) && 0 # pragma omp target teams distribute parallel for # else # pragma omp parallel for firstprivate(has_ilev,has_lev,has_tm,var_val_dbl_in,var_val_dbl_out) private(dmn_cnt_in,dmn_cnt_out,dmn_id_in,dmn_id_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,grd_idx,has_mss_val,idx_in,idx_out,idx_tbl,in_id,lvl_idx_in,lvl_idx_out,lvl_nbr_in,lvl_nbr_out,mss_val_cmp_dbl,mss_val_dbl,prs_ntp_in,prs_ntp_out,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr) shared(dmn_id_ilev_in,dmn_id_ilev_out,dmn_id_lev_in,dmn_id_lev_out,dmn_id_tm_in,flg_ntp_log,flg_vrt_tm,grd_nbr,idx_dbg,ilev_nbr_in,ilev_nbr_out,lev_nbr_in,lev_nbr_out,out_id,prs_mdp_in,prs_mdp_out,prs_ntf_in,prs_ntf_out,tm_idx,xtr_mth) # endif /* !GCC > 9.0 */ # endif /* !GCC < 4.9 */ #endif /* !__INTEL_COMPILER */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; thr_idx=omp_get_thread_num(); in_id=trv_tbl->in_id_arr[thr_idx]; #ifdef _OPENMP if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? "s" : ""); if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm); #endif /* !_OPENMP */ if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm); if(trv.flg_rgr){ /* Interpolate variable */ var_nm=trv.nm; if(!strcmp(var_nm,"US") || !strcmp(var_nm,"VS")) (void)fprintf(fp_stdout,"%s: WARNING %s reports attempt to vertically interpolate a variable named \"%s\". If this variable is from a CESM CAM or E3SM EAM output or initial condition file on a rectangular grid (e.g., FV 0.9x1.25), then expect this program to fail and dump core when interpolating US and to produce slightly incorrect answers for VS. 
The vertical interpolation routine requires that interpolated variables be on the same horizontal grid as the supplied pressure field. However, the CAM/EAM US and VS variables from rectangular grid simulations are often on a horizontal grid, called the staggered grid, that is offset from the rest of the variables including the surface pressure. US usually sits on a grid that is staggered in latitude from, and is a slightly different size than, the surface pressure grid. This leads to a core dump. VS sits on a grid staggered in longitude from, though the same size as, the surface pressure field. The resulting interpolation will be based on surface pressure half a gridcell to the east rather than centered with VS. The correct procedure to vertically interpolate US and VS is to 1) horizontally regrid the supplied surface pressure (often \"PS\") to the staggered grid, then 2) vertically interpolate US and VS to the desired vertical grid based on the surface pressure on the staggered grid, then 3) re-combine the interpolated US and VS with the interpolated versions of the rest of the variables. The best solution to this dilemma is to script this workflow. Contact Charlie if you need help with this.\n",nco_prg_nm_get(),fnc_nm,var_nm); var_typ_rgr=NC_DOUBLE; /* NB: Perform regridding in double precision */ var_typ_out=trv.var_typ; /* NB: Output type in file is same as input type */ var_sz_in=1L; var_sz_out=1L; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid(out_id,var_nm,&var_id_out); rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in); rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_vardimid(out_id,var_id_out,dmn_id_out); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); if(dmn_id_in[dmn_idx] == dmn_id_ilev_in) has_ilev=True; if(dmn_id_in[dmn_idx] == dmn_id_lev_in) has_lev=True; if(dmn_id_in[dmn_idx] == dmn_id_tm_in) has_tm=True; if(flg_vrt_tm && has_tm && dmn_id_in[dmn_idx] == dmn_id_tm_in){ dmn_cnt_in[dmn_idx]=1L; dmn_srt[dmn_idx]=tm_idx; }else{ dmn_srt[dmn_idx]=0L; } /* !flg_vrt_tm */ var_sz_in*=dmn_cnt_in[dmn_idx]; } /* !dmn_idx */ var_val_dbl_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() input value buffer"); rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_dbl_in,var_typ_rgr); for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ /* Dimension count vector is same as input except for lvl dimension */ dmn_cnt_out[dmn_idx]=dmn_cnt_in[dmn_idx]; if(has_ilev && dmn_id_out[dmn_idx] == dmn_id_ilev_out) dmn_cnt_out[dmn_idx]=ilev_nbr_out; if(has_lev && dmn_id_out[dmn_idx] == dmn_id_lev_out) dmn_cnt_out[dmn_idx]=lev_nbr_out; var_sz_out*=dmn_cnt_out[dmn_idx]; } /* !dmn_idx */ var_val_dbl_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output value buffer"); /* Missing value setup */ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); if(has_mss_val) mss_val_cmp_dbl=mss_val_dbl; else mss_val_cmp_dbl=NC_FILL_DOUBLE; if(has_ilev){ /* Interpolate current variable from input interface 
pressure grid to output interface pressure grid */ lvl_nbr_in=ilev_nbr_in; lvl_nbr_out=ilev_nbr_out; prs_ntp_in=prs_ntf_in; prs_ntp_out=prs_ntf_out; }else{ /* Interpolate current variable from input midpoint pressure grid to output midpoint pressure grid */ lvl_nbr_in=lev_nbr_in; lvl_nbr_out=lev_nbr_out; prs_ntp_in=prs_mdp_in; prs_ntp_out=prs_mdp_out; } /* !ilev */ /* Procedure: Extract input/output coordinate/data arrays into 1D column order This enables actual interpolation code to be written for, or take advantage of, 1D interpolation routines After interpolating into 1D sequential memory, copy back to ND output and repeat */ double *crd_in=NULL; /* Input vertical coordinate (must be monotonic) */ double *crd_out=NULL; /* Output vertical coordinate (must be monotonic) */ double *dat_in=NULL; /* Input data (to be interpolated) on input vertical coordinate grid */ double *dat_out=NULL; /* Output data (interpolated) on output vertical coordinate grid (i.e., the answer) */ double *crd_in_mnt; /* Input vertical coordinate reversed if necessary to be monotonically increasing */ double *crd_out_mnt; /* Output vertical coordinate reversed if necessary to be monotonically increasing */ double *dat_in_mnt; /* Input data (to be interpolated) reversed if necessary along with input grid */ double *dat_out_mnt; /* Output data (interpolated) reversed if necessary along with output grid */ nco_xtr_sct xtr_LHS; nco_xtr_sct xtr_RHS; size_t brk_lft_idx; size_t brk_rgt_idx; size_t in_idx; size_t in_nbr; size_t out_nbr; size_t out_idx; /* Default extrapolation uses nearest valid neighbor */ xtr_LHS.xtr_fll=True; xtr_LHS.xtr_vrb=False; xtr_LHS.typ_fll=xtr_mth; xtr_RHS.xtr_fll=True; xtr_RHS.xtr_vrb=False; xtr_RHS.typ_fll=xtr_mth; /* Special-case extrapolation methods allowed for all except missing-value extrapolation types */ if(xtr_mth != nco_xtr_fll_msv){ if(!strcmp(var_nm,"T") || !strcmp(var_nm,"ta")) xtr_RHS.typ_fll=nco_xtr_fll_tpt; else if(!strcmp(var_nm,"Z3") || !strcmp(var_nm,"zg")) xtr_LHS.typ_fll=xtr_RHS.typ_fll=nco_xtr_fll_gph; } /* !xtr_mth */ crd_in=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); crd_out=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); dat_in=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); dat_out=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); in_nbr=lvl_nbr_in; out_nbr=lvl_nbr_out; nco_bool in_ncr; /* [flg] Input coordinate monotonically increases */ nco_bool out_ncr; /* [flg] Output coordinate monotonically increases */ /* Determine monotonicity direction only once, based on first vertical column */ if(prs_ntp_in[grd_nbr]-prs_ntp_in[0] > 0.0) in_ncr=True; else in_ncr=False; out_ncr=True; if(out_nbr > 1) if(prs_ntp_out[grd_nbr]-prs_ntp_out[0] < 0.0) out_ncr=False; /* If necessary, allocate (once, and re-use it) additional memory to hold reversed arrays */ if(!in_ncr){ crd_in_mnt=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); dat_in_mnt=(double *)nco_malloc(lvl_nbr_in*sizeof(double)); } /* !in_ncr */ if(!out_ncr){ crd_out_mnt=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); dat_out_mnt=(double *)nco_malloc(lvl_nbr_out*sizeof(double)); } /* !out_ncr */ /* Constants and parameters for extrapolation */ const double gamma_moist=6.5/10000.0; /* [K/Pa] Temperature extrapolation assumes constant moist adiabatic lower atmosphere lapse rate dT/dp=constant=(6.5 K)/(100 mb) = (6.5 K)/(10000 Pa) */ const double Rd_rcp_g0=287.0/9.81; /* [m/K] Geopotential height extrapolation uses hypsometric equation Z2-Z1=(Rd*Tv_avg/g0)*ln(p1/p2)=(Rd*Tv_avg/g0)*(ln(p1)-ln(p2)) */ const double 
tpt_vrt_avg=288.0; /* [K] Mean virtual temperature assumed for geopotential height extrapolation */ nco_bool FIRST_WARNING_LHS; /* [flg] First warning for LHS extrapolation */ nco_bool FIRST_WARNING_RHS; /* [flg] First warning for RHS extrapolation */ if(tm_idx == 0){ /* Only print extrapolation warnings for first timestep to prevent noisy output NB: Algorithm prevents any warnings for extrapolations that appear after first timestep */ FIRST_WARNING_LHS=True; FIRST_WARNING_RHS=True; } /* !tm_idx */ /* Outer loop over columns */ for(grd_idx=0;grd_idx<grd_nbr;grd_idx++){ /* Initialize pseudo-1D variables with consecutive memory addresses to avoid indirection */ for(lvl_idx_in=0;lvl_idx_in<lvl_nbr_in;lvl_idx_in++){ idx_in=grd_idx+lvl_idx_in*grd_nbr; crd_in[lvl_idx_in]=prs_ntp_in[idx_in]; dat_in[lvl_idx_in]=var_val_dbl_in[idx_in]; } /* !lvl_idx_in */ for(lvl_idx_out=0;lvl_idx_out<lvl_nbr_out;lvl_idx_out++){ idx_out=grd_idx+lvl_idx_out*grd_nbr; crd_out[lvl_idx_out]=prs_ntp_out[idx_out]; } /* !lvl_idx_out */ /* Interpolation code easier to write/debug if crd_in and crd_out both monotonically increase However, monotonically decreasing coordinates useful in many cases, such as depth coordinate, and pressure levels arranged largest to smallest (favored by CMIP) Next code block reverses array(s) if necessary so coordinates monotonically increase Code uses crd_in_mnt, dat_in_mnt, crd_out_mnt where "_mnt" reminds of "monotonically increasing" assumption Following code lifted from CSZ's libcsz.a library source code ~/sw/c++/vec.hh */ if(in_ncr){ crd_in_mnt=crd_in; dat_in_mnt=dat_in; }else{ for(in_idx=0;in_idx<in_nbr;in_idx++){ crd_in_mnt[in_idx]=crd_in[in_nbr-in_idx-1]; dat_in_mnt[in_idx]=dat_in[in_nbr-in_idx-1]; } /* !in_idx */ } /* !in_ncr */ if(out_ncr){ crd_out_mnt=crd_out; dat_out_mnt=dat_out; }else{ for(out_idx=0;out_idx<out_nbr;out_idx++) crd_out_mnt[out_idx]=crd_out[out_nbr-out_idx-1]; } /* !out_ncr */ // Initialize bracketing index brk_lft_idx=0; // Loop over desired output coordinates for(out_idx=0;out_idx<out_nbr;out_idx++){ // Order of conditions is important since second condition is illegal if brk_lft_idx >= in_nbr while((brk_lft_idx < in_nbr) && (crd_in_mnt[brk_lft_idx] < crd_out_mnt[out_idx])){ brk_lft_idx++; } // !while brk_lft_idx--; // Handle identity interpolation separately to preserve symmetry in extrapolation code if(brk_lft_idx != in_nbr-1){ if(crd_in_mnt[brk_lft_idx+1] == crd_out_mnt[out_idx]){ dat_out_mnt[out_idx]=dat_in_mnt[brk_lft_idx+1]; if(brk_lft_idx == -1) brk_lft_idx=0; // Reset brk_lft_idx to 0 so next while loop works continue; // Jump to next iteration } // !crd_in_mnt } // !brk_lft_idx if(brk_lft_idx == -1){ // LHS Extrapolation required // Degenerate case: crd_out_mnt[out_idx] < crd_in_mnt[0] brk_lft_idx=0; // Reset brk_lft_idx to 0 so next while loop works if(xtr_LHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: WARNING %s reports variable %s column %lu output value dat_out_mnt[%lu] at coordinate crd_out_mnt[%lu] = %g requires LHS extrapolation beyond leftmost valid coordinate at crd_in_mnt[%lu] = %g. 
Nearest valid datum is dat_in_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,var_nm,grd_idx,out_idx,out_idx,crd_out_mnt[out_idx],brk_lft_idx,crd_in_mnt[brk_lft_idx],brk_lft_idx,dat_in_mnt[brk_lft_idx]); // Extrapolation options are presented in decreasing order of preference if(!xtr_LHS.xtr_fll){ (void)fprintf(fp_stdout,"%s: ERROR %s Full LHS extrapolation required but not permitted\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } /* !xtr_LHS.xtr_fll */ switch(xtr_LHS.typ_fll){ case nco_xtr_fll_nil: dat_out_mnt[out_idx]=0.0; break; case nco_xtr_fll_msv: dat_out_mnt[out_idx]=mss_val_cmp_dbl; break; case nco_xtr_fll_ngh: dat_out_mnt[out_idx]=dat_in_mnt[0]; break; case nco_xtr_fll_lnr: dat_out_mnt[out_idx]=dat_in_mnt[0]- (crd_in_mnt[0]-crd_out_mnt[out_idx])* (dat_in_mnt[1]-dat_in_mnt[0])/(crd_in_mnt[1]-crd_in_mnt[0]); break; case nco_xtr_fll_gph: if(flg_ntp_log) /* Coordinates are already logarithmic in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[0]+ Rd_rcp_g0*tpt_vrt_avg*(crd_in_mnt[0]-crd_out_mnt[out_idx]); else /* Interpolate with logarithm of pressure coordinates */ dat_out_mnt[out_idx]=dat_in_mnt[0]+ Rd_rcp_g0*tpt_vrt_avg*log(crd_in_mnt[0]/crd_out_mnt[out_idx]); if(FIRST_WARNING_LHS) (void)fprintf(fp_stdout,"%s: INFO %s geopotential height extrapolated upward towards space using hypsometric equation with constant global mean virtual temperature = %g for variable %s\n",nco_prg_nm_get(),fnc_nm,tpt_vrt_avg,var_nm); FIRST_WARNING_LHS=False; break; default: (void)fprintf(fp_stdout,"%s: ERROR %s Unknown xtr_LHS.typ_fll\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; break; } // !xtr_LHS.typ_fll if(xtr_LHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: INFO %s LHS extrapolation yields dat_out_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,out_idx,dat_out_mnt[out_idx]); }else if(brk_lft_idx < in_nbr-1){ // Normal case: crd_out_mnt is interpolable brk_rgt_idx=brk_lft_idx+1; // NB: brk_rgt_idx is ALWAYS greater than brk_lft_idx // This simultaneously meets two criteria: // 1. Divide-by-zero errors are impossible in the next step // 2. The identity interpolation is satisfied since crd_dlt == 0.0: // i.e., If crd_out_mnt[idx] == crd_in_mnt[brk_lft_idx] then dat_out_mnt[out_idx] := dat_in_mnt[brk_lft_idx] // Linearly interpolate dat_out_mnt[out_idx]= dat_in_mnt[brk_lft_idx]+ (crd_out_mnt[out_idx]-crd_in_mnt[brk_lft_idx])* (dat_in_mnt[brk_rgt_idx]-dat_in_mnt[brk_lft_idx])/ (crd_in_mnt[brk_rgt_idx]-crd_in_mnt[brk_lft_idx]); }else if(brk_lft_idx == in_nbr-1){ // RHS Extrapolation required // Degenerate case: brk_lft_idx is last element of crd_in_mnt brk_rgt_idx=brk_lft_idx; if(xtr_RHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: WARNING %s reports variable %s column %lu output value dat_out_mnt[%lu] at coordinate crd_out_mnt[%lu] = %g requires RHS extrapolation beyond rightmost valid coordinate at crd_in_mnt[%lu] = %g. 
Nearest valid datum is dat_in_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,var_nm,grd_idx,out_idx,out_idx,crd_out_mnt[out_idx],brk_rgt_idx,crd_in_mnt[brk_rgt_idx],brk_rgt_idx,dat_in_mnt[brk_rgt_idx]); // Extrapolation options are presented in decreasing order of preference if(!xtr_RHS.xtr_fll){ (void)fprintf(fp_stdout,"%s: ERROR %s Full RHS extrapolation required but not permitted\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } /* !xtr_RHS.xtr_fll */ switch(xtr_RHS.typ_fll){ case nco_xtr_fll_nil: dat_out_mnt[out_idx]=0.0; break; case nco_xtr_fll_msv: dat_out_mnt[out_idx]=mss_val_cmp_dbl; break; case nco_xtr_fll_ngh: dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]; break; case nco_xtr_fll_lnr: dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1])* (dat_in_mnt[in_nbr-1]-dat_in_mnt[in_nbr-2])/ (crd_in_mnt[in_nbr-1]-crd_in_mnt[in_nbr-2]); break; case nco_xtr_fll_tpt: if(flg_ntp_log) /* Exponentiate so coordinates are linear in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (exp(crd_out_mnt[out_idx])-exp(crd_in_mnt[in_nbr-1]))*gamma_moist; else /* Coordinates are already linear in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]+ (crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1])*gamma_moist; if(FIRST_WARNING_RHS) (void)fprintf(fp_stdout,"%s: INFO %s temperature extrapolated toward/into surface assuming constant moist adiabatic lapse rate = %g K/(100 mb) for variable %s\n",nco_prg_nm_get(),fnc_nm,gamma_moist*10000.0,var_nm); FIRST_WARNING_RHS=False; break; case nco_xtr_fll_gph: if(flg_ntp_log) /* Coordinates are already logarithmic in pressure */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]- Rd_rcp_g0*tpt_vrt_avg*(crd_out_mnt[out_idx]-crd_in_mnt[in_nbr-1]); else /* Interpolate with logarithm of pressure coordinates */ dat_out_mnt[out_idx]=dat_in_mnt[in_nbr-1]- Rd_rcp_g0*tpt_vrt_avg*log(crd_out_mnt[out_idx]/crd_in_mnt[in_nbr-1]); if(FIRST_WARNING_RHS) (void)fprintf(fp_stdout,"%s: INFO %s geopotential height extrapolated toward/into surface using hypsometric equation with constant global mean virtual temperature = %g for variable %s\n",nco_prg_nm_get(),fnc_nm,tpt_vrt_avg,var_nm); FIRST_WARNING_RHS=False; break; default: (void)fprintf(fp_stdout,"%s: ERROR %s Unknown xtr_RHS\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; break; } // !xtr_RHS.typ_fll if(xtr_RHS.xtr_vrb) (void)fprintf(fp_stdout,"%s: INFO %s RHS extrapolation yields dat_out_mnt[%lu] = %g\n",nco_prg_nm_get(),fnc_nm,out_idx,dat_out_mnt[out_idx]); }else{ (void)fprintf(fp_stdout,"%s: ERROR %s Unforeseen value of brk_lft_idx\n",nco_prg_nm_get(),fnc_nm); // return NCO_ERR; } // !RHS } // !out_idx /* Un-reverse output data to be on original grid */ if(!out_ncr) for(out_idx=0;out_idx<out_nbr;out_idx++) dat_out[out_idx]=dat_out_mnt[out_nbr-out_idx-1]; // End of vec.hh code /* Copy answers into output array */ for(lvl_idx_out=0;lvl_idx_out<lvl_nbr_out;lvl_idx_out++){ idx_out=grd_idx+lvl_idx_out*grd_nbr; var_val_dbl_out[idx_out]=dat_out[lvl_idx_out]; } /* !lvl_idx_out */ if(nco_dbg_lvl_get() >= nco_dbg_io && grd_idx == idx_dbg){ (void)fprintf(fp_stdout,"%s: DEBUG %s variable %s at idx_dbg = %lu\n",nco_prg_nm_get(),fnc_nm,var_nm,idx_dbg); for(out_idx=0;out_idx<out_nbr;out_idx++){ (void)fprintf(fp_stdout,"out_idx = %lu dat_out = %g\n",out_idx,dat_out[out_idx]); } /* !out_idx */ } /* !dbg */ } /* !grd_idx */ if(crd_in) crd_in=(double *)nco_free(crd_in); if(crd_out) crd_out=(double *)nco_free(crd_out); if(dat_in) dat_in=(double *)nco_free(dat_in); if(dat_out) dat_out=(double *)nco_free(dat_out); if(!in_ncr){ 
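/* Reversal buffers were allocated only when the corresponding coordinate decreases monotonically, so free them only in that case */ 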
if(crd_in_mnt) crd_in_mnt=(double *)nco_free(crd_in_mnt); if(dat_in_mnt) dat_in_mnt=(double *)nco_free(dat_in_mnt); } /* !in_ncr */ if(!out_ncr){ if(crd_out_mnt) crd_out_mnt=(double *)nco_free(crd_out_mnt); if(dat_out_mnt) dat_out_mnt=(double *)nco_free(dat_out_mnt); } /* !out_ncr */ if(nco_typ_ntg(var_typ_out)){ /* 20210407: Round, with rint(), integer fields before sending to netCDF for output Otherwise implicit type conversion will truncate (rather than round) output values This is critical for masks where rounding errors produce near integer values (e.g., 0.999...) that could then be truncated to zero by implicit conversion instead of rounded up to 1. */ for(idx_out=0;idx_out<var_sz_out;idx_out++) if(var_val_dbl_out[idx_out] != mss_val_cmp_dbl) var_val_dbl_out[idx_out]=rint(var_val_dbl_out[idx_out]); } /* !nco_typ_ntg() */ #pragma omp critical { /* begin OpenMP critical */ rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_dbl_out,var_typ_rgr); } /* end OpenMP critical */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(var_val_dbl_in) var_val_dbl_in=(double *)nco_free(var_val_dbl_in); if(var_val_dbl_out) var_val_dbl_out=(double *)nco_free(var_val_dbl_out); }else{ /* !trv.flg_rgr */ /* Use standard NCO copy routine for variables that are not regridded 20190511: Copy them only once */ if(tm_idx == 0){ #pragma omp critical { /* begin OpenMP critical */ (void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl); } /* end OpenMP critical */ } /* !tm_idx */ } /* !flg_rgr */ } /* !xtr */ } /* end (OpenMP parallel for) loop over idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n"); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables interpolated = %d, copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr); } /* !tm_idx */ if(att_nm_fll_val) att_nm_fll_val=(char *)nco_free(att_nm_fll_val); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in); if(dmn_ids_out) dmn_ids_out=(int *)nco_free(dmn_ids_out); if(ilev_nm_in) ilev_nm_in=(char *)nco_free(ilev_nm_in); if(lev_nm_in) lev_nm_in=(char *)nco_free(lev_nm_in); if(hyai_in) hyai_in=(double *)nco_free(hyai_in); if(hyam_in) hyam_in=(double *)nco_free(hyam_in); if(hybi_in) hybi_in=(double *)nco_free(hybi_in); if(hybm_in) hybm_in=(double *)nco_free(hybm_in); if(ps_in) ps_in=(double *)nco_free(ps_in); if(prs_mdp_in) prs_mdp_in=(double *)nco_free(prs_mdp_in); if(prs_ntf_in) prs_ntf_in=(double *)nco_free(prs_ntf_in); if(hyai_out) hyai_out=(double *)nco_free(hyai_out); if(hyam_out) hyam_out=(double *)nco_free(hyam_out); if(hybi_out) hybi_out=(double *)nco_free(hybi_out); if(hybm_out) hybm_out=(double *)nco_free(hybm_out); if(ilev_out) ilev_out=(double *)nco_free(ilev_out); if(lev_in) lev_in=(double *)nco_free(lev_in); if(lev_out) lev_out=(double *)nco_free(lev_out); if(ps_out) ps_out=(double *)nco_free(ps_out); if(prs_mdp_out) prs_mdp_out=(double *)nco_free(prs_mdp_out); if(prs_ntf_out) prs_ntf_out=(double *)nco_free(prs_ntf_out); return rcd; } /* !nco_ntp_vrt() */ int /* O [enm] Return code */ nco_rgr_wgt /* [fnc] Regrid with external weights */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ 
trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Regrid fields using external weights contained in a mapfile Examine ESMF, SCRIP, Tempest map-files: ncks --cdl -M -m ${DATA}/scrip/rmp_T42_to_POP43_conserv.nc | m ncks --cdl -M -m ${DATA}/maps/map_t42_to_fv129x256_aave.20150621.nc | m ncks --cdl -M -m ${DATA}/maps/map_ne30np4_to_ne120np4_tps.20150618.nc | m Test ESMF, SCRIP, Tempest map-files: ncks -D 5 -O --map=${DATA}/scrip/rmp_T42_to_POP43_conserv.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc ncks -D 5 -O --map=${DATA}/maps/map_t42_to_fv129x256_aave.20150621.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc ncks -D 5 -O --map=${DATA}/maps/map_ne30np4_to_ne120np4_tps.20150618.nc ${DATA}/ne30/rgr/ne30_1D.nc ~/foo.nc Mapfile formats ESMF, GRIDSPEC, SCRIP, and UGRID described here: http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_6_3_0rp1/ESMF_refdoc/node3.html#sec:fileformat:scrip Conventions: grid_size: Number of gridcells (product of lat*lon) address: Source and destination index for each link pair num_links: Number of unique address pairs in remapping, i.e., size of sparse matrix num_wgts: Number of weights per vertice for given remapping (we only handle num_wgts == 1 below) = 1 Bilinear Destination grid value determined by weights times known source grid values at vertices of source quadrilateral that bounds destination point P One weight per vertice guarantees fxm but is not conservative Bilinear requires logically rectangular grid = 1 Distance-based: Distance-weighted uses values at num_neighbors points The weight is inversely proportional to the angular distance from the destination point to each neighbor on the source grid = 3 Second-order conservative: Described in Jones, P. W. (1999), Monthly Weather Review, 127, 2204-2210 First-order conservative schemes assume fluxes are constant within gridcell Destination fluxes are simple summations of source fluxes weighted by overlap areas Old clm and bds remappers use a first-order algorithm Second-order improves this by using a first-order Taylor expansion of flux Source flux is centroid value plus directional offset determined by dot product of directional gradient and vector pointing from vertice to centroid. Three weights per vertice are centroid weight, weight times local theta-gradient from centroid to vertice, and weight times local phi-gradient from centroid to vertice. = 4 Bicubic: The four weights are gradients in each direction plus a cross-gradient term Same principle as bilinear, but more weights per vertice Bicubic requires logically rectangular grid wgt: Maximum number of source cells contributing to destination cell is not a dimension in SCRIP remapping files because SCRIP stores everything in 1-D sparse matrix arrays Definition of sparse matrix formulations and normalization terminology, SCRIP manual p. 
8, 13, 16: for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ // Remap source function f = 1 in all unmasked source gridcells, zero elsewhere, to function F on destination grid // Normalization: fractional area (fracarea) (F = 1 where destination overlaps unmasked source grid) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]; // Normalization: destination area (destarea) (weights in each destination cell sum to its area fraction) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]/dst_area[ddr_dst[lnk_idx]]; // Normalization: none (F = angular area that participates in remapping) dst[ddr_dst[lnk_idx]]+=src[ddr_src[lnk_idx]]*remap_matrix[lnk_idx,0]/(dst_area[ddr_dst[lnk_idx]]*dst_frc[ddr_dst[lnk_idx]]); } // end loop over lnk Documentation: NCL special cases described in popRemap.ncl, e.g., at https://github.com/yyr/ncl/blob/master/ni/src/examples/gsun/popRemap.ncl ESMF Regridding Status: https://www.earthsystemcog.org/projects/esmf Sample regrid T42->POP43, SCRIP: ncks -O --map=${DATA}/scrip/rmp_T42_to_POP43_conserv.nc ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc */ const char fnc_nm[]="nco_rgr_wgt()"; /* [sng] Function name */ char *fl_in; char *fl_pth_lcl=NULL; const double rdn2dgr=180.0/M_PI; const double dgr2rdn=M_PI/180.0; const double eps_rlt=1.0e-14; /* [frc] Round-off error tolerance */ double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */ double area_out_ttl=0.0; /* [frc] Exact sum of area */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int in_id; /* I [id] Input netCDF file ID */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int dmn_idx; /* [idx] Dimension index */ int dst_grid_corners_id; /* [id] Destination grid corners dimension ID */ int dst_grid_rank_id; /* [id] Destination grid rank dimension ID */ int dst_grid_size_id; /* [id] Destination grid size dimension ID */ int num_links_id; /* [id] Number of links dimension ID */ int num_wgts_id=NC_MIN_INT; /* [id] Number of weights dimension ID */ int src_grid_corners_id; /* [id] Source grid corners dimension ID */ int src_grid_rank_id; /* [id] Source grid rank dimension ID */ int src_grid_size_id; /* [id] Source grid size dimension ID */ long int lat_idx; long int lon_idx; short int bnd_idx; nco_bool FL_RTR_RMT_LCN; nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool flg_dgn_area_out=False; /* [flg] Diagnose area_out from grid boundaries */ nco_bool flg_bnd_1D_usable=False; /* [flg] Usable 1D cell vertices exist */ nco_bool flg_stg=rgr->flg_stg; /* [flg] Write staggered grid with FV output */ nco_grd_2D_typ_enm nco_grd_2D_typ=nco_grd_2D_nil; /* [enm] Two-dimensional grid-type enum */ nco_grd_lat_typ_enm nco_grd_lat_typ=nco_grd_lat_nil; /* [enm] Latitude grid-type enum */ nco_grd_lon_typ_enm nco_grd_lon_typ=nco_grd_lon_nil; /* [enm] Longitude grid-type enum */ nco_mpf_sct mpf; size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s obtaining mapping weights from %s\n",nco_prg_nm_get(),fnc_nm,rgr->fl_map); /* Duplicate (because nco_fl_mk_lcl() free()'s fl_in) */ fl_in=(char 
*)strdup(rgr->fl_map); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); /* Identify mapping file type using string generated by weight-generator: ESMF: title = "ESMF Offline Regridding Weight Generator" ESMF_weight_only: title = "ESMF Regrid Weight Generator" NCO: Title = "netCDF Operators (NCO) Offline Regridding Weight Generator" MBTR: Title = "MOAB-TempestRemap Online Regridding Weight Generator" SCRIP: conventions = "SCRIP" Tempest: Title = "TempestRemap Offline Regridding Weight Generator" */ char *att_val; char *att_cnv_val=NULL; char *att_gnr_val=NULL; char *att_ttl_val=NULL; char *cnv_sng=NULL; /* netCDF standard is uppercase Conventions, though some models use lowercase */ char att_sng_Cnv[]="Conventions"; /* [sng] Unidata standard string (uppercase) */ char att_sng_cnv[]="conventions"; /* [sng] Unidata non-standard string (lowercase) */ char att_sng_gnr[]="weight_generator"; /* [sng] CMIP6 standard string */ char att_sng_Ttl[]="Title"; /* [sng] MBTR, NCO, and Tempest use "Title" attribute. MBTR and Tempest do not use "Conventions" */ char att_sng_ttl[]="title"; /* [sng] ERWG 7.1 weight_only uses "title" not "Conventions" attribute */ char name0_sng[]="name0"; /* [sng] Attribute where Tempest stores least-rapidly-varying dimension name */ nco_rgr_mpf_typ_enm nco_rgr_mpf_typ=nco_rgr_mpf_nil; /* [enm] Type of remapping file */ nco_rgr_typ_enm nco_rgr_typ=nco_rgr_grd_nil; /* [enm] Type of grid conversion */ /* Look for map-type signature in [cC]onventions or [tT]itle attribute */ att_cnv_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_cnv); if(!att_cnv_val) att_cnv_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_Cnv); att_gnr_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_gnr); att_ttl_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_ttl); if(!att_ttl_val) att_ttl_val=nco_char_att_get(in_id,NC_GLOBAL,att_sng_Ttl); /* Either "[cC]onventions" or "[tT]itle" attribute determines map-file type... */ if(att_cnv_val && strstr(att_cnv_val,"SCRIP")) nco_rgr_mpf_typ=nco_rgr_mpf_SCRIP; if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_ttl_val){ if(strstr(att_ttl_val,"ESMF Offline Regridding Weight Generator")) nco_rgr_mpf_typ=nco_rgr_mpf_ESMF; else if(strstr(att_ttl_val,"netCDF Operators")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO; else if(strstr(att_ttl_val,"MOAB-TempestRemap")) nco_rgr_mpf_typ=nco_rgr_mpf_MBTR; else if(strstr(att_ttl_val,"Tempest")) nco_rgr_mpf_typ=nco_rgr_mpf_Tempest; else if(strstr(att_ttl_val,"ESMF Regrid Weight Generator")) nco_rgr_mpf_typ=nco_rgr_mpf_ESMF_weight_only; } /* !att_ttl_val */ if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_cnv_val){ if(strstr(att_cnv_val,"NCO")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO; } /* !att_cnv_val */ if(nco_rgr_mpf_typ == nco_rgr_mpf_nil && att_gnr_val){ if(strstr(att_gnr_val,"NCO")) nco_rgr_mpf_typ=nco_rgr_mpf_NCO; } /* !att_gnr_val */ if(nco_rgr_mpf_typ == nco_rgr_mpf_nil){ (void)fprintf(stderr,"%s: WARNING %s unable to discern map-file type from global attributes \"[cC]onventions\" = \"%s\" and/or \"[tT]itle\" = \"%s\" and/or \"weight_generator\" = \"%s\"\n",nco_prg_nm_get(),fnc_nm,att_cnv_val ? att_cnv_val : "",att_ttl_val ? att_ttl_val : "",att_gnr_val ? 
att_gnr_val : ""); nco_rgr_mpf_typ=nco_rgr_mpf_unknown; } /* !nco_rgr_mpf_typ */ if(att_cnv_val) att_cnv_val=(char *)nco_free(att_cnv_val); if(att_gnr_val) att_gnr_val=(char *)nco_free(att_gnr_val); if(att_ttl_val) att_ttl_val=(char *)nco_free(att_ttl_val); switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_SCRIP: rcd+=nco_inq_dimid(in_id,"src_grid_size",&src_grid_size_id); rcd+=nco_inq_dimid(in_id,"dst_grid_size",&dst_grid_size_id); rcd+=nco_inq_dimid(in_id,"src_grid_corners",&src_grid_corners_id); rcd+=nco_inq_dimid(in_id,"dst_grid_corners",&dst_grid_corners_id); rcd+=nco_inq_dimid(in_id,"src_grid_rank",&src_grid_rank_id); rcd+=nco_inq_dimid(in_id,"dst_grid_rank",&dst_grid_rank_id); rcd+=nco_inq_dimid(in_id,"num_links",&num_links_id); rcd+=nco_inq_dimid(in_id,"num_wgts",&num_wgts_id); break; case nco_rgr_mpf_ESMF_weight_only: rcd+=nco_inq_dimid(in_id,"n_s",&num_links_id); break; case nco_rgr_mpf_ESMF: case nco_rgr_mpf_MBTR: case nco_rgr_mpf_NCO: case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: rcd+=nco_inq_dimid(in_id,"n_a",&src_grid_size_id); rcd+=nco_inq_dimid(in_id,"n_b",&dst_grid_size_id); rcd+=nco_inq_dimid(in_id,"nv_a",&src_grid_corners_id); rcd+=nco_inq_dimid(in_id,"nv_b",&dst_grid_corners_id); rcd+=nco_inq_dimid(in_id,"src_grid_rank",&src_grid_rank_id); rcd+=nco_inq_dimid(in_id,"dst_grid_rank",&dst_grid_rank_id); if(nco_rgr_mpf_typ != nco_rgr_mpf_Tempest){ rcd+=nco_inq_dimid_flg(in_id,"num_wgts",&num_wgts_id); if(rcd != NC_NOERR){ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s reports map-file does not contain \"num_wgts\" dimension. ERWG always produces this as an orphan dimension, so post-processing could have removed it without harming other map-file fields. No harm, no foul.\n",nco_prg_nm_get(),fnc_nm); rcd=NC_NOERR; } /* !rcd */ } /* !nco_rgr_mpf_Tempest */ rcd+=nco_inq_dimid(in_id,"n_s",&num_links_id); break; default: (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map-file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); /* NB: This return never executes because nco_dfl_case_generic_err() calls exit() Return placed here to suppress clang -Wsometimes-uninitialized warnings This is done many other times throughout the code, though explained only once, here */ return NCO_ERR; break; } /* end switch */ /* Use dimension IDs to get dimension sizes */ rcd+=nco_inq_dimlen(in_id,num_links_id,&mpf.num_links); if(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only){ rcd+=nco_inq_dimlen(in_id,src_grid_size_id,&mpf.src_grid_size); rcd+=nco_inq_dimlen(in_id,dst_grid_size_id,&mpf.dst_grid_size); rcd+=nco_inq_dimlen(in_id,src_grid_corners_id,&mpf.src_grid_corners); rcd+=nco_inq_dimlen(in_id,dst_grid_corners_id,&mpf.dst_grid_corners); rcd+=nco_inq_dimlen(in_id,src_grid_rank_id,&mpf.src_grid_rank); rcd+=nco_inq_dimlen(in_id,dst_grid_rank_id,&mpf.dst_grid_rank); /* TempestRemap does not generate num_wgts */ if(nco_rgr_mpf_typ == nco_rgr_mpf_MBTR || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || num_wgts_id == NC_MIN_INT){ mpf.num_wgts=int_CEWI; }else{ rcd+=nco_inq_dimlen(in_id,num_wgts_id,&mpf.num_wgts); } /* !num_wgts_id */ assert(mpf.src_grid_size < INT_MAX && mpf.dst_grid_size < INT_MAX); }else{ mpf.src_grid_size=long_CEWI; mpf.dst_grid_size=long_CEWI; mpf.src_grid_corners=long_CEWI; mpf.dst_grid_corners=long_CEWI; mpf.src_grid_rank=long_CEWI; mpf.dst_grid_rank=long_CEWI; mpf.num_wgts=int_CEWI; } /* !ESMF_weight_only */ cnv_sng=strdup("normalization"); nco_rgr_nrm_typ_enm nco_rgr_nrm_typ=nco_rgr_nrm_nil; 
att_val=nco_char_att_get(in_id,NC_GLOBAL,cnv_sng); if(att_val){ if(strstr(att_val,"fracarea")) nco_rgr_nrm_typ=nco_rgr_nrm_fracarea; /* 20190912: map_gx1v6T_to_1x1_bilin.nc and map_0.1T_tripole_to_0.1x0.1_bilin.nc store "fracarea" in normalization attribute. I think NCAR created both maps for POP, probably by running ERWG with option --norm_type=fracarea. Hence "fracarea" seems to be the NCAR-way of guaranteeing that ESMF re-normalization is not performed by default. */ if(strstr(att_val,"destarea")) nco_rgr_nrm_typ=nco_rgr_nrm_destarea; /* ESMF conserve "aave" and bilinear "bilin" generate "destarea" by default */ if(strstr(att_val,"none")) nco_rgr_nrm_typ=nco_rgr_nrm_none; if(att_val) att_val=(char *)nco_free(att_val); }else{ /* 20150712: Tempest does not store a normalization attribute 20170620: ESMF weight_only does not store a normalization attribute 20190312: NCO does not yet store a normalization attribute */ if(nco_rgr_mpf_typ == nco_rgr_mpf_MBTR || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_NCO || nco_rgr_mpf_typ == nco_rgr_mpf_unknown || nco_rgr_mpf_typ == nco_rgr_mpf_ESMF_weight_only) nco_rgr_nrm_typ=nco_rgr_nrm_unknown; } /* endif normalization */ assert(nco_rgr_nrm_typ != nco_rgr_nrm_nil); if(cnv_sng) cnv_sng=(char *)nco_free(cnv_sng); cnv_sng=strdup("map_method"); nco_rgr_mth_typ_enm nco_rgr_mth_typ=nco_rgr_mth_nil; att_val=nco_char_att_get(in_id,NC_GLOBAL,cnv_sng); if(att_val){ if(strcasestr(att_val,"Conservative")) nco_rgr_mth_typ=nco_rgr_mth_conservative; if(strcasestr(att_val,"Bilinear")) nco_rgr_mth_typ=nco_rgr_mth_bilinear; if(strcasestr(att_val,"none")) nco_rgr_mth_typ=nco_rgr_mth_none; }else{ /* Tempest does not store a map_method attribute */ if(nco_rgr_mpf_typ == nco_rgr_mpf_MBTR || nco_rgr_mpf_typ == nco_rgr_mpf_NCO || nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_unknown) nco_rgr_mth_typ=nco_rgr_mth_unknown; } /* endif */ if(nco_rgr_mth_typ == nco_rgr_mth_nil) (void)fprintf(stdout,"%s: WARNING %s reports map global attribute %s = %s does not match SCRIP/ESMF conventions that support only values of \"Conservative\" and \"Bilinear\" for this attribute. Proceeding anyway...\n",nco_prg_nm_get(),fnc_nm,cnv_sng,att_val ? att_val : "(nil)"); /* NB: Free att_val only after the WARNING above, which may print it */ if(att_val) att_val=(char *)nco_free(att_val); if(cnv_sng) cnv_sng=(char *)nco_free(cnv_sng); if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stderr,"%s: INFO %s regridding input metadata and grid sizes: ",nco_prg_nm_get(),fnc_nm); (void)fprintf(stderr,"mapfile_generator = %s, map_method = %s, normalization = %s, src_grid_size = n_a = %li, dst_grid_size = n_b = %li, src_grid_corners = nv_a = %li, dst_grid_corners = nv_b = %li, src_grid_rank = %li, dst_grid_rank = %li, num_links = n_s = %li, num_wgts = %li\n",nco_rgr_mpf_sng(nco_rgr_mpf_typ),nco_rgr_mth_sng(nco_rgr_mth_typ),nco_rgr_nrm_sng(nco_rgr_nrm_typ),mpf.src_grid_size,mpf.dst_grid_size,mpf.src_grid_corners,mpf.dst_grid_corners,mpf.src_grid_rank,mpf.dst_grid_rank,mpf.num_links,mpf.num_wgts); } /* endif dbg */ /* 20190726: Allow normalization type to be "none" for bilinear regridding, which UKMO SCRIP files set to "none" */ if(nco_rgr_mth_typ == nco_rgr_mth_conservative && nco_rgr_nrm_typ == nco_rgr_nrm_none){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports requested normalization type = %s is not yet supported. Specifically, masks specified by a mask variable (dst_grid_imask,mask_b) are ignored. 
More specifically, any destination mask information is assumed to be built into the weight array so that no source points will contribute to masked locations. Talk to Charlie if you want this changed.\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ)); nco_exit(EXIT_FAILURE); } /* !msk */ /* Got to here in bullet-proofing code for weight-only map-files */ if(nco_rgr_mpf_typ == nco_rgr_mpf_ESMF_weight_only) (void)fprintf(stderr,"%s: WARNING %s reached end of ESMF_weight_only section\n",nco_prg_nm_get(),fnc_nm); assert(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only); /* Set type of grid conversion */ if(mpf.src_grid_rank == 1 && mpf.dst_grid_rank == 1) nco_rgr_typ=nco_rgr_grd_1D_to_1D; if(mpf.src_grid_rank == 1 && mpf.dst_grid_rank == 2) nco_rgr_typ=nco_rgr_grd_1D_to_2D; if(mpf.src_grid_rank == 2 && mpf.dst_grid_rank == 1) nco_rgr_typ=nco_rgr_grd_2D_to_1D; if(mpf.src_grid_rank == 2 && mpf.dst_grid_rank == 2) nco_rgr_typ=nco_rgr_grd_2D_to_2D; assert(nco_rgr_typ != nco_rgr_grd_nil); /* Save typing later */ nco_bool flg_grd_in_1D_dat_in_2D=False; nco_bool flg_grd_in_1D=False; nco_bool flg_grd_in_2D=False; nco_bool flg_grd_out_1D=False; nco_bool flg_grd_out_2D=False; if(nco_rgr_typ == nco_rgr_grd_1D_to_1D || nco_rgr_typ == nco_rgr_grd_1D_to_2D) flg_grd_in_1D=True; if(nco_rgr_typ == nco_rgr_grd_2D_to_1D || nco_rgr_typ == nco_rgr_grd_2D_to_2D) flg_grd_in_2D=True; if(nco_rgr_typ == nco_rgr_grd_1D_to_1D || nco_rgr_typ == nco_rgr_grd_2D_to_1D) flg_grd_out_1D=True; if(nco_rgr_typ == nco_rgr_grd_1D_to_2D || nco_rgr_typ == nco_rgr_grd_2D_to_2D) flg_grd_out_2D=True; int dmn_nbr_hrz_crd; /* [nbr] Number of horizontal dimensions in output grid */ if(flg_grd_out_2D) dmn_nbr_hrz_crd=2; else dmn_nbr_hrz_crd=1; /* Obtain grid values necessary to compute output latitude and longitude coordinates */ int area_dst_id; /* [id] Area variable ID */ int col_src_adr_id; /* [id] Source address (col) variable ID */ int dmn_sz_in_int_id; /* [id] Source grid dimension sizes ID */ int dmn_sz_out_int_id; /* [id] Destination grid dimension sizes ID */ int dst_grd_crn_lat_id; /* [id] Destination grid corner latitudes variable ID */ int dst_grd_crn_lon_id; /* [id] Destination grid corner longitudes variable ID */ int dst_grd_ctr_lat_id; /* [id] Destination grid center latitudes variable ID */ int dst_grd_ctr_lon_id; /* [id] Destination grid center longitudes variable ID */ int frc_dst_id; /* [id] Fraction variable ID */ int msk_dst_id=NC_MIN_INT; /* [id] Mask variable ID */ int row_dst_adr_id; /* [id] Destination address (row) variable ID */ int wgt_raw_id; /* [id] Remap matrix variable ID */ switch(nco_rgr_mpf_typ){ /* Obtain fields whose name depends on mapfile type */ case nco_rgr_mpf_SCRIP: rcd+=nco_inq_varid(in_id,"dst_grid_area",&area_dst_id); /* ESMF: area_b */ rcd+=nco_inq_varid(in_id,"dst_grid_center_lon",&dst_grd_ctr_lon_id); /* ESMF: xc_b */ rcd+=nco_inq_varid(in_id,"dst_grid_center_lat",&dst_grd_ctr_lat_id); /* ESMF: yc_b */ rcd+=nco_inq_varid(in_id,"dst_grid_corner_lon",&dst_grd_crn_lon_id); /* ESMF: xv_b */ rcd+=nco_inq_varid(in_id,"dst_grid_corner_lat",&dst_grd_crn_lat_id); /* ESMF: yv_b */ rcd+=nco_inq_varid(in_id,"dst_grid_frac",&frc_dst_id); /* ESMF: frac_b */ rcd+=nco_inq_varid(in_id,"dst_address",&row_dst_adr_id); /* ESMF: row */ rcd+=nco_inq_varid(in_id,"src_address",&col_src_adr_id); /* ESMF: col */ rcd+=nco_inq_varid(in_id,"remap_matrix",&wgt_raw_id); /* NB: remap_matrix[num_links,num_wgts] != S[n_s] */ break; case nco_rgr_mpf_ESMF: case nco_rgr_mpf_ESMF_weight_only: case nco_rgr_mpf_MBTR: case 
nco_rgr_mpf_NCO: case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: if(nco_rgr_mpf_typ != nco_rgr_mpf_ESMF_weight_only){ rcd+=nco_inq_varid(in_id,"area_b",&area_dst_id); /* SCRIP: dst_grid_area */ rcd+=nco_inq_varid(in_id,"xc_b",&dst_grd_ctr_lon_id); /* SCRIP: dst_grid_center_lon */ rcd+=nco_inq_varid(in_id,"yc_b",&dst_grd_ctr_lat_id); /* SCRIP: dst_grid_center_lat */ rcd+=nco_inq_varid(in_id,"xv_b",&dst_grd_crn_lon_id); /* SCRIP: dst_grid_corner_lon */ rcd+=nco_inq_varid(in_id,"yv_b",&dst_grd_crn_lat_id); /* SCRIP: dst_grid_corner_lat */ rcd+=nco_inq_varid(in_id,"frac_b",&frc_dst_id); /* SCRIP: dst_grid_frac */ } /* !nco_rgr_mpf_ESMF_weight_only */ rcd+=nco_inq_varid(in_id,"row",&row_dst_adr_id); /* SCRIP: dst_address */ rcd+=nco_inq_varid(in_id,"col",&col_src_adr_id); /* SCRIP: src_address */ rcd+=nco_inq_varid(in_id,"S",&wgt_raw_id); /* NB: remap_matrix[num_links,num_wgts] != S[n_s] */ break; default: (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); /* NB: This return never executes because nco_dfl_case_generic_err() calls exit() Return placed here to suppress clang -Wsometimes-uninitialized warnings This is done many other times throughout the code, though explained only once, here */ return NCO_ERR; break; } /* end switch */ /* Obtain fields whose presence depends on mapfile type */ nco_bool flg_msk_out=rgr->flg_msk_out; /* [flg] Add mask to output */ nco_bool flg_msk_apl=rgr->flg_msk_apl; /* [flg] Apply msk_out to variables after regridding */ msk_dst_id=NC_MIN_INT; if(flg_msk_out || flg_msk_apl){ switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_SCRIP: rcd=nco_inq_varid_flg(in_id,"dst_grid_imask",&msk_dst_id); /* ESMF: mask_b */ break; case nco_rgr_mpf_ESMF: case nco_rgr_mpf_MBTR: case nco_rgr_mpf_NCO: case nco_rgr_mpf_Tempest: case nco_rgr_mpf_unknown: /* 20190315: TempestRemap did not propagate mask_a/b until ~201902 20210519: MBTR did not propagate mask_a/b as of ~202105 */ rcd=nco_inq_varid_flg(in_id,"mask_b",&msk_dst_id); /* SCRIP: dst_grid_imask */ break; default: (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unknown map-file type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_generic_err(); } /* !nco_rgr_mpf_typ */ if(rcd == NC_ENOTVAR){ if(flg_msk_apl){ (void)fprintf(stderr,"%s: ERROR %s reports that user requested (with --mask_apply) the regridder to apply the destination mask field to variables after regridding. Unfortunately, the map-file lacks a destination mask of the expected name (usually \"mask_b\").\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* flg_msk_apl */ (void)fprintf(stderr,"%s: INFO %s reports map-file lacks mask_b. %sContinuing anyway without masks...\n",nco_prg_nm_get(),fnc_nm,(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest || nco_rgr_mpf_typ == nco_rgr_mpf_MBTR) ? 
"Probably this is either a TempestRemap map-file created before ~201902 when TR began to propagate mask_a/b variables, or it is a MOAB-TempestRemap file which has never (as of 202105) propagated mask_a/b variables" : ""); rcd=NC_NOERR; } /* !rcd */ if(msk_dst_id == NC_MIN_INT) flg_msk_out=False; } /* !flg_msk_out */ /* Obtain fields whose names are independent of mapfile type */ rcd+=nco_inq_varid(in_id,"src_grid_dims",&dmn_sz_in_int_id); rcd+=nco_inq_varid(in_id,"dst_grid_dims",&dmn_sz_out_int_id); int lon_psn_src; /* [idx] Ordinal position of longitude in rectangular source grid dimension-size array */ int lat_psn_src; /* [idx] Ordinal position of latitude in rectangular source grid dimension-size array */ int lon_psn_dst=int_CEWI; /* [idx] Ordinal position of longitude in rectangular destination grid dimension-size array */ int lat_psn_dst=int_CEWI; /* [idx] Ordinal position of latitude in rectangular destination grid dimension-size array */ if(flg_grd_in_2D){ lon_psn_src=0; /* SCRIP introduced [lon,lat] convention because more natural for Fortran */ lat_psn_src=1; if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest){ /* Until 20150814, Tempest stored [src/dst]_grid_dims as [lat,lon] unlike SCRIP's [lon,lat] order Newer behavior follows SCRIP [lon,lat] order Challenge: Support both older and newer Tempest mapfiles Tempest (unlike SCRIP and ESMF) annotates mapfile [src/dst]_grid_dims with attributes that identify axis to which each element of [src/dst]_grid_dims refers Solution: Use Tempest mapfile [src/dst]_grid_dims attributes "name0" and/or "name1" to determine if axes' positions follow old order */ att_val=nco_char_att_get(in_id,dmn_sz_in_int_id,name0_sng); if(att_val){ if(strstr(att_val,"lat")){ lon_psn_src=1; lat_psn_src=0; } /* !lat */ if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ } /* !Tempest */ } /* !flg_grd_in_2D */ if(flg_grd_out_2D){ lon_psn_dst=0; lat_psn_dst=1; if(nco_rgr_mpf_typ == nco_rgr_mpf_Tempest){ att_val=nco_char_att_get(in_id,dmn_sz_in_int_id,name0_sng); if(att_val){ if(strstr(att_val,"lat")){ lon_psn_dst=1; lat_psn_dst=0; } /* !lat */ if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ } /* !Tempest */ } /* !flg_grd_out_2D */ const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ double *area_out; /* [sr] Area of destination grid */ double *frc_out=NULL; /* [frc] Fraction of destination grid */ double *lat_bnd_out=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular destination grid */ double *lat_crn_out=NULL; /* [dgr] Latitude corners of rectangular destination grid */ double *lat_ctr_out=NULL_CEWI; /* [dgr] Latitude centers of rectangular destination grid */ double *lat_ntf_out=NULL; /* [dgr] Latitude interfaces of rectangular destination grid */ double *lat_wgt_out=NULL; /* [dgr] Latitude weights of rectangular destination grid */ double *lon_bnd_out=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular destination grid */ double *lon_crn_out=NULL; /* [dgr] Longitude corners of rectangular destination grid */ double *lon_ctr_out=NULL_CEWI; /* [dgr] Longitude centers of rectangular destination grid */ double *lon_ntf_out=NULL; /* [dgr] Longitude interfaces of rectangular destination grid */ double *slat_ctr_out=NULL_CEWI; /* [dgr] Latitude centers of staggered FV destination grid */ double 
*slat_wgt_out=NULL_CEWI; /* [frc] Latitude weights of staggered FV destination grid */ double *slon_ctr_out=NULL_CEWI; /* [dgr] Longitude centers of staggered FV destination grid */ double *wgt_raw; /* [frc] Remapping weights */ int *col_src_adr; /* [idx] Source address (col) */ int *row_dst_adr; /* [idx] Destination address (row) */ int *msk_out=NULL; /* [flg] Mask on destination grid */ int *dmn_sz_in_int; /* [nbr] Array of dimension sizes of source grid */ int *dmn_sz_out_int; /* [nbr] Array of dimension sizes of destination grid */ long *dmn_cnt_in=NULL; long *dmn_cnt_out=NULL; long *dmn_cnt=NULL; long *dmn_srt=NULL; long *dmn_srd=NULL; long idx; /* [idx] Counting index for unrolled grids */ /* Allocate space to hold dimension metadata for destination grid */ dmn_srt=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_cnt=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_srd=(long *)nco_malloc(dmn_nbr_grd_max*sizeof(long)); dmn_srt[0]=0L; dmn_cnt[0]=mpf.src_grid_rank; dmn_sz_in_int=(int *)nco_malloc(mpf.src_grid_rank*nco_typ_lng((nc_type)NC_INT)); rcd=nco_get_vara(in_id,dmn_sz_in_int_id,dmn_srt,dmn_cnt,dmn_sz_in_int,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=mpf.dst_grid_rank; dmn_sz_out_int=(int *)nco_malloc(mpf.dst_grid_rank*nco_typ_lng((nc_type)NC_INT)); rcd=nco_get_vara(in_id,dmn_sz_out_int_id,dmn_srt,dmn_cnt,dmn_sz_out_int,(nc_type)NC_INT); /* Check for and work around faulty Tempest and MPAS-O/I grid sizes */ if(flg_grd_in_1D && (mpf.src_grid_size != dmn_sz_in_int[0])){ (void)fprintf(stdout,"%s: INFO %s reports input grid dimension sizes disagree: mpf.src_grid_size = %ld != %d = dmn_sz_in[0]. Problem may be caused by incorrect src_grid_dims variable. This is a known issue with some TempestRemap mapfiles generated prior to ~20150901, and in some ESMF mapfiles for MPAS-O/I. This problem can be safely ignored if workaround succeeds. Attempting workaround ...\n",nco_prg_nm_get(),fnc_nm,mpf.src_grid_size,dmn_sz_in_int[0]); dmn_sz_in_int[0]=mpf.src_grid_size; } /* !bug */ if(flg_grd_out_1D && (mpf.dst_grid_size != dmn_sz_out_int[0])){ (void)fprintf(stdout,"%s: INFO %s reports output grid dimension sizes disagree: mpf.dst_grid_size = %ld != %d = dmn_sz_out[0]. Problem may be caused by incorrect dst_grid_dims variable. This is a known issue with some TempestRemap mapfiles generated prior to ~20150901, and in some ESMF mapfiles for MPAS-O/I. This problem can be safely ignored if workaround succeeds. 
Attempting workaround ...\n",nco_prg_nm_get(),fnc_nm,mpf.dst_grid_size,dmn_sz_out_int[0]); dmn_sz_out_int[0]=mpf.dst_grid_size; } /* !bug */ long col_nbr_in; /* [idx] Number of columns in source grid */ long lon_nbr_in; /* [idx] Number of longitudes in rectangular source grid */ long lat_nbr_in; /* [idx] Number of latitudes in rectangular source grid */ const size_t grd_sz_in=mpf.src_grid_size; /* [nbr] Number of elements in single layer of input grid */ const size_t grd_sz_out=mpf.dst_grid_size; /* [nbr] Number of elements in single layer of output grid */ if(flg_grd_in_1D){ col_nbr_in=dmn_sz_in_int[0]; lon_nbr_in=dmn_sz_in_int[0]; lat_nbr_in=dmn_sz_in_int[0]; }else if(flg_grd_in_2D){ col_nbr_in=0; lon_nbr_in=dmn_sz_in_int[lon_psn_src]; lat_nbr_in=dmn_sz_in_int[lat_psn_src]; /* Sanity-check */ assert(lat_nbr_in*lon_nbr_in == (long)grd_sz_in); } /* !src_grid_rank */ const int bnd_tm_nbr_out=2; /* [nbr] Number of boundaries for output time */ int bnd_nbr_out=int_CEWI; /* [nbr] Number of boundaries for output time and rectangular grid coordinates, and number of vertices for output non-rectangular grid coordinates */ long col_nbr_out=long_CEWI; /* [nbr] Number of columns in destination grid */ long lon_nbr_out=long_CEWI; /* [nbr] Number of longitudes in rectangular destination grid */ long lat_nbr_out=long_CEWI; /* [nbr] Number of latitudes in rectangular destination grid */ long slat_nbr_out=long_CEWI; /* [nbr] Number of latitudes in staggered FV destination grid */ long slon_nbr_out=long_CEWI; /* [nbr] Number of longitudes in staggered FV destination grid */ if(flg_grd_out_1D){ bnd_nbr_out=mpf.dst_grid_corners; col_nbr_out=dmn_sz_out_int[0]; lat_nbr_out=dmn_sz_out_int[0]; lon_nbr_out=dmn_sz_out_int[0]; /* Sanity-check */ assert(col_nbr_out == (long)grd_sz_out); }else if(flg_grd_out_2D){ lat_nbr_out=dmn_sz_out_int[lat_psn_dst]; lon_nbr_out=dmn_sz_out_int[lon_psn_dst]; col_nbr_out=lat_nbr_out*lon_nbr_out; slat_nbr_out=lat_nbr_out-1L; slon_nbr_out=lon_nbr_out; /* Sanity-check */ assert(lat_nbr_out*lon_nbr_out == (long)grd_sz_out); } /* !dst_grid_rank */ /* Ensure coordinates are in degrees not radians for simplicity and CF-compliance NB: ${DATA}/scrip/rmp_T42_to_POP43_conserv.nc has [xy]?_a in degrees and [xy]?_b in radians! 
*/ nco_bool flg_crd_rdn=False; /* [flg] Destination coordinates are in radians not degrees */ char unt_sng[]="units"; /* [sng] netCDF-standard units attribute name */ att_val=nco_char_att_get(in_id,dst_grd_ctr_lat_id,unt_sng); if(att_val){ /* Match "radian" and "radians" */ if(strstr(att_val,"radian")) flg_crd_rdn=True; if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ nco_bool flg_grd_out_crv=False; /* [flg] Curvilinear coordinates */ nco_bool flg_grd_out_rct=False; /* [flg] Rectangular coordinates */ const nc_type crd_typ_out=NC_DOUBLE; if(flg_grd_out_2D){ lon_ctr_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); lon_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*grd_sz_out*nco_typ_lng(crd_typ_out)); lat_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); rcd=nco_get_vara(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,lat_ctr_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_out; dmn_cnt[1]=mpf.dst_grid_corners; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_crn_out,crd_typ_out); rcd=nco_get_vara(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,lat_crn_out,crd_typ_out); /* User may specify curvilinear grid (with --rgr crv). Otherwise, manually test for curvilinear output grid. */ flg_grd_out_crv=rgr->flg_crv; /* [flg] Curvilinear coordinates */ if(flg_grd_out_crv){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Output grid specified to be %s\n",nco_prg_nm_get(),flg_grd_out_crv ? "Curvilinear" : "Rectangular"); }else{ long idx_tst=long_CEWI; /* [idx] Index of first latitude or longitude */ for(idx=0;idx<(long)grd_sz_out;idx++){ if(idx%lon_nbr_out == 0) idx_tst=idx; if(lat_ctr_out[idx] != lat_ctr_out[idx_tst]) break; // (void)fprintf(stdout,"%s: DEBUG lat_ctr_out[%li] = %g, lat_ctr_out[%li] = %g\n",nco_prg_nm_get(),idx,lat_ctr_out[idx],idx_tst,lat_ctr_out[idx_tst]); /* fxm: also test lon */ } /* !rectangular */ if(idx != (long)grd_sz_out) flg_grd_out_crv=True; else flg_grd_out_rct=True; if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO Output grid detected to be %s\n",nco_prg_nm_get(),flg_grd_out_crv ? 
"Curvilinear" : "Rectangular"); } /* !flg_grd_out_crv */ if(flg_grd_out_crv) bnd_nbr_out=mpf.dst_grid_corners; if(flg_grd_out_rct) bnd_nbr_out=2; /* NB: Assumes rectangular latitude and longitude and is invalid for other quadrilaterals */ } /* !flg_grd_out_2D */ if(nco_dbg_lvl_get() >= nco_dbg_scl){ (void)fprintf(stderr,"%s: INFO %s grid conversion type = %s with expected input and prescribed output grid sizes: ",nco_prg_nm_get(),fnc_nm,nco_rgr_grd_sng(nco_rgr_typ)); (void)fprintf(stderr,"lat_in = %li, lon_in = %li, col_in = %li, lat_out = %li, lon_out = %li, col_out = %li\n",lat_nbr_in,lon_nbr_in,col_nbr_in,lat_nbr_out,lon_nbr_out,col_nbr_out); } /* endif dbg */ /* Allocate space for and obtain coordinates */ if(flg_grd_out_1D){ lon_ctr_out=(double *)nco_malloc(col_nbr_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(col_nbr_out*nco_typ_lng(crd_typ_out)); lon_bnd_out=(double *)nco_malloc(col_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); lat_bnd_out=(double *)nco_malloc(col_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); } /* !flg_grd_out_1D */ if(flg_grd_out_rct){ if(lat_ctr_out) lat_ctr_out=(double *)nco_free(lat_ctr_out); if(lon_ctr_out) lon_ctr_out=(double *)nco_free(lon_ctr_out); if(lat_crn_out) lat_crn_out=(double *)nco_free(lat_crn_out); if(lon_crn_out) lon_crn_out=(double *)nco_free(lon_crn_out); lon_ctr_out=(double *)nco_malloc(lon_nbr_out*nco_typ_lng(crd_typ_out)); lat_ctr_out=(double *)nco_malloc(lat_nbr_out*nco_typ_lng(crd_typ_out)); lon_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*lon_nbr_out*nco_typ_lng(crd_typ_out)); lat_crn_out=(double *)nco_malloc(mpf.dst_grid_corners*lat_nbr_out*nco_typ_lng(crd_typ_out)); lat_wgt_out=(double *)nco_malloc(lat_nbr_out*nco_typ_lng(crd_typ_out)); lon_ntf_out=(double *)nco_malloc((lon_nbr_out+1L)*nco_typ_lng(crd_typ_out)); lat_ntf_out=(double *)nco_malloc((lat_nbr_out+1L)*nco_typ_lng(crd_typ_out)); lon_bnd_out=(double *)nco_malloc(lon_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); lat_bnd_out=(double *)nco_malloc(lat_nbr_out*bnd_nbr_out*nco_typ_lng(crd_typ_out)); } /* !flg_grd_out_rct */ /* Arrays unroll into all longitudes for first latitude, then second latitude, ... Obtain longitudes by reading first block contiguously (unstrided) Obtain latitudes by reading unrolled data with stride of lon_nbr */ if(flg_grd_out_1D){ dmn_srt[0]=0L; dmn_cnt[0]=col_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); dmn_srt[0]=0L; dmn_cnt[0]=col_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,lat_ctr_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr_out; dmn_cnt[1]=bnd_nbr_out; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_bnd_out,crd_typ_out); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr_out; dmn_cnt[1]=bnd_nbr_out; rcd=nco_get_vara(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,lat_bnd_out,crd_typ_out); if(flg_crd_rdn){ for(idx=0;idx<col_nbr_out;idx++){ lon_ctr_out[idx]*=rdn2dgr; lat_ctr_out[idx]*=rdn2dgr; } /* !idx */ for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++){ lon_bnd_out[idx]*=rdn2dgr; lat_bnd_out[idx]*=rdn2dgr; } /* !idx */ } /* !rdn */ /* Is 1D interface information usable? 
Yes, unless all interfaces are zero. NB: fxm Better algorithm for "usable" is that not all interfaces in any cell are equal */ flg_bnd_1D_usable=True; for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++) if(lon_bnd_out[idx] != 0.0) break; if(idx == col_nbr_out*bnd_nbr_out){ flg_bnd_1D_usable=False; }else{ for(idx=0;idx<col_nbr_out*bnd_nbr_out;idx++) if(lat_bnd_out[idx] != 0.0) break; if(idx == col_nbr_out*bnd_nbr_out) flg_bnd_1D_usable=False; } /* !usable */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0;idx<lat_nbr_out;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr_out[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr_out;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd_out[bnd_nbr_out*idx+bnd_idx],bnd_idx == bnd_nbr_out-1 ? "]\n" : ", "); } /* end loop over lat */ for(idx=0;idx<lon_nbr_out;idx++){ (void)fprintf(stdout,"lon[%li] = %g, vertices = ",idx,lon_ctr_out[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr_out;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lon_bnd_out[bnd_nbr_out*idx+bnd_idx],bnd_idx == bnd_nbr_out-1 ? "]\n" : ", "); } /* end loop over lon */ } /* endif dbg */ } /* !flg_grd_out_1D */ if(flg_grd_out_rct){ /* fxm: sub-sample these from the already-read ctr/crn arrays */ dmn_srt[0L]=0L; dmn_cnt[0L]=lon_nbr_out; rcd=nco_get_vara(in_id,dst_grd_ctr_lon_id,dmn_srt,dmn_cnt,lon_ctr_out,crd_typ_out); dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr_out; dmn_srd[0L]=lon_nbr_out; rcd=nco_get_vars(in_id,dst_grd_ctr_lat_id,dmn_srt,dmn_cnt,dmn_srd,lat_ctr_out,crd_typ_out); dmn_srt[0L]=dmn_srt[1]=0L; dmn_cnt[0L]=lon_nbr_out; dmn_cnt[1]=mpf.dst_grid_corners; rcd=nco_get_vara(in_id,dst_grd_crn_lon_id,dmn_srt,dmn_cnt,lon_crn_out,crd_typ_out); dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr_out; dmn_srd[0L]=lon_nbr_out; dmn_srt[1]=0L; dmn_cnt[1]=mpf.dst_grid_corners; dmn_srd[1]=1L; rcd=nco_get_vars(in_id,dst_grd_crn_lat_id,dmn_srt,dmn_cnt,dmn_srd,lat_crn_out,crd_typ_out); if(flg_crd_rdn){ for(idx=0L;idx<lon_nbr_out;idx++) lon_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<lat_nbr_out;idx++) lat_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<lon_nbr_out*mpf.dst_grid_corners;idx++) lon_crn_out[idx]*=rdn2dgr; for(idx=0L;idx<lat_nbr_out*mpf.dst_grid_corners;idx++) lat_crn_out[idx]*=rdn2dgr; } /* !rdn */ } /* !flg_grd_out_rct */ if(flg_grd_out_crv){ if(flg_crd_rdn){ for(idx=0L;idx<(long)grd_sz_out;idx++) lon_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out;idx++) lat_ctr_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out*mpf.dst_grid_corners;idx++) lon_crn_out[idx]*=rdn2dgr; for(idx=0L;idx<(long)grd_sz_out*mpf.dst_grid_corners;idx++) lat_crn_out[idx]*=rdn2dgr; } /* !rdn */ } /* !flg_grd_out_crv */ /* Allocate space for and obtain area, fraction, and mask, which are needed for both 1D and 2D grids */ area_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,area_dst_id,dmn_srt,dmn_cnt,area_out,crd_typ_out); frc_out=(double *)nco_malloc(grd_sz_out*nco_typ_lng(crd_typ_out)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,frc_dst_id,dmn_srt,dmn_cnt,frc_out,crd_typ_out); if(msk_dst_id != NC_MIN_INT){ msk_out=(int *)nco_malloc(grd_sz_out*nco_typ_lng(NC_INT)); dmn_srt[0L]=0L; dmn_cnt[0L]=grd_sz_out; rcd=nco_get_vara(in_id,msk_dst_id,dmn_srt,dmn_cnt,msk_out,(nc_type)NC_INT); } /* !msk */ /* Derive 2D interface boundaries from lat and lon grid-center values NB: Procedures to derive interfaces from midpoints on rectangular grids are theoretically possible However, ESMF often outputs interface values (e.g., yv_b) 
for midpoint coordinates (e.g., yc_b) For example, ACME standard map from ne120np4 to 181x360 has yc_b[0] = yv_b[0] = -90.0 Latitude = -90 is, by definition, not a midpoint coordinate This appears to be an artifact of the non-physical representation of the FV grid, i.e., a grid center located at the pole where longitudes collapse in the model, but cannot be represented as collapsed on a rectangular 2D grid with non-zero areas. Unfortunately, ESMF supports this nonsense by labeling the grid center as at the pole so that applications can easily diagnose an FV grid when they read in datasets. A superior application could diagnose FV just fine from actual non-polar gridcell centers Maybe ESMF could introduce a flag or something to indicate/avoid this special case? Safer to read boundary interfaces directly from grid corner/vertice arrays in map file Derivation of boundaries xv_b, yv_b from _correct_ xc_b, yc_b is as follows Do not implement this procedure until resolving midpoint/center issue described above: lon_ntf_out[0L]=0.5*(lon_ctr_out[0L]+lon_ctr_out[lon_nbr_out-1L])-180.0; // Extrapolation lat_ntf_out[0L]=lat_ctr_out[0L]-0.5*(lat_ctr_out[1L]-lat_ctr_out[0L]); // Extrapolation for(idx=1L;idx<lon_nbr_out;idx++) lon_ntf_out[idx]=0.5*(lon_ctr_out[idx-1L]+lon_ctr_out[idx]); for(idx=1L;idx<lat_nbr_out;idx++) lat_ntf_out[idx]=0.5*(lat_ctr_out[idx-1L]+lat_ctr_out[idx]); lon_ntf_out[lon_nbr_out]=lon_ntf_out[0L]+360.0; lat_ntf_out[lat_nbr_out]=lat_ctr_out[lat_nbr_out-1L]+0.5*(lat_ctr_out[lat_nbr_out-1L]-lat_ctr_out[lat_nbr_out-2L]); */ if(flg_grd_out_rct){ double lon_spn; /* [dgr] Longitude span */ double lat_spn; /* [dgr] Latitude span */ nco_bool flg_s2n=True; /* I [enm] Latitude grid-direction is South-to-North */ if(lat_ctr_out[1L] < lat_ctr_out[0L]) flg_s2n=False; /* Obtain 1-D rectangular interfaces from unrolled 1-D vertice arrays */ for(idx=0L;idx<lon_nbr_out;idx++) lon_ntf_out[idx]=lon_crn_out[mpf.dst_grid_corners*idx]; /* 20201009 The four possible CCW RLL orderings start with the ul, ll, lr, or ur vertice NCO grid generators store vertices in order (0,1,2,3)=(ul,ll,lr,ur) NCO final latitude is in upper vertices (0,3) for S2N grids, lower vertices (1,2) for N2S grids NCO final longitude is in RHS vertices (2,3) for S2N and N2S grids Need generic algorithm to pick easternmost longitude for any of the four CCW orderings What is ESMF vertice ordering? Or does ESMF always copy from input grid? Most grid generators probably start with ul or ll so vertice 2 is good choice for easternmost */ // lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-(mpf.dst_grid_corners-1L)]; // ESMF? 
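/* One generic possibility (a sketch only, not current NCO behavior) for the easternmost-longitude question raised above: initialize to the penultimate interface, then scan every corner of the final cell and keep the largest longitude, assuming the final cell's longitudes share one branch (no 0/360 wrap within the cell):
   lon_ntf_out[lon_nbr_out]=lon_ntf_out[lon_nbr_out-1L];
   for(long crn_idx=0L;crn_idx<(long)mpf.dst_grid_corners;crn_idx++){
     const double lon_crn=lon_crn_out[mpf.dst_grid_corners*(lon_nbr_out-1L)+crn_idx]; // Corners of final cell
     if(lon_crn > lon_ntf_out[lon_nbr_out]) lon_ntf_out[lon_nbr_out]=lon_crn;
   } */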
lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-2L]; // NCO lr if(lon_ntf_out[lon_nbr_out-1] == lon_ntf_out[lon_nbr_out]) lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-1L]; // NCO ur if(lon_ntf_out[lon_nbr_out-1] == lon_ntf_out[lon_nbr_out]) lon_ntf_out[lon_nbr_out]=lon_crn_out[mpf.dst_grid_corners*lon_nbr_out-3L]; // NCO ll assert(lon_ntf_out[lon_nbr_out-1] != lon_ntf_out[lon_nbr_out]); lon_spn=lon_ntf_out[lon_nbr_out]-lon_ntf_out[0L]; for(idx=0L;idx<lat_nbr_out;idx++) lat_ntf_out[idx]=lat_crn_out[mpf.dst_grid_corners*idx]; if(flg_s2n) lat_ntf_out[lat_nbr_out]=max_dbl(lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-1L],lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-2L]); else lat_ntf_out[lat_nbr_out]=min_dbl(lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-1L],lat_crn_out[mpf.dst_grid_corners*lat_nbr_out-2L]); assert(lat_ntf_out[lat_nbr_out] != lat_ntf_out[lat_nbr_out-1]); lat_spn=fabs(lat_ntf_out[lat_nbr_out]-lat_ntf_out[0L]); /* Place 1-D rectangular interfaces into 2-D coordinate boundaries */ for(idx=0L;idx<lon_nbr_out;idx++){ lon_bnd_out[2L*idx]=lon_ntf_out[idx]; lon_bnd_out[2L*idx+1L]=lon_ntf_out[idx+1L]; } /* !lon_nbr_out */ for(idx=0L;idx<lat_nbr_out;idx++){ lat_bnd_out[2L*idx]=lat_ntf_out[idx]; lat_bnd_out[2L*idx+1L]=lat_ntf_out[idx+1L]; } /* !lat_nbr_out */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0L;idx<lon_nbr_out;idx++) (void)fprintf(stdout,"lon[%li] = [%g, %g, %g]\n",idx,lon_bnd_out[2L*idx],lon_ctr_out[idx],lon_bnd_out[2L*idx+1L]); for(idx=0L;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li] = [%g, %g, %g]\n",idx,lat_bnd_out[2L*idx],lat_ctr_out[idx],lat_bnd_out[2L*idx+1L]); } /* endif dbg */ /* Global or regional grid? */ nco_grd_xtn_enm nco_grd_xtn; /* [enm] Extent of grid */ if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; /* Diagnose type of latitude output grid by testing second latitude center against formulae */ double lat_ctr_tst_eqa; double lat_ctr_tst_fv; if(flg_s2n) lat_ctr_tst_eqa=lat_ntf_out[0L]+lat_spn*1.5/lat_nbr_out; else lat_ctr_tst_eqa=lat_ntf_out[0L]-lat_spn*1.5/lat_nbr_out; if(flg_s2n) lat_ctr_tst_fv=lat_ntf_out[0L]+lat_spn/(lat_nbr_out-1L); else lat_ctr_tst_fv=lat_ntf_out[0L]-lat_spn/(lat_nbr_out-1L); double lat_ctr_tst_gss; /* In diagnosing grids, agreement to slightly worse than single-precision is "good enough for government work" Hence some comparisons cast from double to float before comparison 20150526: T42 grid from SCRIP and related maps, and NCL-generated Gaussian grids for CESM, are accurate to at most ~eight digits 20150611: map_ne120np4_to_fv801x1600_bilin.150418.nc has yc_b[1600]=-89.775000006 not expected exact value lat_ctr[1]=-89.775000000000006 20170521: T62 grid from NCEP-NCAR Reanalysis 1 is worse than single precision, has yc_[192]=-86.6531 not expected exact value lat_ctr[1]=-86.6532 */ if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_eqa) nco_grd_lat_typ=nco_grd_lat_eqa; if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_fv) nco_grd_lat_typ=nco_grd_lat_fv; double *wgt_Gss_out=NULL; // [frc] Gaussian weights double precision if(nco_grd_lat_typ == nco_grd_lat_nil){ /* Check for Gaussian grid */ double *lat_sin_out; // [frc] Sine of Gaussian latitudes double precision lat_sin_out=(double *)nco_malloc(lat_nbr_out*sizeof(double)); wgt_Gss_out=(double *)nco_malloc(lat_nbr_out*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr_out,flg_s2n,lat_sin_out,wgt_Gss_out); lat_ctr_tst_gss=rdn2dgr*asin(lat_sin_out[1L]); /* Gaussian weights on 
output grid will be double-precision accurate Grid itself is kept as user-specified so area diagnosed by ESMF_RegridWeightGen may be slightly inconsistent with weights */ if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stderr,"%s: INFO %s reports lat_ctr_out[1] = %g, lat_ctr_tst_gss = %g\n",nco_prg_nm_get(),fnc_nm,lat_ctr_out[1L],lat_ctr_tst_gss); if((float)lat_ctr_out[1L] == (float)lat_ctr_tst_gss) nco_grd_lat_typ=nco_grd_lat_gss; if(lat_sin_out) lat_sin_out=(double *)nco_free(lat_sin_out); } /* !Gaussian */ if(nco_grd_lat_typ == nco_grd_lat_nil){ /* If still of unknown type, this 2D grid may be weird This occurs, e.g., with POP3 destination grid Change gridtype from nil (which means not-yet-set) to unknown (which means none of the others matched) */ nco_grd_lat_typ=nco_grd_lat_unk; } /* !nil */ /* Currently grd_lat_typ and grd_2D_typ are equivalent, though that may be relaxed in future */ if(nco_grd_lat_typ == nco_grd_lat_unk) nco_grd_2D_typ=nco_grd_2D_unk; else if(nco_grd_lat_typ == nco_grd_lat_gss) nco_grd_2D_typ=nco_grd_2D_gss; else if(nco_grd_lat_typ == nco_grd_lat_fv) nco_grd_2D_typ=nco_grd_2D_fv; else if(nco_grd_lat_typ == nco_grd_lat_eqa) nco_grd_2D_typ=nco_grd_2D_eqa; else assert(False); if(nco_grd_lon_typ == nco_grd_lon_nil){ /* NB: Longitude grid diagnosis is susceptible to mistakes when input mapfile embeds common faulty grids, e.g., ACME *150418* FV maps map_ne30np4_to_fv129x256_aave.150418.nc is diagnosed as regional grid of unknown type because of input grid flaws map_ne30np4_to_fv129x256_aave.20150901.nc is (correctly) diagnosed as global grid with lon_Grn_ctr */ if( (float)lon_ctr_out[0L] == 0.0f && (float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_Grn_ctr; else if((float)lon_ctr_out[0L] == -180.0f && (float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_180_ctr; else if((float)lon_ntf_out[0L] == 0.0f && (float)lon_ntf_out[1L] == (float)(lon_ntf_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_Grn_wst; else if((float)lon_ntf_out[0L] == -180.0f && (float)lon_ntf_out[1L] == (float)(lon_ntf_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_180_wst; else if((float)lon_ctr_out[1L] == (float)(lon_ctr_out[0L]+lon_spn/lon_nbr_out)) nco_grd_lon_typ=nco_grd_lon_bb; else nco_grd_lon_typ=nco_grd_lon_unk; } /* !nco_grd_lon_typ */ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output latitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lat_sng(nco_grd_lat_typ)); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output longitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lon_sng(nco_grd_lon_typ)); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO %s diagnosed output grid-extent: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_xtn_sng(nco_grd_xtn)); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ slat_ctr_out=(double *)nco_malloc(slat_nbr_out*nco_typ_lng(crd_typ_out)); slat_wgt_out=(double *)nco_malloc(slat_nbr_out*nco_typ_lng(crd_typ_out)); slon_ctr_out=(double *)nco_malloc(slon_nbr_out*nco_typ_lng(crd_typ_out)); for(idx=0L;idx<slat_nbr_out;idx++){ slat_ctr_out[idx]=lat_ntf_out[idx+1L]; slat_wgt_out[idx]=fabs(sin(dgr2rdn*lat_ctr_out[idx+1L])-sin(dgr2rdn*lat_ctr_out[idx])); /* fabs() ensures positive area in n2s grids */ } /* !slat_nbr_out */ for(idx=0L;idx<slon_nbr_out;idx++){ slon_ctr_out[idx]=lon_ntf_out[idx]; } /* !slon_nbr_out */ } /* !nco_grd_lat_fv */ switch(nco_grd_lat_typ){ case 
nco_grd_lat_eqa: case nco_grd_lat_fv: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=fabs(sin(dgr2rdn*lat_bnd_out[2*idx+1L])-sin(dgr2rdn*lat_bnd_out[2*idx])); /* fabs() ensures positive area in n2s grids */ break; case nco_grd_lat_gss: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=wgt_Gss_out[idx]; if(wgt_Gss_out) wgt_Gss_out=(double *)nco_free(wgt_Gss_out); break; case nco_grd_lat_unk: for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_out[idx]=0.0; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING %s reports unknown output latitude grid-type. Unable to guess what latitude weights should be.\n",nco_prg_nm_get(),fnc_nm); break; default: nco_dfl_case_generic_err(); break; } /* end nco_grd_lat_typ switch */ /* Fuzzy test of latitude weight normalization */ lat_wgt_ttl=0.0; for(idx=0L;idx<lat_nbr_out;idx++) lat_wgt_ttl+=lat_wgt_out[idx]; if(nco_grd_lat_typ == nco_grd_lat_eqa || nco_grd_lat_typ == nco_grd_lat_fv){ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd_out[2L*(lat_nbr_out-1L)+1L])-sin(dgr2rdn*lat_bnd_out[0L])); /* fabs() ensures positive area in n2s grids */ assert(fabs(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc) < eps_rlt); if(lat_wgt_ttl_xpc < 0.0) abort(); /* CEWI Use lat_wgt_ttl_xpc at least once outside of assert() to avoid gcc 4.8.2 set-but-not-used warning */ } /* !nco_grd_lat_eqa, !nco_grd_lat_fv */ } /* !flg_grd_out_rct */ /* When possible, ensure area_out is non-zero 20150722: ESMF documentation says "The grid area array is only output when the conservative remapping option is used" Actually, ESMF does (always?) output area, but area == 0.0 unless conservative remapping is used 20150721: ESMF bilinear interpolation map ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.150418.nc has area == 0.0 20150710: Tempest regionally refined grids like bilinearly interpolated CONUS for ACME RRM has area_out == 0 20150821: ESMF always outputs area_out == 0.0 for bilinear interpolation Check whether NCO must diagnose and provide its own area_out */ /* If area_out contains any zero... */ for(idx=0;idx<(long)grd_sz_out;idx++) if(area_out[idx] == 0.0) break; if(idx != (long)grd_sz_out){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Output grid detected with zero-valued output area(s) at idx = %ld (and likely others, too).\n",nco_prg_nm_get(),idx); } /* !zero */ for(idx=0;idx<(long)grd_sz_out;idx++) if(area_out[idx] != 0.0) break; if(idx == (long)grd_sz_out){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports area_out from mapfile is everywhere zero. This is expected for bilinearly interpolated output maps produced by ESMF_RegridWeightGen. ",nco_prg_nm_get(),fnc_nm); if(flg_grd_out_2D && flg_grd_out_rct && (bnd_nbr_out == 2 || bnd_nbr_out == 4)){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses quadrilateral area for rectangular output grids from a formula that assumes that cell boundaries follow arcs of constant latitude and longitude. This differs from the area of cells with boundaries that follow great circle arcs (used by, e.g., ESMF_RegridWeightGen and TempestRemap). Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. 
If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else if(flg_grd_out_2D && flg_grd_out_crv && (bnd_nbr_out == 2 || bnd_nbr_out == 4)){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses quadrilateral area for curvilinear output grids from formulae that assume that cell boundaries follow great circle arcs (as do, e.g., ESMF_RegridWeightGen and TempestRemap). This differs from the area of cells with boundaries that follow lines of constant latitude or longitude. Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else if(flg_grd_out_1D && flg_bnd_1D_usable){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"Since the destination grid provides cell bounds information, NCO will diagnose area (and output it as a variable named \"%s\") from the destination gridcell boundaries. NCO diagnoses spherical polygon area for unstructured output grids from formulae that assume that cell boundaries follow great circle arcs (as do, e.g., ESMF_RegridWeightGen and TempestRemap). This differs from the area of cells with boundaries that follow lines of constant latitude or longitude. Be warned that NCO correctly diagnoses area for all convex polygons, yet not for most concave polygons. To determine whether the diagnosed areas are fully consistent with the output grid, one must know such exact details. If your grid has analytic areas that NCO does not yet diagnose correctly from provided cell boundaries, please contact us.\n",rgr->area_nm); flg_dgn_area_out=True; }else{ /* !1D */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"However, NCO cannot find enough boundary information, or it is too stupid about spherical trigonometry, to diagnose area_out. NCO will output an area variable (named \"%s\") copied from the input mapfile. This area will be everywhere zero.\n",rgr->area_nm); } /* !2D */ } /* !area */ if(flg_dgn_area_out){ if(flg_grd_out_1D && flg_bnd_1D_usable){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"INFO: Diagnosing area_out for 1D grid\n"); /* Area of unstructured grids requires spherical trigonometry */ nco_sph_plg_area(rgr,lat_bnd_out,lon_bnd_out,col_nbr_out,bnd_nbr_out,area_out); } /* !1D */ if(flg_grd_out_crv){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"INFO: Diagnosing area_out for curvilinear grid\n"); /* Area of curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,lat_crn_out,lon_crn_out,grd_sz_out,bnd_nbr_out,area_out); } /* !flg_grd_out_crv */ if(flg_grd_out_rct && nco_grd_2D_typ != nco_grd_2D_unk){ /* Mr. Enenstein and George O. 
Abell taught me the area of spherical zones Spherical zone area is exact and faithful to underlying rectangular equi-angular grid However, ESMF and Tempest approximate spherical polygons as connected by great circle arcs fxm: Distinguish spherical zone shapes (e.g., equi-angular) from great circle arcs (e.g., unstructured polygons) */ for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) area_out[lat_idx*lon_nbr_out+lon_idx]=fabs(dgr2rdn*(lon_bnd_out[2*lon_idx+1]-lon_bnd_out[2*lon_idx])*(sin(dgr2rdn*lat_bnd_out[2*lat_idx+1])-sin(dgr2rdn*lat_bnd_out[2*lat_idx]))); /* fabs() ensures positive area in n2s grids */ } /* !spherical zones */ } /* !flg_dgn_area_out */ if(rgr->tst == -1){ /* Passing --rgr tst=-1 causes regridder to fail here This failure should cause host climo script to abort */ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports regridder instructed to fail here. This tests failure mode in climo scripts...\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !tst */ /* Verify that frc_out is sometimes non-zero ESMF: "The grid frac arrays (frac_a and frac_b) are calculated by ESMF_RegridWeightGen. For conservative remapping, the grid frac array returns the area fraction of the grid cell which participates in the remapping. For bilinear and patch remapping, the destination grid frac array [frac_b] is one where the grid point participates in the remapping and zero otherwise. For bilinear and patch remapping, the source grid frac array is always set to zero." SCRIP: Similar to ESMF For both ESMF+SCRIP frac_[ab] are computed by the weight-generation algorithm and are not specified as part of the input grids How does an input ocean grid indicate that, say, half the gridcell is land and half ocean? Does it use the area variable to tell the weight generation algorithm that a gridcell is fractional? In other words does it use grid_imask=1 and grid_area=0.5*full_gridcell_area and, e.g., T=273.0? */ for(idx=0;idx<(long)grd_sz_out;idx++) if(frc_out[idx] != 0.0) break; if(idx == (long)grd_sz_out){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports frc_out == frac_b contains all zeros\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !always zero */ /* Test whether frc_out is ever zero... */ for(idx=0;idx<(long)grd_sz_out;idx++) if(frc_out[idx] == 0.0) break; if(nco_dbg_lvl_get() >= nco_dbg_std) if(idx != (long)grd_sz_out) (void)fprintf(stdout,"%s: INFO %s reports frc_out == frac_b contains zero-elements (e.g., at 1D idx = %ld)\n",nco_prg_nm_get(),fnc_nm,idx); /* Normalizing by frc_out is redundant iff frc_out == 1.0, so we can save time without sacrificing accuracy However, frc_out is often (e.g., for CS <-> RLL maps) close but not equal to unity (ESMF_RegridWeightGen issue?) 
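For example, a standard remap like ne30->fv129x256 deviates from unity only at round-off scale (eps=2.46e-13, quoted below), whereas destination cells wholly outside the source grid have frc_out == 0.0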
Hence, decide whether to normalize by frc_out by diagnosing the furthest excursion of frc_out from unity */ nco_bool flg_frc_out_one=True; /* [flg] Destination gridcell fraction frc_out == frac_b is within [1-epsilon,1+epsilon] */ nco_bool flg_frc_out_wrt=False; /* [flg] Write destination gridcell fraction frc_out == frac_b to regridded files */ double frc_out_dff_one; /* [frc] Deviation of frc_out from 1.0 */ double frc_out_dff_one_max=0.0; /* [frc] Maximum deviation of frc_out from 1.0 */ long idx_max_dvn=0L; /* [idx] Index of maximum deviation from 1.0 (CEWI initialization in case frc_out is exactly unity everywhere) */ for(idx=0;idx<(long)grd_sz_out;idx++){ frc_out_dff_one=fabs(frc_out[idx]-1.0); if(frc_out_dff_one > frc_out_dff_one_max){ frc_out_dff_one_max=frc_out_dff_one; idx_max_dvn=idx; } /* !max */ } /* !idx */ if(frc_out_dff_one_max > eps_rlt) flg_frc_out_one=False; nco_bool flg_frc_nrm=False; /* [flg] Must normalize by frc_out == frac_b because frc_out is not always unity and specified normalization is destarea or none */ if(!flg_frc_out_one && /* If fraction is sometimes "far" from 1.0 and ... */ ((nco_rgr_mpf_typ == nco_rgr_mpf_ESMF && nco_rgr_mth_typ == nco_rgr_mth_conservative && (nco_rgr_nrm_typ == nco_rgr_nrm_destarea || nco_rgr_nrm_typ == nco_rgr_nrm_none)) || /* ESMF map-file specifies conservative regridding with "destarea" or "none" or ... */ (nco_rgr_mpf_typ != nco_rgr_mpf_ESMF)) /* 20191003: Weight-generator does not adhere to ESMF "normalization type" convention */ && True){ flg_frc_nrm=True; /* Avoid writing frc_out unless discrepancies are particularly egregious Otherwise would write frc_out for standard remaps like ne30->fv129x256 for which eps=2.46e-13 */ double eps_rlt_wrt_thr=3.0e-13; /* 20181104: Never write frac_b for CMIP6! */ /* if(frc_out_dff_one_max > eps_rlt_wrt_thr) flg_frc_out_wrt=True; */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s reports global metadata specifies conservative remapping with normalization of type = %s. Furthermore, destination fractions frc_dst = dst_frac = frac_b = frc_out contain non-unity elements (maximum deviation from unity of %g exceeds hard-coded (in variable eps_rlt) relative-epsilon threshold of %g for frc_out[%ld] = %g). Thus normalization issues will be explicitly treated. 
Will apply \'destarea\' normalization (i.e., divide by non-zero frc_out[dst_idx]) to all regridded arrays.\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ),frc_out_dff_one_max,eps_rlt,idx_max_dvn,frc_out[idx_max_dvn]); if(nco_dbg_lvl_get() >= nco_dbg_std && flg_frc_out_wrt) (void)fprintf(stdout,"%s: INFO %s Maximum deviation %g exceeds threshold of %g that triggers automatic writing of fractional destination area as variable named frac_b in regridded output.\n",nco_prg_nm_get(),fnc_nm,frc_out_dff_one_max,eps_rlt_wrt_thr); } /* !sometimes non-unity */ if(nco_dbg_lvl_get() >= nco_dbg_std && flg_frc_nrm && rgr->flg_rnr){ // 20190918: Weaken from WARNING to INFO because NCO no longer renormalizes when using "destarea" maps unless specifically requested to with --rnr_thr (void)fprintf(stdout,"%s: INFO %s reports manual request to renormalize partially overlapped destination gridcells (i.e., gridcells with non-unity frc_dst = dst_frac = frac_b) to preserve mean-value of valid fraction of source gridcells (usually most useful for state variables), rather than dilute valid-fraction mean over total destination gridcell area to preserve area-integral of source data (the default, often most useful for ensuring global conservation of fluxes).\n",nco_prg_nm_get(),fnc_nm); //(void)fprintf(stdout,"%s: INFO %s reports manual request (with --rnr) to renormalize fields with non-unity frc_dst = dst_frac = frac_b at same time global metadata specifies normalization type = %s. Normalizing twice can be an error, depending on intent of each. Charlie is all ears on how NCO should handle this :)\n",nco_prg_nm_get(),fnc_nm,nco_rgr_nrm_sng(nco_rgr_nrm_typ)); //nco_exit(EXIT_FAILURE); } /* !flg_rnr */ /* Detailed summary of 2D grids now available including quality-checked coordinates and area */ if(flg_grd_out_2D && nco_dbg_lvl_get() >= nco_dbg_sbr){ lat_wgt_ttl=0.0; area_out_ttl=0.0; if(flg_grd_out_rct){ (void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm); for(idx=0;idx<lat_nbr_out;idx++) lat_wgt_ttl+=lat_wgt_out[idx]; } /* !flg_grd_out_rct */ for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) area_out_ttl+=area_out[lat_idx*lon_nbr_out+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_out_ttl,area_out_ttl/(4.0*M_PI)); if(flg_grd_out_rct){ for(idx=0;idx<lon_nbr_out;idx++) (void)fprintf(stdout,"lon[%li] = [%g, %g, %g]\n",idx,lon_bnd_out[2*idx],lon_ctr_out[idx],lon_bnd_out[2*idx+1]); for(idx=0;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li] = [%g, %g, %g]\n",idx,lat_bnd_out[2*idx],lat_ctr_out[idx],lat_bnd_out[2*idx+1]); for(idx=0;idx<lat_nbr_out;idx++) (void)fprintf(stdout,"lat[%li], wgt[%li] = %20.15f, %20.15f\n",idx,idx,lat_ctr_out[idx],lat_wgt_out[idx]); } /* !flg_grd_out_rct */ if(nco_dbg_lvl_get() > nco_dbg_crr) for(lat_idx=0;lat_idx<lat_nbr_out;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr_out;lon_idx++) (void)fprintf(stdout,"lat[%li] = %g, lon[%li] = %g, area[%li,%li] = %g\n",lat_idx,lat_ctr_out[lat_idx],lon_idx,lon_ctr_out[lon_idx],lat_idx,lon_idx,area_out[lat_idx*lon_nbr_out+lon_idx]); assert(area_out_ttl > 0.0); assert(area_out_ttl <= 4.0*M_PI + 5.0e-15); } /* !flg_grd_out_2D && !dbg */ /* Allocate space for and obtain weights and addresses */ wgt_raw=(double *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_DOUBLE),fnc_nm,"Unable to malloc() value buffer for remapping weights"); col_src_adr=(int 
*)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() value buffer for remapping addresses"); row_dst_adr=(int *)nco_malloc_dbg(mpf.num_links*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() value buffer for remapping addresses"); /* Obtain remap matrix addresses and weights from map file */ dmn_srt[0]=0L; dmn_cnt[0]=mpf.num_links; rcd=nco_get_vara(in_id,col_src_adr_id,dmn_srt,dmn_cnt,col_src_adr,NC_INT); rcd=nco_get_vara(in_id,row_dst_adr_id,dmn_srt,dmn_cnt,row_dst_adr,NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=mpf.num_links; if(nco_rgr_mpf_typ != nco_rgr_mpf_SCRIP){ rcd=nco_get_vara(in_id,wgt_raw_id,dmn_srt,dmn_cnt,wgt_raw,NC_DOUBLE); }else{ /* SCRIP mapfiles store 2D weight array remap_matrix[num_links,num_wgts] Apply only first weight for first-order conservative accuracy (i.e., area overlap) Apply all three weights for second-order conservative accuracy (by including gradients from centroid to vertices) */ dmn_srd[0]=1L; dmn_srt[1]=0L; dmn_cnt[1]=1L; dmn_srd[1]=mpf.num_wgts; rcd=nco_get_vars(in_id,wgt_raw_id,dmn_srt,dmn_cnt,dmn_srd,wgt_raw,NC_DOUBLE); } /* !SCRIP */ /* Pre-subtract one from row/column addresses (stored, by convention, as Fortran indices) to optimize later access with C indices */ size_t lnk_nbr; /* [nbr] Number of links */ size_t lnk_idx; /* [idx] Link index */ lnk_nbr=mpf.num_links; for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) row_dst_adr[lnk_idx]--; for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) col_src_adr[lnk_idx]--; if(nco_dbg_lvl_get() >= nco_dbg_io){ (void)fprintf(stdout,"idx row_dst col_src wgt_raw\n"); for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) (void)fprintf(stdout,"%li %d %d %g\n",lnk_idx,row_dst_adr[lnk_idx],col_src_adr[lnk_idx],wgt_raw[lnk_idx]); } /* endif dbg */ /* Free memory associated with input file */ if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt) dmn_cnt=(long *)nco_free(dmn_cnt); if(dmn_srd) dmn_srd=(long *)nco_free(dmn_srd); /* Close input netCDF file */ nco_close(in_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); /* Above this line, fl_in and in_id refer to map file Below this line, fl_in and in_id refer to input file to be regridded */ /* Initialize */ in_id=rgr->in_id; out_id=rgr->out_id; /* Sanity check that input data file matches expectations from mapfile */ char *col_nm_in=rgr->col_nm_in; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ char *lat_nm_in=rgr->lat_nm_in; /* [sng] Name of input dimension to recognize as latitude */ char *lon_nm_in=rgr->lon_nm_in; /* [sng] Name of input dimension to recognize as longitude */ int dmn_id_col=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lat; /* [id] Dimension ID */ int dmn_id_lon; /* [id] Dimension ID */ /* 20160503 Discover coordinates via CF Convention if indicated This copies method used in nco_grd_nfr() */ /* Begin CF-coordinates block */ cf_crd_sct *cf=NULL; char *rgr_var; /* [sng] Variable for special regridding treatment */ nco_bool flg_cf=False; /* [flg] Follow CF Coordinates convention to find and infer grid */ rgr_var=rgr->var_nm; if(rgr_var){ /* Infer grid from special variable Intended to be variable that has both horizontal dimensions and "coordinates" attribute, e.g., ncks --cdl -m ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc | grep coordinates 4LFTX_221_SPDY_S113:coordinates = "gridlat_221 gridlon_221" ; Usage: ncks -O -D 3 --rgr infer --rgr_var=ALBDO_221_SFC_S113 --rgr grid=${HOME}/grd_narr.nc ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc ~/foo.nc */ char 
crd_sng[]="coordinates"; /* CF-standard coordinates attribute name */ cf=(cf_crd_sct *)nco_malloc(sizeof(cf_crd_sct)); cf->crd=False; /* [flg] CF coordinates information is complete */ cf->crd_id[0]=NC_MIN_INT; /* [id] Coordinate ID, first */ cf->crd_id[1]=NC_MIN_INT; /* [id] Coordinate ID, second */ cf->crd_nm[0]=NULL; /* [sng] Coordinate name, first */ cf->crd_nm[1]=NULL; /* [sng] Coordinate name, second */ cf->crd_sng=NULL; /* [sng] Coordinates attribute value */ cf->dmn_id[0]=NC_MIN_INT; /* [id] Dimension ID, first */ cf->dmn_id[1]=NC_MIN_INT; /* [id] Dimension ID, second */ cf->dmn_nm[0]=NULL; /* [sng] Dimension name, first */ cf->dmn_nm[1]=NULL; /* [sng] Dimension name, second */ cf->unt_sng[0]=NULL; /* [sng] Units string, first coordinate */ cf->unt_sng[1]=NULL; /* [sng] Units string, second coordinate */ cf->var_id=NC_MIN_INT; /* [id] Coordinate variable ID */ cf->var_nm=NULL; /* [sng] Coordinates variable name */ cf->var_type=NC_NAT; /* [enm] Coordinates variable type */ if((rcd=nco_inq_varid_flg(in_id,rgr_var,&cf->var_id)) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports special \"coordinates\" variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd */ cf->crd_sng=nco_char_att_get(in_id,cf->var_id,crd_sng); if(cf->crd_sng){ cf->crd=True; }else{ /* !rcd && att_typ */ (void)fprintf(stderr,"%s: WARNING %s reports coordinates variable %s does not have character-valued \"coordinates\" attribute. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd && att_typ */ /* Valid coordinates attribute requires two coordinate names separated by space character */ char *crd_nm[NCO_MAX_CRD_PER_VAR]; /* [sng] Coordinate name start position */ char *crd_dpl; /* [sng] Modifiable duplicate of coordinates string */ char *spc_ptr; /* [sng] Pointer to space character (' ') */ int crd_nbr=0; /* [nbr] Number of names in coordinates attribute */ int crd_spt=0; /* [nbr] Number of "spatial-like" (that include "degree" in units) coordinates */ int crd_idx=0; /* [idx] Counter for coordinate names */ for(crd_idx=0;crd_idx<NCO_MAX_CRD_PER_VAR;crd_idx++) crd_nm[crd_idx]=NULL; crd_dpl=(char *)strdup(cf->crd_sng); /* Search for spaces starting from end of string */ while((spc_ptr=strrchr(crd_dpl,' '))){ crd_nm[crd_nbr]=spc_ptr+1L; crd_nbr++; /* NUL-terminate so next search ends here */ *spc_ptr='\0'; } /* !sbs_ptr */ /* Final coordinate name begins where coordinate string starts */ crd_nm[crd_nbr]=crd_dpl; /* Change crd_nbr from 0-based index to actual coordinate number */ crd_nbr++; if(crd_nbr < 2){ (void)fprintf(stderr,"%s: WARNING %s found only %d coordinate(s) in \"coordinates\" attribute \"%s\", at least two are required. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,crd_nbr,cf->crd_sng); goto skp_cf; } /* !crd_nbr */ /* If more than two coordinate names are present, choose first two (searching backwards from end) with "degree" in units attributes, otherwise just choose first two */ crd_idx=crd_spt=0; while(crd_spt < 2 && crd_idx < crd_nbr){ cf->crd_nm[crd_spt]=crd_nm[crd_idx]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[crd_spt],&cf->crd_id[crd_spt])) == NC_NOERR){ cf->unt_sng[crd_spt]=nco_char_att_get(in_id,cf->crd_id[crd_spt],unt_sng); if(cf->unt_sng[crd_spt]){ if(strcasestr(cf->unt_sng[crd_spt],"degree")){ /* Increment count of spatial-like coordinates... 
*/ crd_spt++; }else{ /* ...or free() memory allocated during search */ cf->unt_sng[crd_spt]=(char *)nco_free(cf->unt_sng[crd_spt]); } /* !strcasestr() */ } /* !rcd && att_typ */ } /* !rcd */ crd_idx++; } /* !crd_spt */ /* If while()-loop above was successful, our search is over Otherwise, use first two coordinate names regardless of units, and print more diagnostics NB: crd_idx must increment on every iteration (not only when the coordinate has a units attribute), else a units-less coordinate would loop forever */ if(crd_spt < 2){ cf->crd_nm[0]=crd_nm[0]; cf->crd_nm[1]=crd_nm[1]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[0],&cf->crd_id[0])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s not found. Turning-off CF coordinates search for this file.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0]); goto skp_cf; } /* !rcd */ if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[1],&cf->crd_id[1])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s not found. Turning-off CF coordinates search for this file.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1]); goto skp_cf; } /* !rcd */ cf->unt_sng[0]=nco_char_att_get(in_id,cf->crd_id[0],unt_sng); if(cf->unt_sng[0]){ if(!strcasestr(cf->unt_sng[0],"degrees_")) (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->unt_sng[0]); } /* !rcd && att_typ */ cf->unt_sng[1]=nco_char_att_get(in_id,cf->crd_id[1],unt_sng); if(cf->unt_sng[1]){ if(!strcasestr(cf->unt_sng[1],"degrees_")) (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1],cf->unt_sng[1]); } /* !rcd && att_typ */ } /* !crd_spt */ int crd_rnk; /* [nbr] Coordinate rank */ rcd=nco_inq_varndims(in_id,cf->crd_id[0],&crd_rnk); if(crd_rnk != 2){ (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s has %i dimension(s). Skipping CF coordinates method.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],crd_rnk); goto skp_cf; } /* !crd_rnk */ rcd=nco_inq_vardimid(in_id,cf->crd_id[0],cf->dmn_id); cf->dmn_nm[0]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); cf->dmn_nm[1]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); rcd=nco_inq_dimname(in_id,cf->dmn_id[0],cf->dmn_nm[0]); rcd=nco_inq_dimname(in_id,cf->dmn_id[1],cf->dmn_nm[1]); /* "coordinates" convention does not guarantee lat, lon are specified in that order Use "units" values, if any, to determine order In absence of "units", assume order is lat, lon */ nco_bool crd0_is_lat=False; /* [flg] First coordinate is latitude */ nco_bool crd0_is_lon=False; /* [flg] First coordinate is longitude */ nco_bool crd1_is_lat=False; /* [flg] Second coordinate is latitude */ nco_bool crd1_is_lon=False; /* [flg] Second coordinate is longitude */ if(cf->unt_sng[0]){ if(!strcasecmp(cf->unt_sng[0],"degrees_north")) crd0_is_lat=True; if(!strcasecmp(cf->unt_sng[0],"degrees_east")) crd0_is_lon=True; } /* endif */ if(cf->unt_sng[1]){ if(!strcasecmp(cf->unt_sng[1],"degrees_north")) crd1_is_lat=True; if(!strcasecmp(cf->unt_sng[1],"degrees_east")) crd1_is_lon=True; } /* endif */ assert((crd0_is_lat && crd1_is_lon) || (crd0_is_lon && crd1_is_lat)); int idx_lat; int idx_lon; if(crd0_is_lat && crd1_is_lon){ idx_lat=0; idx_lon=1; }else{ idx_lat=1; idx_lon=0; } /* endif */ /* Dimensions and coordinates have been vetted. Store as primary lookup names. 
Dimensions are always returned in order [LRV,MRV]=[0,1] LRV is along-track direction, and MRV is across-track (at least in NASA data) Internally we label LRV as "lat" and MRV as "lon" so that code looks similar for curvilinear and rectangular grids */ dmn_id_lat=cf->dmn_id[0]; dmn_id_lon=cf->dmn_id[1]; /* Subtlety: lat_nm_in is coordinate (variable+dimension) name when specified from command-line (as in nco_grd_nfr()), dimension name when found through CF-method (as in nco_rgr_wgt()). This confusing distinction could be avoided by passing command-line dimension names through to nco_rgr_wgt(). However, that route would require complex priorities for what to do when passing command-line coordinate names not dimension names and vice-versa. */ lat_nm_in=strdup(cf->dmn_nm[0]); lon_nm_in=strdup(cf->dmn_nm[1]); //lat_nm_in=strdup(cf->crd_nm[idx_lat]); //lon_nm_in=strdup(cf->crd_nm[idx_lon]); /* Next four lines unnecessary in nco_rgr_wgt() which only needs dimension names (it reads input coordinates from map-file not data-file) */ //lat_ctr_id=cf->crd_id[idx_lat]; //lon_ctr_id=cf->crd_id[idx_lon]; //lat_dmn_nm=strdup(cf->dmn_nm[0]); //lon_dmn_nm=strdup(cf->dmn_nm[1]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s \"coordinates\" attribute \"%s\" points to coordinates %s and %s. Latitude coordinate \"%s\" has dimensions \"%s\" and \"%s\". Longitude coordinate \"%s\" has dimensions \"%s\" and \"%s\".\n",nco_prg_nm_get(),fnc_nm,rgr_var,cf->crd_sng,cf->crd_nm[0],cf->crd_nm[1],cf->crd_nm[idx_lat],cf->dmn_nm[idx_lat],cf->dmn_nm[idx_lon],cf->crd_nm[idx_lon],cf->dmn_nm[idx_lat],cf->dmn_nm[idx_lon]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s Coordinates %s and %s \"units\" values are \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->crd_nm[1],cf->unt_sng[0] ? cf->unt_sng[0] : "(non-existent)",cf->unt_sng[1] ? cf->unt_sng[1] : "(non-existent)"); /* Clean-up CF coordinates memory */ if(crd_dpl) crd_dpl=(char *)nco_free(crd_dpl); if(cf->crd_sng) cf->crd_sng=(char *)nco_free(cf->crd_sng); if(cf->dmn_nm[0]) cf->dmn_nm[0]=(char *)nco_free(cf->dmn_nm[0]); if(cf->dmn_nm[1]) cf->dmn_nm[1]=(char *)nco_free(cf->dmn_nm[1]); if(cf->unt_sng[0]) cf->unt_sng[0]=(char *)nco_free(cf->unt_sng[0]); if(cf->unt_sng[1]) cf->unt_sng[1]=(char *)nco_free(cf->unt_sng[1]); // if(foo) foo=(char *)nco_free(foo); } /* !rgr_var */ /* goto skp_cf */ skp_cf: /* free() any abandoned cf structure now */ if(!flg_cf) if(cf) cf=(cf_crd_sct *)nco_free(cf); rcd=NC_NOERR; /* End CF-coordinates block */ if(flg_grd_in_1D){ long col_nbr_in_dat; /* [nbr] Number of columns in input datafile */ /* Check default or command-line option first, then search usual suspects, and if that fails then guess unstructured dimension is dimension in input file with size n_a expected by input map file, suggested by PJCS Using internal database names first ensures users can pick between multiple dimensions of size n_a 20180313: fxm New PJCS algorithm is superior, should eliminate internal database for unstructured grids? 
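(The PJCS size-matching algorithm is implemented as the fallback below: scan every dimension in the data file for one whose size equals the map-file n_a = src_grid_dims[0])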
Database is necessary for 2D grids because otherwise no good way to disambiguate latitude from longitude */ if(col_nm_in && (rcd=nco_inq_dimid_flg(in_id,col_nm_in,&dmn_id_col)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"lndgrid",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */ else if((rcd=nco_inq_dimid_flg(in_id,"nCells",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("nCells"); /* MPAS-O/I */ else if((rcd=nco_inq_dimid_flg(in_id,"nEdges",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("nEdges"); /* MPAS-O/I */ else if((rcd=nco_inq_dimid_flg(in_id,"ncol_d",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("ncol_d"); /* EAM dynamics grid */ else if((rcd=nco_inq_dimid_flg(in_id,"ncol_p",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("ncol_p"); /* EAM physics grid */ else if((rcd=nco_inq_dimid_flg(in_id,"sounding_id",&dmn_id_col)) == NC_NOERR) col_nm_in=strdup("sounding_id"); /* OCO2 */ /* 20180605: Database matches to above names may be false-positives ALM/CLM/CTSM/ELM store all possible dimension names that archived variables could use NCO only prints dimensions used in variables, while ncdump prints all dimensions From ncdump we find usually unused ALM/CLM/CTSM/ELM dimensions: gridcell, lndunit, column, pft, levurb, numrad, levsno Check that matched dimension has expected size: */ if(dmn_id_col != NC_MIN_INT){ rcd=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr_in_dat); if(col_nbr_in != col_nbr_in_dat){ dmn_id_col=NC_MIN_INT; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s database-prioritized unstructured dimension candidate \"%s\" has size not expected by supplied map-file: mapfile col_nbr_in = %ld != %ld = col_nbr_in from datafile. HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,col_nm_in,col_nbr_in,col_nbr_in_dat); } /* !col_nbr_in */ }else{ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s expects data on an unstructured grid yet cannot find a dimension name that matches the usual suspects for unstructured dimensions (ncol, gridcell, lndgrid, nCells, nEdges, sounding_id). 
Consider specifying horizontal dimension name to ncks with \"--rgr col_nm=foo\" or to ncremap with \"ncremap -R '--rgr col_nm=foo'\", and consider requesting the NCO project to add this horizontal dimension name to its internal database.\n",nco_prg_nm_get(),fnc_nm); } /* !dmn_id_col */ if(dmn_id_col == NC_MIN_INT){ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s Proceeding with fallback algorithm to guess unstructured dimension as first dimension in data file of equal size to that expected by supplied map-file...\n",nco_prg_nm_get(),fnc_nm); /* 20180312: Unstructured dimension must have same size as input map file, suggested by PJCS */ int *dmn_ids_in; /* [nbr] Input file dimension IDs */ int dmn_nbr_in; /* [nbr] Number of dimensions in input file */ const int flg_prn=0; /* [enm] Parent flag */ rcd=nco_inq_dimids(in_id,&dmn_nbr_in,NULL,flg_prn); dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); rcd=nco_inq_dimids(in_id,NULL,dmn_ids_in,flg_prn); /* Find dimension, if any, with same size as map "a" src_grid_dims[0] = n_a dimension */ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ dmn_id_col=dmn_ids_in[dmn_idx]; rcd=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr_in_dat); if(col_nbr_in == col_nbr_in_dat){ rcd=nco_inq_dimname(in_id,dmn_id_col,col_nm_in); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s found that dimension %s in datafile has same size (n_a = %ld) expected by map-file. Assuming %s is the unstructured dimension.\n",nco_prg_nm_get(),fnc_nm,col_nm_in,col_nbr_in,col_nm_in); break; } /* !col_nbr_in */ } /* !dmn_idx */ if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in); if(dmn_idx == dmn_nbr_in){ dmn_id_col=NC_MIN_INT; (void)fprintf(stdout,"%s: WARNING received a map-file constructed to process data on an unstructured (one-dimensional) grid, but %s (aka \"the regridder\") cannot find a dimension in the input data file (or, with ncremap, a possibly already subsetted intermediate file) that matches the size of the unstructured dimension in the supplied map-file = src_grd_dims[0] = n_a = %ld.\nHINT: Ensure at least one member of the variable extraction list has a spatial dimension of size = %ld\n",nco_prg_nm_get(),fnc_nm,col_nbr_in,col_nbr_in); (void)fprintf(stdout,"%s: INFO %s reports a third, last-ditch (aka \"Hail Mary\") workaround may work. 
The Hail-Mary allows logically 1D map-files to regrid logically 2D datasets, so long as the product of the horizontal dimension sizes in the 2D input data file equals the map-file 1D dimension size.\n",nco_prg_nm_get(),fnc_nm); /* Hail Mary algorithm: Use following 2D input grid block to identify horizontal coordinates and dimensions */ flg_grd_in_1D_dat_in_2D=True; flg_grd_in_2D=True; //nco_exit(EXIT_FAILURE); } /* !dmn_idx */ } /* !col_nm_in */ } /* !1D */ if(flg_grd_in_2D){ long lat_nbr_in_dat; /* [nbr] Number of latitudes in input datafile */ if(lat_nm_in && (rcd=nco_inq_dimid_flg(in_id,lat_nm_in,&dmn_id_lat)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"lat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("lat"); else if((rcd=nco_inq_dimid_flg(in_id,"Latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"Lat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Lat"); else if((rcd=nco_inq_dimid_flg(in_id,"south_north",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("south_north"); /* WRF */ else if((rcd=nco_inq_dimid_flg(in_id,"south_north_stag",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("south_north_stag"); else if((rcd=nco_inq_dimid_flg(in_id,"YDim:location",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("YDim:location"); /* AIRS L3 */ else if((rcd=nco_inq_dimid_flg(in_id,"YDim:MOD_Grid_monthly_CMG_VI",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("YDim:MOD_Grid_monthly_CMG_VI"); /* MODIS MOD13C2 */ else if((rcd=nco_inq_dimid_flg(in_id,"natrack",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("natrack"); /* MODIS DeepBlue SeaWiFS L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"nj",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nj"); /* CICE RTM */ else if((rcd=nco_inq_dimid_flg(in_id,"lsmlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("lsmlat"); /* CISM/CLM/ELM */ else if((rcd=nco_inq_dimid_flg(in_id,"nlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nlat"); /* POP */ else if((rcd=nco_inq_dimid_flg(in_id,"rlat",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("rlat"); /* RACMO */ else if((rcd=nco_inq_dimid_flg(in_id,"nscan",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nscan"); /* AMSR, TRMM */ else if((rcd=nco_inq_dimid_flg(in_id,"nTimes",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("nTimes"); /* OMI L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"number_of_lines",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("number_of_lines"); /* DSCOVR L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoTrack",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("GeoTrack"); /* AIRS L2 DAP NC */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoTrack:L2_Standard_atmospheric&surface_product",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("GeoTrack:L2_Standard_atmospheric&surface_product"); /* AIRS L2 HDF */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Along_Swath:mod04",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Cell_Along_Swath:mod04"); /* MODIS MOD04 L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Along_Swath_mod04",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("Cell_Along_Swath_mod04"); /* MODIS MOD04 L2 (ncl_convert2nc changes colon to underscore) */ else if((rcd=nco_inq_dimid_flg(in_id,"CO_Latitude",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("CO_Latitude"); else if((rcd=nco_inq_dimid_flg(in_id,"j",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("j"); /* CMIP5 NorESM1 ocean */ else if((rcd=nco_inq_dimid_flg(in_id,"latitude0",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("latitude0"); /* 
Oxford */ else if((rcd=nco_inq_dimid_flg(in_id,"y",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("y"); /* NEMO */ else if((rcd=nco_inq_dimid_flg(in_id,"x",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("x"); /* NSIDC polar stereographic (NB: unfortunate incompatible conflict between NEMO & NSIDC names) */ else if((rcd=nco_inq_dimid_flg(in_id,"y1",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("y1"); /* NSIDC EASE */ else if((rcd=nco_inq_dimid_flg(in_id,"ygrid",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("ygrid"); /* SSM/I */ else if((rcd=nco_inq_dimid_flg(in_id,"ygrid_0",&dmn_id_lat)) == NC_NOERR) lat_nm_in=strdup("ygrid_0"); /* NWS HRRR */ else{ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports unable to find latitude dimension in input file. Tried the usual suspects. HINT: Inform regridder of input latitude dimension name with \"ncks --rgr lat_nm_in=name\" or \"ncremap -R '--rgr lat_nm_in=name'\"\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lat */ rcd=nco_inq_dimlen(in_id,dmn_id_lat,&lat_nbr_in_dat); if(lat_nbr_in != lat_nbr_in_dat && !flg_grd_in_1D_dat_in_2D){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports mapfile and data file dimension sizes disagree: mapfile lat_nbr_in = %ld != %ld = lat_nbr_in from datafile. HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,lat_nbr_in,lat_nbr_in_dat); nco_exit(EXIT_FAILURE); } /* !err */ long lon_nbr_in_dat; /* [nbr] Number of longitudes in input datafile */ if(lon_nm_in && (rcd=nco_inq_dimid_flg(in_id,lon_nm_in,&dmn_id_lon)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"longitude",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("longitude"); else if((rcd=nco_inq_dimid_flg(in_id,"lon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("lon"); else if((rcd=nco_inq_dimid_flg(in_id,"Longitude",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Longitude"); else if((rcd=nco_inq_dimid_flg(in_id,"Lon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Lon"); else if((rcd=nco_inq_dimid_flg(in_id,"west_east",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("west_east"); /* WRF */ else if((rcd=nco_inq_dimid_flg(in_id,"west_east_stag",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("west_east_stag"); else if((rcd=nco_inq_dimid_flg(in_id,"XDim:location",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("XDim:location"); /* AIRS L3 */ else if((rcd=nco_inq_dimid_flg(in_id,"XDim:MOD_Grid_monthly_CMG_VI",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("XDim:MOD_Grid_monthly_CMG_VI"); /* MODIS MOD13C2 */ else if((rcd=nco_inq_dimid_flg(in_id,"ni",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("ni"); /* CICE RTM */ else if((rcd=nco_inq_dimid_flg(in_id,"lsmlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("lsmlon"); /* CISM/CLM/ELM */ else if((rcd=nco_inq_dimid_flg(in_id,"nlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nlon"); /* POP */ else if((rcd=nco_inq_dimid_flg(in_id,"rlon",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("rlon"); /* RACMO */ else if((rcd=nco_inq_dimid_flg(in_id,"npix",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("npix"); /* AMSR */ else if((rcd=nco_inq_dimid_flg(in_id,"npixel",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("npixel"); /* TRMM */ else if((rcd=nco_inq_dimid_flg(in_id,"nxtrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nxtrack"); /* MODIS DeepBlue SeaWiFS L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"nXtrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("nXtrack"); /* OMI L2 */ else 
if((rcd=nco_inq_dimid_flg(in_id,"number_of_pixels",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("number_of_pixels"); /* DSCOVR L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoXTrack",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("GeoXTrack"); /* AIRS L2 DAP NC */ else if((rcd=nco_inq_dimid_flg(in_id,"GeoXTrack:L2_Standard_atmospheric&surface_product",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("GeoXTrack:L2_Standard_atmospheric&surface_product"); /* AIRS L2 HDF */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Across_Swath:mod04",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Cell_Across_Swath:mod04"); /* MODIS MOD04 L2 */ else if((rcd=nco_inq_dimid_flg(in_id,"Cell_Across_Swath_mod04",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("Cell_Across_Swath_mod04"); /* MODIS MOD04 L2 (ncl_convert2nc changes colon to underscore) */ else if((rcd=nco_inq_dimid_flg(in_id,"i",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("i"); /* CMIP5 NorESM1 ocean */ else if((rcd=nco_inq_dimid_flg(in_id,"longitude0",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("longitude0"); /* Oxford */ else if((rcd=nco_inq_dimid_flg(in_id,"x",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("x"); /* NEMO */ else if((rcd=nco_inq_dimid_flg(in_id,"y",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("y"); /* NSIDC polar stereographic (NB: unfortunate incompatible conflict between NEMO & NSIDC names) */ else if((rcd=nco_inq_dimid_flg(in_id,"x1",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("x1"); /* NSIDC EASE */ else if((rcd=nco_inq_dimid_flg(in_id,"xgrid",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("xgrid"); /* SSM/I */ else if((rcd=nco_inq_dimid_flg(in_id,"xgrid_0",&dmn_id_lon)) == NC_NOERR) lon_nm_in=strdup("xgrid_0"); /* NWS HRRR */ else{ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports unable to find longitude dimension in input file. Tried the usual suspects. HINT: Inform regridder of input longitude dimension name with \"ncks --rgr lon_nm_in=name\" or \"ncremap -R '--rgr lon_nm_in=name'\"\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lon */ rcd=nco_inq_dimlen(in_id,dmn_id_lon,&lon_nbr_in_dat); if(lon_nbr_in != lon_nbr_in_dat && !flg_grd_in_1D_dat_in_2D){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") reports mapfile and data file dimension sizes disagree: mapfile lon_nbr_in = %ld != %ld = lon_nbr_in from datafile. 
HINT: Check that source grid (i.e., \"grid A\") used to create mapfile matches grid on which data are stored in input datafile.\n",nco_prg_nm_get(),fnc_nm,lon_nbr_in,lon_nbr_in_dat); nco_exit(EXIT_FAILURE); } /* !err */ if(flg_grd_in_1D_dat_in_2D){ if(lon_nbr_in_dat*lat_nbr_in_dat == col_nbr_in){ (void)fprintf(stdout,"%s: INFO %s Hail Mary algorithm reports tentative success in that product of identified horizontal dimension sizes in the 2D input data file equals the map-file 1D dimension size = %ld.\n",nco_prg_nm_get(),fnc_nm,col_nbr_in); lat_nbr_in=lat_nbr_in_dat; lon_nbr_in=lon_nbr_in_dat; }else{ /* !col_nbr_in */ (void)fprintf(stdout,"%s: ERROR %s Hail Mary algorithm reports final failure since product of identified horizontal dimension sizes in the 2D input data file does not equal the map-file 1D dimension size = %ld.\n",nco_prg_nm_get(),fnc_nm,col_nbr_in); nco_exit(EXIT_FAILURE); } /* !col_nbr_in */ } /* !flg_grd_in_1D_dat_in_2D */ } /* !2D */ /* Do not extract grid variables (that are also extensive variables) like lon, lat, area, and masks If necessary, use remap data to diagnose them from scratch Other extensive variables (like counts, population) will be extracted and summed not averaged */ /* Exception list source: ALM/CLM: landmask (20170504: Debatable, including erroneous mask may be better than completely excluding an expected mask) (20170504: must keep landfrac since regridded by ncremap for SGS option) AMSR: Latitude, Longitude CAM, CERES, CMIP5: lat, lon CAM, CMIP5: gw, lat_bnds, lon_bnds CAM-FV: slon, slat, w_stag (w_stag is weights for slat grid, analogous to gw for lat grid) CAM-SE, EAM, MOSART: area CICE: latt_bounds, lont_bounds, latu_bounds, lonu_bounds, TLAT, TLON, ULAT, ULON (NB: CICE uses ?LON and POP uses ?LONG) (aice is ice area, tmask is state-variable mask, both not currently excluded, although all binary masks like tmask should be recomputed on new grid) CISM/CLM/ELM: LATIXY, LONGXY (glacier mask files) DSCOVR L2: latitude, longitude ESMF: gridcell_area GPM: S1_Latitude, S1_Longitude HIRDLS: Latitude MAR/RACMO: LAT, LON MLS: CO_Latitude MPAS-O/I/LI: areaCell, latCell, lonCell and others that are all handled by separated MPAS convention implementation below NCO: lat_vertices, lon_vertices NEMO: nav_lat, nav_lon NWS HRRR: gridlat_0, gridlon_0 OCO2: latitude_bnds, longitude_bnds OMI DOMINO: Latitude, LatitudeCornerpoints, Longitude, LongitudeCornerpoints Oxford: global_latitude0, global_longitude0, latitude0, longitude0 POP: TLAT, TLONG, ULAT, ULONG (NB: CICE uses ?LON and POP uses ?LONG) (POP does not archive spatial bounds) RACMO: rlat, rlon TRMM: Latitude, Longitude UV-CDAT regridder: bounds_lat, bounds_lon Unknown: XLAT_M, XLONG_M WRF: XLAT, XLONG */ const int var_xcl_lst_nbr=54; /* [nbr] Number of objects on exclusion list */ const char *var_xcl_lst[]={"/area","/gridcell_area","/gw","/LAT","/lat","/Latitude","/latitude","/nav_lat","/global_latitude0","/gridlat_0","/latitude0","/rlat","/slat","/LATIXY","/LONGXY","/TLAT","/ULAT","/XLAT","/XLAT_M","/CO_Latitude","/S1_Latitude","/lat_bnds","/lat_vertices","/latt_bounds","/latu_bounds","/latitude_bnds","/LatitudeCornerpoints","/bounds_lat","/LON","/lon","/Longitude","/longitude","/nav_lon","/global_longitude0","/gridlon_0","/longitude0","/rlon","/slon","/TLON","/TLONG","/ULON","/ULONG","/XLONG","/XLONG_M","/CO_Longitude","/S1_Longitude","/lon_bnds","/lon_vertices","/lont_bounds","/lonu_bounds","/longitude_bnds","/LongitudeCornerpoints","/bounds_lon","/w_stag"}; /* NB: list contains 54 entries; names lead with slash because they are compared against full-path names nm_fll; gridlat_0/gridlon_0 previously lacked the slash and could never match */ int var_cpy_nbr=0; /* [nbr] Number of copied variables 
*/ int var_rgr_nbr=0; /* [nbr] Number of regridded variables */ int var_xcl_nbr=0; /* [nbr] Number of deleted variables */ int var_crt_nbr=0; /* [nbr] Number of created variables */ int var_xtn_nbr=0; /* [nbr] Number of extensive variables */ unsigned int idx_tbl; /* [idx] Counter for traversal table */ const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */ for(idx=0;idx<var_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,var_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* endif */ } /* !idx */ cnv_sct *cnv; /* [sct] Convention structure */ /* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */ cnv=nco_cnv_ini(in_id); if(cnv->MPAS){ /* 20160228: MPAS has a host of mysterious grid and extensive variables that should probably not be regridded 20180206: Add from MPAS-LI xCell, yCell, zCell, and [xyz]Edge, and [xyz]Vertex 20180917: Restrict exclusion list to a subset of variables with nCells-dimension Six nCells-variables may be valuable when regridded to lat/lon mpas_xcl_lst in nco_rgr_wgt() and MPAS var_xcl_lst in nco_var_is_fix() differ by these six variables: areaCell for comparison to area(lat,lon) cellMask for area-weighted mask maxLevelCell for area-weighted underwater topographic mask xCell, yCell, zCell for area-weighted cartesian coordinates 20180918: Regridder currently only works on cell-based coordinates Decided regridder will omit not copy fields on vertex- or edge-based coordinates until it can regrid them Regridding vertex- or edge-based fields would require new sparse matrix for vertices or edges How would ERWG or TempestRemap handle that? 
MPAS geophysical variables on vertex-based (not cell-based) coordinates include: avg_airStressVertexUGeo_1, avg_airStressVertexVGeo_1, uOceanVelocityVertexGeo_1, uVelocityGeo_1, vOceanVelocityVertexGeo_1, vVelocityGeo_1 MPAS geophysical variables on edge-based (not cell-based) coordinates include: principalStress1Var_1, principalStress2Var_1 */ const int mpas_xcl_lst_nbr=35; const char *mpas_xcl_lst[]={"/angleEdge","/areaTriangle","/cellsOnCell","/cellsOnEdge","/cellsOnVertex","/dcEdge","/dvEdge","/edgeMask","/edgesOnCell","/edgesOnEdge","/edgesOnVertex","/indexToCellID","/indexToEdgeID","/indexToVertexID","/kiteAreasOnVertex","/latCell","/latEdge","/latVertex","/lonCell","/lonEdge","/lonVertex","/maxLevelEdgeTop","/meshDensity","/nEdgesOnCell","/nEdgesOnEdge","/vertexMask","/verticesOnCell","/verticesOnEdge","/weightsOnEdge","/xEdge","/yEdge","/zEdge","/xVertex","/yVertex","/zVertex"}; for(idx=0;idx<mpas_xcl_lst_nbr;idx++){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++) if(!strcmp(trv_tbl->lst[idx_tbl].nm_fll,mpas_xcl_lst[idx])) break; if(idx_tbl < trv_nbr){ if(trv_tbl->lst[idx_tbl].flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) pre-defined MPAS exclusion-list variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); var_xcl_nbr++; } /* endif */ trv_tbl->lst[idx_tbl].flg_xtr=False; } /* endif */ } /* !idx */ } /* !MPAS */ char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */ int dmn_nbr_in; /* [nbr] Number of dimensions in input variable */ int dmn_nbr_out; /* [nbr] Number of dimensions in output variable */ nco_bool has_lon; /* [flg] Contains longitude dimension */ nco_bool has_lat; /* [flg] Contains latitude dimension */ trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */ /* Define regridding flag for each variable */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ has_lon=False; has_lat=False; if(flg_grd_in_2D){ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ /* Pre-determine flags necessary during next loop */ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* fxm: Generalize to include any variable containing two coordinates with "standard_name" = "latitude" and "longitude" */ if(!has_lon) has_lon=!strcmp(dmn_nm_cp,lon_nm_in); if(!has_lat) has_lat=!strcmp(dmn_nm_cp,lat_nm_in); } /* end loop over dimensions */ } /* !flg_grd_in_2D */ for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; /* Regrid variables containing the horizontal spatial dimension on 1D grids, and both latitude and longitude on 2D grids */ if(!strcmp(dmn_nm_cp,col_nm_in) || (has_lon && has_lat)){ trv_tbl->lst[idx_tbl].flg_rgr=True; var_rgr_nbr++; break; } /* endif */ } /* end loop over dimensions */ if(dmn_idx == dmn_nbr_in){ /* Not regridded, so must be omitted or copied... 
*/ if(flg_grd_in_2D && (has_lon || has_lat)){ /* Single spatial dimensional variables on 2D input grids are likely extensive (e.g., grd_mrd_lng from bds) These could be salvaged with explicit rules or implicit assumptions */ trv_tbl->lst[idx_tbl].flg_xtr=False; var_xcl_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO automatically omitting (not copying or regridding from input) extensive-seeming (e.g., 1D spatial variable in 2D input grid, or 2D spatial variable without primary grid dimensions from multi-grid file (e.g., west_east_stag or south_north_stag instead of west_east or south_north)) variable %s\n",nco_prg_nm_get(),trv_tbl->lst[idx_tbl].nm_fll); }else{ /* !omitted */ /* Copy all variables that are not regridded or omitted */ var_cpy_nbr++; } /* !omitted */ } /* endif not regridded */ } /* end nco_obj_typ_var */ } /* end idx_tbl */ if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit regridding criteria. The regridder expects something to regrid, and variables not regridded are copied straight to output. HINT: If the name(s) of the input horizontal spatial dimensions to be regridded (e.g., latitude and longitude or column) do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"latitude\", \"longitude\", and \"ncol\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#regrid, e.g., \"ncks --rgr col=lndgrid --rgr lat=north\" or \"ncremap -R '--rgr col=lndgrid --rgr lat=north'\".\n",nco_prg_nm_get(),fnc_nm); for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.flg_rgr){ for(int xtn_idx=0;xtn_idx<rgr->xtn_nbr;xtn_idx++){ /* 20150927: Extensive variable treatments are still in alpha-development Currently testing on AIRS TSurfStd_ct (by summing not averaging) In future may consider variables that need more complex (non-summing) extensive treatment MPAS-O/I has a zillion of these [xyz]Cell, cellsOnCell, fCell, indexToCellID, maxLevelCell, meshDensity Not to mention the variables that depend on nEdges and nVertices... */ if(!strcmp(trv.nm,rgr->xtn_var[xtn_idx])){ trv_tbl->lst[idx_tbl].flg_xtn=True; var_xtn_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"%s: INFO Variable %s will be treated as extensive (summed not averaged)\n",nco_prg_nm_get(),trv.nm_fll); } /* !strcmp */ } /* !xtn_idx */ } /* !flg_rgr */ } /* !idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_sbr){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Regrid %s? %s\n",trv.nm,trv.flg_rgr ? 
"Yes" : "No"); } /* end idx_tbl */ } /* end dbg */ /* Lay-out regridded file */ aed_sct aed_mtd; char *area_nm_out; char *att_nm; char *bnd_nm_out; char *bnd_tm_nm_out; char *col_nm_out; char *frc_nm_out; char *lat_bnd_nm_out; char *lat_dmn_nm_out; char *lat_nm_out; char *lat_wgt_nm; char *lon_bnd_nm_out; char *lon_dmn_nm_out; char *lon_nm_out; char *msk_nm_out; char *slat_nm_out=NULL; char *slat_wgt_nm_out=NULL; char *slon_nm_out=NULL; int dmn_id_bnd; /* [id] Dimension ID */ int dmn_id_bnd_tm; /* [id] Dimension ID */ int dmn_id_slat; /* [id] Dimension ID */ int dmn_id_slon; /* [id] Dimension ID */ int area_out_id; /* [id] Variable ID for area */ int frc_out_id; /* [id] Variable ID for fraction */ int lon_out_id; /* [id] Variable ID for longitude */ int lat_out_id; /* [id] Variable ID for latitude */ int lat_wgt_id; /* [id] Variable ID for latitude weight */ int lon_bnd_id; /* [id] Variable ID for lon_bnds/lon_vertices */ int lat_bnd_id; /* [id] Variable ID for lat_bnds/lat_vertices */ int msk_out_id; /* [id] Variable ID for mask */ int slat_out_id; /* [id] Variable ID for staggered latitude */ int slat_wgt_id; /* [id] Variable ID for staggered latitude weight */ int slon_out_id; /* [id] Variable ID for staggered longitude */ int dmn_ids_out[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */ long dmn_srt_out[dmn_nbr_grd_max]; long dmn_cnt_tuo[dmn_nbr_grd_max]; /* Name output dimensions/variables */ area_nm_out=rgr->area_nm; bnd_tm_nm_out=rgr->bnd_tm_nm; frc_nm_out=rgr->frc_nm; lat_bnd_nm_out=rgr->lat_bnd_nm; lat_wgt_nm=rgr->lat_wgt_nm; lon_bnd_nm_out=rgr->lon_bnd_nm; msk_nm_out=rgr->msk_nm; /* Use explicitly specified output names, if any, otherwise use input names (either explicitly specified or discovered by fuzzing) */ if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=col_nm_in; if(rgr->lat_dmn_nm) lat_dmn_nm_out=rgr->lat_dmn_nm; else lat_dmn_nm_out=lat_nm_in; if(rgr->lon_dmn_nm) lon_dmn_nm_out=rgr->lon_dmn_nm; else lon_dmn_nm_out=lon_nm_in; if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=lat_nm_in; if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=lon_nm_in; if(flg_grd_out_1D){ bnd_nm_out=rgr->vrt_nm; lat_bnd_nm_out=rgr->lat_vrt_nm; lon_bnd_nm_out=rgr->lon_vrt_nm; } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ bnd_nm_out=rgr->bnd_nm; } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ bnd_nm_out=rgr->bnd_tm_nm; /* NB: default to bnd_tm_nm for spatial bounds */ } /* !flg_grd_out_rct */ if(flg_grd_out_2D){ lat_bnd_nm_out=rgr->lat_bnd_nm; lon_bnd_nm_out=rgr->lon_bnd_nm; } /* !flg_grd_out_2D */ if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ slat_nm_out=strdup("slat"); slat_wgt_nm_out=strdup("w_stag"); slon_nm_out=strdup("slon"); } /* !nco_grd_lat_fv */ /* Ensure temporal bounds dimension name is distinct from spatial bounds when their sizes differ */ if(bnd_nbr_out != bnd_tm_nbr_out){ if(!strcmp(bnd_nm_out,bnd_tm_nm_out)){ (void)fprintf(stdout,"%s: INFO %s reports spatial and temporal output bounds dimensions are identical (and named \"%s\") by default for rectangular output grids because both can be stored as 2D arrays. That cannot work for this mapping because temporal and spatial bounds dimensions sizes differ (bnd_nbr_out = %d, bnd_tm_nbr_out = %d). Using fall-back spatial bounds name \"%s\" instead. 
HINT: You may change one or both manually with \"ncks --rgr bnd_nm=name\" or \"ncks --rgr bnd_tm_nm=name\", or, using ncremap, with \"ncremap -R '--rgr bnd_nm=name'\" or \"ncremap -R '--rgr bnd_tm_nm=name'\"\n",nco_prg_nm_get(),fnc_nm,bnd_tm_nm_out,bnd_nbr_out,bnd_tm_nbr_out,bnd_nm_out); } /* !strcmp() */ } /* !bnd_nbr_out */ /* Persistent metadata */ aed_sct aed_mtd_crd; char *att_val_crd=NULL; char *att_nm_crd=NULL; att_nm_crd=strdup("coordinates"); aed_mtd_crd.att_nm=att_nm_crd; if(flg_grd_out_1D || flg_grd_out_crv) aed_mtd_crd.mode=aed_overwrite; else aed_mtd_crd.mode=aed_delete; aed_mtd_crd.type=NC_CHAR; aed_mtd_crd.sz=strlen(lat_nm_out)+strlen(lon_nm_out)+1L; att_val_crd=(char *)nco_malloc((aed_mtd_crd.sz+1L)*nco_typ_lng(aed_mtd_crd.type)); (void)sprintf(att_val_crd,"%s %s",lat_nm_out,lon_nm_out); aed_mtd_crd.val.cp=att_val_crd; /* Reminder: Regridder area_out options, e.g., --rgr area_out, set flg_area_out to control adding "area" variable to regridded output Regridder cll_msr options, --rgr cll_msr, set flg_cll_msr to control adding "cell_measures" attribute to regridded output ncks & ncra cll_msr options, --cll_msr, set EXTRACT_CLL_MSR to control adding "cell_measures" variables (e.g., area) to extraction list of input file EXTRACT_CLL_MSR supersedes --rgr area_out in determining whether to add "area" to regridded output */ nco_bool flg_area_out=rgr->flg_area_out; /* [flg] Add area to output */ nco_bool flg_cll_msr=rgr->flg_cll_msr; /* [flg] Add cell_measures attribute */ aed_sct aed_mtd_cll_msr; char *att_nm_cll_msr=NULL; char *att_val_cll_msr=NULL; if(flg_cll_msr){ att_nm_cll_msr=strdup("cell_measures"); aed_mtd_cll_msr.att_nm=att_nm_cll_msr; aed_mtd_cll_msr.mode=aed_overwrite; aed_mtd_cll_msr.type=NC_CHAR; att_val_cll_msr=(char *)nco_malloc((strlen(area_nm_out)+6L+1L)*nco_typ_lng(aed_mtd_cll_msr.type)); (void)sprintf(att_val_cll_msr,"area: %s",area_nm_out); aed_mtd_cll_msr.sz=strlen(att_val_cll_msr); aed_mtd_cll_msr.val.cp=att_val_cll_msr; } /* !flg_cll_msr */ /* Define new horizontal dimensions before all else */ if(flg_grd_out_1D){ rcd+=nco_def_dim(out_id,col_nm_out,col_nbr_out,&dmn_id_col); } /* !flg_grd_out_1D */ if(flg_grd_out_2D){ rcd+=nco_def_dim(out_id,lat_dmn_nm_out,lat_nbr_out,&dmn_id_lat); rcd+=nco_def_dim(out_id,lon_dmn_nm_out,lon_nbr_out,&dmn_id_lon); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd+=nco_def_dim(out_id,slat_nm_out,slat_nbr_out,&dmn_id_slat); rcd+=nco_def_dim(out_id,slon_nm_out,slon_nbr_out,&dmn_id_slon); } /* !nco_grd_lat_fv */ } /* !flg_grd_out_2D */ /* If dimension has not been defined, define it */ rcd=nco_inq_dimid_flg(out_id,bnd_tm_nm_out,&dmn_id_bnd_tm); if(rcd != NC_NOERR) rcd=nco_def_dim(out_id,bnd_tm_nm_out,bnd_tm_nbr_out,&dmn_id_bnd_tm); /* If dimension has not been defined, define it */ rcd=nco_inq_dimid_flg(out_id,bnd_nm_out,&dmn_id_bnd); if(rcd != NC_NOERR) rcd=nco_def_dim(out_id,bnd_nm_out,bnd_nbr_out,&dmn_id_bnd); char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */ char *var_nm; /* [sng] Variable name */ int *dmn_id_in=NULL; /* [id] Dimension IDs */ int *dmn_id_out=NULL; /* [id] Dimension IDs */ int var_id_in; /* [id] Variable ID */ int var_id_out; /* [id] Variable ID */ nc_type var_typ_out; /* [enm] Variable type to write to disk */ nc_type var_typ_rgr; /* [enm] Variable type used during regridding */ nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */ int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; 
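/* Illustrative sketch (names below are generic placeholders, not guaranteed defaults) of the grid skeleton defined next for a 1D unstructured output grid:
   dimensions: col=col_nbr_out, bnd=bnd_nbr_out
   variables: lat(col), lon(col), lat_bnd(col,bnd), lon_bnd(col,bnd), plus optional area(col), frc(col), and int msk(col)
   Curvilinear grids instead use (lat,lon)-dimensioned coordinates with (lat,lon,bnd) bounds; rectangular grids use 1D coordinates, (lat,bnd) and (lon,bnd) bounds, a latitude weight lat_wgt(lat), and, for staggered FV grids, slat/slon plus the staggered weight */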
dfl_lvl=rgr->dfl_lvl; fl_out_fmt=rgr->fl_out_fmt; /* Define new coordinates and grid variables in regridded file */ if(flg_grd_out_1D){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_col; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_col; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_col,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids_out,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; dmn_ids_out[2]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_3D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_3D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lat,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; 
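/* Editor's note (illustrative): dmn_ids_out is scratch space repacked before each
   multi-dimensional definition, e.g., the curvilinear bounds above use
   dmn_ids_out={dmn_id_lat,dmn_id_lon,dmn_id_bnd} to declare [lat,lon,bnd] vertex
   arrays, one vertex list per gridcell, while 1-D unstructured grids pack [col,bnd] */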
rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lon,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd+=nco_def_var(out_id,slat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slat,&slat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slat_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,slat_wgt_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slat,&slat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slat_wgt_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,slon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_slon,&slon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,slon_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !nco_grd_lat_fv */ dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lat_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_lon; dmn_ids_out[1]=dmn_id_bnd; rcd+=nco_def_var(out_id,lon_bnd_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; rcd+=nco_def_var(out_id,lat_wgt_nm,crd_typ_out,dmn_nbr_1D,&dmn_id_lat,&lat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_wgt_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; dmn_ids_out[0]=dmn_id_lat; dmn_ids_out[1]=dmn_id_lon; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd+=nco_def_var(out_id,frc_nm_out,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,frc_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd+=nco_def_var(out_id,msk_nm_out,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids_out,&msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_out_id,shuffle,deflate,dfl_lvl); var_crt_nbr++; } /* !flg_msk_out */ } /* !flg_grd_out_rct */ /* Add _FillValue to empty destination cells, if requested */ nco_bool flg_add_fll=rgr->flg_add_fll; /* [flg] Add _FillValue to fields with empty destination cells */ nco_bool flg_dst_mpt=False; /* [flg] At least one destination cell is empty */ size_t dst_idx; /* [idx] Index on destination grid */ /* Determine whether any destination cells are, in fact, empty Logic here could be replaced by examining frac_b variable, if we trust input frac_b... ...and we do trust input frac_b since it is already used for renormalization */ if(flg_add_fll){ if(flg_msk_apl){ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(msk_out[dst_idx] == 0) break; if(dst_idx < grd_sz_out) flg_dst_mpt=True; if(flg_dst_mpt && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports at least one destination cell, Fortran (1-based) row index %lu, is empty. User requested (with --msk_apl) that masked cells receive _FillValue, so regridder will ensure that all regridded fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm,dst_idx+1L); }else{ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ /* For each destination cell... */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ /* ...does any weight... */ if(row_dst_adr[lnk_idx] == dst_idx){ /* ...contribute to that cell? 
*/ /* If so, break lnk_idx loop and continue to next iteration of dst_idx loop */ break; } /* !row_dst_adr */ } /* !lnk_idx */ /* If weight loop reached end without a match, then this destination cell is empty */ if(lnk_idx == lnk_nbr){ flg_dst_mpt=True; break; } /* !lnk_idx */ } /* !dst_idx */ if(flg_dst_mpt && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports at least one destination cell, Fortran (1-based) row index %lu, is empty. User requested (with --add_fll) that empty cells receive _FillValue, so regridder will ensure that all regridded fields have _FillValue attribute.\n",nco_prg_nm_get(),fnc_nm,dst_idx+1L); } /* !flg_msk_apl */ } /* !flg_add_fll */ /* Pre-allocate dimension ID and cnt/srt space */ int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */ int dmn_in_fst; /* [idx] Offset of input- relative to output-dimension due to non-MRV dimension insertion */ int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */ int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */ rcd+=nco_inq_ndims(in_id,&dmn_nbr_max); dmn_nbr_max++; /* Safety in case regridding adds dimension */ dmn_id_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* Identify all record-dimensions in input file */ rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ int flg_pck; /* [flg] Variable is packed on disk */ nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */ double mss_val_dbl; double mss_val_cmp_dbl; /* Missing value for comparison to double precision values */ /* Define regridded and copied variables in output file */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv_tbl->lst[idx_tbl].flg_mrv=True; trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ var_nm=trv.nm; /* Preserve input type in output type */ var_typ_out=trv.var_typ; /* Demote DP to SP to save space. fxm: missing value type will then be inconsistent if copied without demotion */ //if(trv.var_typ == NC_DOUBLE) var_typ_out=NC_FLOAT; else var_typ_out=trv.var_typ; dmn_nbr_in=trv.nbr_dmn; dmn_nbr_out=trv.nbr_dmn; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out); /* If variable has not been defined, define it */ if(rcd != NC_NOERR){ if(trv.flg_rgr){ /* Regrid */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); dmn_in_fst=0; rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports variable \"%s\" is packed so results unpredictable. HINT: If regridded values seems weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); /* Is horizontal dimension last, i.e., most-rapidly-varying? */ if(flg_grd_in_1D && !strcmp(dmn_nm,col_nm_in)){ if(dmn_idx != dmn_nbr_in-1){ /* Unstructured input grid has col in non-MRV location (expect this with, e.g., MPAS-O/I native grid dimension-ordering */ (void)fprintf(stdout,"%s: WARNING %s reports unstructured grid spatial coordinate %s is (zero-based) dimension %d of input variable to be regridded %s which has %d dimensions. 
The NCO regridder does not support unstructured spatial dimensions that are not the last (i.e., most rapidly varying) dimension of an input variable, so results are likely garbage.\nHINT: Re-arrange input file dimensions to place horizontal dimension(s) last with, e.g., \'ncpdq -a time,lev,%s in.nc out.nc\' prior to calling the regridder. E3SM users: If this is an MPAS dataset with a new (unknown to ncremap) dimension, please ask Charlie to add the dimension to the ncremap dimension permutation list.\n",nco_prg_nm_get(),fnc_nm,dmn_nm,dmn_idx,var_nm,dmn_nbr_in,dmn_nm); trv_tbl->lst[idx_tbl].flg_mrv=False; } /* !dmn_idx */ } /* !flg_grd_in_1D */ if(flg_grd_in_2D && (!strcmp(dmn_nm,lat_nm_in) || !strcmp(dmn_nm,lon_nm_in))){ /* Are horizontal dimensions most-rapidly-varying? */ if(dmn_idx != dmn_nbr_in-1 && dmn_idx != dmn_nbr_in-2){ /* NB: Lat/lon input grid has lat/lon in non-MRV location (expect this with, e.g., AIRS L2 grid dimension-ordering */ (void)fprintf(stdout,"%s: WARNING %s reports lat-lon grid spatial coordinate %s is (zero-based) dimension %d of input variable to be regridded %s which has %d dimensions. The NCO regridder does not support rectangular lat-lon dimension(s) that are not the last two (i.e., most rapidly varying) dimensions of an input variable, so results are likely garbage.\nHINT: Re-arrange input file dimensions to place horizontal dimensions last with, e.g., \'ncpdq -a time,lev,lat,lon in.nc out.nc\' prior to calling the regridder.\n",nco_prg_nm_get(),fnc_nm,dmn_nm,dmn_idx,var_nm,dmn_nbr_in); trv_tbl->lst[idx_tbl].flg_mrv=False; } /* !dmn_idx */ } /* !flg_grd_in_2D */ if(flg_grd_out_1D){ if((nco_rgr_typ == nco_rgr_grd_2D_to_1D) && (!strcmp(dmn_nm,lat_nm_in) || !strcmp(dmn_nm,lon_nm_in))){ /* Replace orthogonal horizontal dimensions by unstructured horizontal dimension already defined */ if(!strcmp(dmn_nm,lat_nm_in)){ /* Replace lat with col */ dmn_id_out[dmn_idx]=dmn_id_col; dmn_cnt[dmn_idx]=col_nbr_out; } /* endif lat */ if(!strcmp(dmn_nm,lon_nm_in)){ /* Assume non-MRV dimensions are ordered lat/lon. Replace lat with col. Shift MRV dimensions to left after deleting lon. 
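Illustrative example (editor's addition): for 2D-to-1D regridding an MRV-ordered
   input variable [time,lat,lon] becomes [time,col] with dmn_in_fst=0, whereas a
   non-MRV input [lat,lon,time] maps lat to col in slot 0, marks the lon slot deleted
   (NC_MIN_INT below), decrements dmn_nbr_out, and sets dmn_in_fst=-1 so the trailing
   time dimension shifts left into slot 1, yielding [col,time].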
*/ dmn_id_out[dmn_idx]=NC_MIN_INT; dmn_cnt[dmn_idx]=NC_MIN_INT; dmn_nbr_out--; /* Reduce output dimension position of all subsequent input dimensions by one */ if(!trv_tbl->lst[idx_tbl].flg_mrv) dmn_in_fst=-1; } /* endif lon */ }else{ /* Dimension col_nm_in has already been defined as col_nm_out, replicate all other dimensions */ if(!strcmp(dmn_nm,col_nm_in)) rcd=nco_inq_dimid_flg(out_id,col_nm_out,dmn_id_out+dmn_idx); else rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx+dmn_in_fst); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx+dmn_in_fst); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx+dmn_in_fst]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx+dmn_in_fst],dmn_id_out+dmn_idx+dmn_in_fst); } /* !rcd */ } /* !lat && !lon */ } /* !flg_grd_out_1D */ if(flg_grd_out_2D){ if(nco_rgr_typ == nco_rgr_grd_1D_to_2D && !strcmp(dmn_nm,col_nm_in)){ /* Replace unstructured horizontal dimension by orthogonal horizontal dimensions already defined */ dmn_id_out[dmn_idx]=dmn_id_lat; dmn_id_out[dmn_idx+1]=dmn_id_lon; dmn_cnt[dmn_idx]=lat_nbr_out; dmn_cnt[dmn_idx+1]=lon_nbr_out; dmn_nbr_out++; /* Increase output dimension position of all subsequent input dimensions by one */ if(!trv_tbl->lst[idx_tbl].flg_mrv) dmn_in_fst=1; }else{ /* Dimensions lat/lon_nm_in have already been defined as lat/lon_nm_out, replicate all other dimensions */ if(!strcmp(dmn_nm,lat_nm_in)) rcd=nco_inq_dimid_flg(out_id,lat_dmn_nm_out,dmn_id_out+dmn_idx); else if(!strcmp(dmn_nm,lon_nm_in)) rcd=nco_inq_dimid_flg(out_id,lon_dmn_nm_out,dmn_id_out+dmn_idx); else rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx+dmn_in_fst); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx+dmn_in_fst); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx+dmn_in_fst]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx+dmn_in_fst],dmn_id_out+dmn_idx+dmn_in_fst); } /* !rcd */ } /* !col */ } /* !1D_to_2D */ } /* !dmn_idx */ }else{ /* !flg_rgr */ /* Replicate non-regridded variables */ rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_id_in[dmn_idx],dmn_nm); rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_id_out+dmn_idx); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt+dmn_idx); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_id_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt[dmn_idx],dmn_id_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ } /* !flg_rgr */ rcd=nco_def_var(out_id,var_nm,var_typ_out,dmn_nbr_out,dmn_id_out,&var_id_out); /* Duplicate netCDF4 settings when possible */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){ /* Deflation */ if(dmn_nbr_out > 0){ int dfl_lvl_in; /* [enm] Deflate level [0..9] */ rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in); /* Copy original deflation settings */ if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in); /* Overwrite HDF Lempel-Ziv compression level, if requested */ if(dfl_lvl == 0) deflate=(int)False; else 
deflate=(int)True; /* Turn-off shuffle when uncompressing otherwise chunking requests may fail */ if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE; /* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */ if(dfl_lvl >= 0) shuffle=NC_SHUFFLE; if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl); } /* !dmn_nbr_out */ } /* !NC_FORMAT_NETCDF4 */ (void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY); if(trv.flg_rgr){ aed_mtd_crd.var_nm=var_nm; aed_mtd_crd.id=var_id_out; (void)nco_aed_prc(out_id,var_id_out,aed_mtd_crd); if(flg_cll_msr){ aed_mtd_cll_msr.var_nm=var_nm; aed_mtd_cll_msr.id=var_id_out; (void)nco_aed_prc(out_id,var_id_out,aed_mtd_cll_msr); } /* !flg_cll_msr */ /* 20210602: Ensure all regridded variables have _FillValue if user requested _FillValue in empty cells and there are empty cells */ if(flg_add_fll && flg_dst_mpt){ /* Check for _FillValue here iff user requests non-default behavior */ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,(double *)NULL); if(!has_mss_val){ val_unn mss_val_dfl; /* [] Default _FillValue */ mss_val_dfl=nco_mss_val_dfl_get(var_typ_out); rcd=nco_put_att(out_id,var_id_out,"_FillValue",var_typ_out,1L,(void *)(&mss_val_dfl)); } /* !has_mss_val */ } /* !flg_add_fll */ } /* !flg_rgr */ } /* !rcd */ } /* !var */ } /* !idx_tbl */ /* Free pre-allocated array space */ /* col_nm_in will not otherwise be free'd if it was guessed as usual suspect */ if(col_nm_in != rgr->col_nm_in) col_nm_in=(char *)nco_free(col_nm_in); if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt) dmn_cnt=(long *)nco_free(dmn_cnt); if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); /* Define new metadata in regridded file */ if(flg_area_out){ rcd=nco_char_att_put(out_id,area_nm_out,"long_name","Solid angle subtended by gridcell"); rcd=nco_char_att_put(out_id,area_nm_out,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm_out,"units","steradian"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd); att_val=(char *)nco_calloc((strlen(lat_dmn_nm_out)+strlen(lon_dmn_nm_out)+8L),sizeof(char)); (void)sprintf(att_val,"%s, %s: sum",lat_dmn_nm_out,lon_dmn_nm_out); rcd=nco_char_att_put(out_id,area_nm_out,"cell_methods",att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* !flg_area_out */ if(flg_frc_out_wrt){ rcd=nco_char_att_put(out_id,frc_nm_out,"long_name","Fraction of gridcell valid on destination grid"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,frc_nm_out,att_nm_crd,att_val_crd); att_val=(char *)nco_calloc((strlen(lat_dmn_nm_out)+strlen(lon_dmn_nm_out)+8L),sizeof(char)); (void)sprintf(att_val,"%s, %s: sum",lat_dmn_nm_out,lon_dmn_nm_out); rcd=nco_char_att_put(out_id,frc_nm_out,"cell_methods",att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* !flg_frc_out_wrt */ if(flg_msk_out){ rcd=nco_char_att_put(out_id,msk_nm_out,"long_name","Mask (0 = invalid destination, 1 = valid destination)"); if(flg_grd_out_1D || flg_grd_out_crv) rcd=nco_char_att_put(out_id,msk_nm_out,att_nm_crd,att_val_crd); } /* !flg_msk_out */ rcd=nco_char_att_put(out_id,lat_nm_out,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lat_nm_out,"standard_name","latitude"); rcd=nco_char_att_put(out_id,lat_nm_out,"units","degrees_north"); // 20200205: Attach "axis" attribute to single-dimensional geospatial coordinates not to two-dimensional coordinate
variables per CF Conventions section 5.2 if(!flg_grd_out_crv) rcd=nco_char_att_put(out_id,lat_nm_out,"axis","Y"); double vld_min; vld_min=-90.0; att_nm=strdup("valid_min"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lat_nm_out; aed_mtd.id=lat_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_min; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lat_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); double vld_max; vld_max=90.0; att_nm=strdup("valid_max"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lat_nm_out; aed_mtd.id=lat_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_max; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lat_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lat_nm_out,"bounds",lat_bnd_nm_out); if(flg_grd_out_rct) att_val=strdup("Gridcell latitude interfaces"); else att_val=strdup("Gridcell latitude vertices"); rcd=nco_char_att_put(out_id,lat_bnd_nm_out,"long_name",att_val); rcd=nco_char_att_put(out_id,lon_nm_out,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lon_nm_out,"standard_name","longitude"); rcd=nco_char_att_put(out_id,lon_nm_out,"units","degrees_east"); // 20200205: Attach "axis" attribute to single-dimensional geospatial coordinates not to two-dimensional coordinate variables per CF Conventions section 5.2 if(!flg_grd_out_crv) rcd=nco_char_att_put(out_id,lon_nm_out,"axis","X"); /* UGRID Conventions define "topology" and "modulo" attributes https://github.com/ugrid-conventions/ugrid-conventions My understanding is these should only be utilized for global grids */ if(nco_rgr_typ == nco_rgr_grd_2D_to_2D){ /* fxm: change this to check whether lon_spn >= 360 or nco_grd_xtn == global */ att_nm=strdup("modulo"); double modulo=360.0; aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&modulo; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lon_nm_out,"topology","circular"); } /* !nco_rgr_grd_2D_to_2D */ if(lon_ctr_out[0] >= 0.0) vld_min=0.0; else vld_min=-180.0; att_nm=strdup("valid_min"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_min; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); if(lon_ctr_out[0] >= 0.0) vld_max=360.0; else vld_max=180.0; att_nm=strdup("valid_max"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=1; aed_mtd.type=NC_DOUBLE; aed_mtd.val.dp=&vld_max; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,lon_nm_out,"bounds",lon_bnd_nm_out); att_nm=strdup("bounds"); att_val=lon_bnd_nm_out; aed_mtd.att_nm=att_nm; aed_mtd.var_nm=lon_nm_out; aed_mtd.id=lon_out_id; aed_mtd.sz=strlen(att_val); aed_mtd.type=NC_CHAR; aed_mtd.val.cp=att_val; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,lon_out_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); if(flg_grd_out_rct) att_val=strdup("Gridcell longitude interfaces"); else att_val=strdup("Gridcell longitude vertices"); rcd=nco_char_att_put(out_id,lon_bnd_nm_out,"long_name",att_val); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ rcd=nco_char_att_put(out_id,slat_nm_out,"long_name","Latitude for staggered FV grid"); 
rcd=nco_char_att_put(out_id,slat_nm_out,"units","degrees_north"); rcd=nco_char_att_put(out_id,slat_wgt_nm_out,"long_name","Latitude weights for staggered FV grid"); rcd=nco_char_att_put(out_id,slon_nm_out,"long_name","Longitude for staggered FV grid"); rcd=nco_char_att_put(out_id,slon_nm_out,"units","degrees_east"); } /* !nco_grd_lat_fv */ if(flg_grd_out_rct) rcd=nco_char_att_put(out_id,lat_wgt_nm,"long_name","Latitude quadrature weights (normalized to sum to 2.0 on global grids)"); rcd=nco_char_att_put(out_id,NULL,"map_file",fl_in); rcd=nco_char_att_put(out_id,NULL,"input_file",rgr->fl_in); /* Annotate persistent metadata that should appear last in attribute list */ if(flg_grd_out_1D){ if(flg_area_out) rcd=nco_char_att_put(out_id,area_nm_out,att_nm_crd,att_val_crd); if(flg_frc_out_wrt) rcd=nco_char_att_put(out_id,frc_nm_out,att_nm_crd,att_val_crd); if(flg_msk_out) rcd=nco_char_att_put(out_id,msk_nm_out,att_nm_crd,att_val_crd); } /* !flg_grd_out_1D */ /* Persistent metadata */ if(att_nm_crd) att_nm_crd=(char *)nco_free(att_nm_crd); if(att_val_crd) att_val_crd=(char *)nco_free(att_val_crd); if(flg_cll_msr){ if(att_nm_cll_msr) att_nm_cll_msr=(char *)nco_free(att_nm_cll_msr); if(att_val_cll_msr) att_val_cll_msr=(char *)nco_free(att_val_cll_msr); } /* !flg_cll_msr */ if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ if(slat_nm_out) slat_nm_out=(char *)nco_free(slat_nm_out); if(slat_wgt_nm_out) slat_wgt_nm_out=(char *)nco_free(slat_wgt_nm_out); if(slon_nm_out) slon_nm_out=(char *)nco_free(slon_nm_out); } /* !nco_grd_lat_fv */ /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Begin data mode */ (void)nco_enddef(out_id); /* Write new coordinates and variables to regridded file */ if(flg_grd_out_1D){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=col_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=col_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_bnd_out,crd_typ_out); if(flg_area_out){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_msk_out){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=col_nbr_out; (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,(nc_type)NC_INT); } /* !flg_msk_out */ } /* !flg_grd_out_1D */ if(flg_grd_out_crv){ dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); if(flg_area_out){ (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_frc_out_wrt){ (void)nco_put_vara(out_id,frc_out_id,dmn_srt_out,dmn_cnt_tuo,frc_out,crd_typ_out); } /* !flg_frc_out_wrt */ if(flg_msk_out){ (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,(nc_type)NC_INT); } /* !flg_msk_out */ dmn_srt_out[0]=dmn_srt_out[1]=dmn_srt_out[2]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; 
dmn_cnt_tuo[2]=bnd_nbr_out; /* NB: 20160803 Semantically confusing---curvilinear grids must write *_crn_out data into *_bnd_out arrays */ (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_crn_out,crd_typ_out); (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_crn_out,crd_typ_out); } /* !flg_grd_out_crv */ if(flg_grd_out_rct){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lat_nbr_out; (void)nco_put_vara(out_id,lat_out_id,dmn_srt_out,dmn_cnt_tuo,lat_ctr_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lon_nbr_out; (void)nco_put_vara(out_id,lon_out_id,dmn_srt_out,dmn_cnt_tuo,lon_ctr_out,crd_typ_out); if(nco_grd_lat_typ == nco_grd_lat_fv && flg_stg){ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=slat_nbr_out; (void)nco_put_vara(out_id,slat_out_id,dmn_srt_out,dmn_cnt_tuo,slat_ctr_out,crd_typ_out); (void)nco_put_vara(out_id,slat_wgt_id,dmn_srt_out,dmn_cnt_tuo,slat_wgt_out,crd_typ_out); dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=slon_nbr_out; (void)nco_put_vara(out_id,slon_out_id,dmn_srt_out,dmn_cnt_tuo,slon_ctr_out,crd_typ_out); if(slat_ctr_out) slat_ctr_out=(double *)nco_free(slat_ctr_out); if(slat_wgt_out) slat_wgt_out=(double *)nco_free(slat_wgt_out); if(slon_ctr_out) slon_ctr_out=(double *)nco_free(slon_ctr_out); } /* !nco_grd_lat_fv */ dmn_srt_out[0]=0L; dmn_cnt_tuo[0]=lat_nbr_out; (void)nco_put_vara(out_id,lat_wgt_id,dmn_srt_out,dmn_cnt_tuo,lat_wgt_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt_out,dmn_cnt_tuo,lat_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lon_nbr_out; dmn_cnt_tuo[1]=bnd_nbr_out; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt_out,dmn_cnt_tuo,lon_bnd_out,crd_typ_out); dmn_srt_out[0]=dmn_srt_out[1]=0L; dmn_cnt_tuo[0]=lat_nbr_out; dmn_cnt_tuo[1]=lon_nbr_out; if(flg_area_out){ (void)nco_put_vara(out_id,area_out_id,dmn_srt_out,dmn_cnt_tuo,area_out,crd_typ_out); } /* !flg_area_out */ if(flg_frc_out_wrt){ (void)nco_put_vara(out_id,frc_out_id,dmn_srt_out,dmn_cnt_tuo,frc_out,crd_typ_out); } /* !flg_frc_out_wrt */ if(flg_msk_out){ (void)nco_put_vara(out_id,msk_out_id,dmn_srt_out,dmn_cnt_tuo,msk_out,(nc_type)NC_INT); } /* !flg_msk_out */ } /* !flg_grd_out_rct */ /* Regrid or copy variable values */ const double wgt_vld_thr=rgr->wgt_vld_thr; /* [frc] Weight threshold for valid destination value */ const nco_bool flg_rnr=rgr->flg_rnr; /* [flg] Renormalize destination values by valid area */ char *sgs_frc_nm=NULL; char *sgs_msk_nm=NULL; double *sgs_frc_in=NULL; double *sgs_frc_out=NULL; double *var_val_dbl_in=NULL; double *var_val_dbl_out=NULL; double *wgt_vld_out=NULL; double var_val_crr; int *tally=NULL; /* [nbr] Number of valid (non-missing) values */ int lvl_idx; /* [idx] Level index */ int lvl_nbr; /* [nbr] Number of levels */ int thr_idx; /* [idx] Thread index */ size_t idx_in; /* [idx] Input grid index */ size_t idx_out; /* [idx] Output grid index */ size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t val_in_fst; /* [nbr] Number of elements by which current N-D slab input values are offset from origin */ size_t val_out_fst; /* [nbr] Number of elements by which current N-D slab output values are offset from origin */ /* 20190322: Prior to entering OpenMP loop, collect specified SGS information */ const double sgs_nrm=rgr->sgs_nrm; /* [frc] Sub-gridscale normalization */ if(rgr->sgs_frc_nm){ /* Normalization test: 
fl_in=20181217.CNTL_CNPCTC1850_OIBGC.ne30_oECv3.edison.clm2.h0.2000-12.nc /bin/cp -f ${DATA}/hdf/${fl_in} ~/elm_raw.nc ncremap -P sgs -v FSDS,TBOT,GPP -a aave -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/cmip6_180x360_scrip.20181001.nc ~/elm_raw.nc ~/elm_sgs.nc # Original SGS method ncks -A -v grid_area ${DATA}/grids/ne30np4_pentagons.091226.nc ~/elm_sgs.nc ncremap -P gsg -v FSDS,TBOT,GPP -m ${DATA}/maps/map_ne30np4_to_cmip6_180x360_aave.20181001.nc ~/elm_raw.nc ~/elm_gsg.nc # New SGS method */ if(rgr->sgs_msk_nm) sgs_msk_nm=(char *)strdup(rgr->sgs_msk_nm); sgs_frc_nm=(char *)strdup(rgr->sgs_frc_nm); var_nm=sgs_frc_nm; var_typ_rgr=NC_DOUBLE; /* NB: Regrid in double precision */ var_typ_out=NC_DOUBLE; /* NB: sgs_frc_out must be double precision */ var_sz_in=1L; /* Compute from scratch to be sure it matches grd_sz_in */ var_sz_out=grd_sz_out; /* Assume this holds */ char *fl_sgs=NULL; /* [sng] External sub-gridscale file name */ int sgs_id; /* [id] netCDF file ID for external sub-gridscale file */ sgs_id=in_id; if((rcd=nco_inq_varid_flg(sgs_id,var_nm,&var_id_in)) != NC_NOERR){ /* If sgs_frc_nm is not in input file then search for it in external area file */ #ifdef WIN32 const char sls_chr='\\'; /* [chr] Slash character */ #else /* !WIN32 */ const char sls_chr='/'; /* [chr] Slash character */ #endif /* !WIN32 */ char *sls_ptr; /* [sng] Pointer to last slash character (' ') */ sls_ptr=strrchr(var_nm,sls_chr); if(!sls_ptr){ (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unable to find sgs_frc_nm = %s in current input file, and unable to identify filename (ending with slash '/' or backslash '\\', as appropriate) portion of that string to serve as local external file for sgs_frc input, exiting\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm); nco_exit(EXIT_FAILURE); } /* !sls_ptr */ sgs_frc_nm=(char *)strdup(sls_ptr+1L); /* Copy variable-name portion of string */ *sls_ptr='\0'; /* NULL-terminate filename */ fl_sgs=(char *)strdup(var_nm); var_nm=sgs_frc_nm; /* NB: too tricky? */ rcd=nco_open(fl_sgs,NC_NOWRITE,&sgs_id); if((rcd=nco_inq_varid_flg(sgs_id,var_nm,&var_id_in)) != NC_NOERR){ (void)fprintf(stderr,"%s: ERROR %s (aka \"the regridder\") reports unable to find sgs_frc_nm = \"%s\" in local external file %s, exiting\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm,fl_sgs); nco_exit(EXIT_FAILURE); } /* !rcd */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s obtaining sgs_frc = %s from file %s\n",nco_prg_nm_get(),fnc_nm,sgs_frc_nm,fl_sgs); } /* !rcd */ rcd=nco_inq_varndims(sgs_id,var_id_in,&dmn_nbr_in); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? 
dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(sgs_id,var_id_in,dmn_id_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(sgs_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); var_sz_in*=dmn_cnt_in[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ if(var_sz_in != grd_sz_in){ (void)fprintf(stdout,"%s: ERROR %s (aka \"the regridder\") requires that sgs_frc = %s be same size as spatial grid but var_sz_in = %lu != %lu = grd_sz_in\n",nco_prg_nm_get(),fnc_nm,var_nm,var_sz_in,grd_sz_in); nco_exit(EXIT_FAILURE); } /* !var_sz_in */ /* Missing value setup (NB: ELM landfrac has _FillValue and is _FillValue where masked) */ has_mss_val=nco_mss_val_get_dbl(sgs_id,var_id_in,&mss_val_dbl); if(has_mss_val) mss_val_cmp_dbl=mss_val_dbl; else mss_val_cmp_dbl=NC_FILL_DOUBLE; sgs_frc_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() sgs_frc_in value buffer"); rcd=nco_get_vara(sgs_id,var_id_in,dmn_srt,dmn_cnt_in,sgs_frc_in,var_typ_rgr); /* If sgs_frc comes from external local file, close it now */ if(fl_sgs){ rcd=nco_close(sgs_id); fl_sgs=(char *)nco_free(fl_sgs); } /* !fl_sgs */ /* Initialize output */ sgs_frc_out=(double *)nco_malloc_dbg(grd_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() sgs_frc_out value buffer"); /* Initialize and regrid sgs_frc_out 20190907: sgs_frc_in (landfrac) is _FillValue (1.0e36) for ELM datasets in all masked gridcells, and is always positive definite (never zero) in all unmasked gridcells because it is a true area. ELM sgs_frc_out is always positive definite gridcell area everywhere, with no missing values and no zero values. 20190910: MPAS-Seaice datasets have no mask, and sgs_frc_in (timeMonthly_avg_iceAreaCell) is never (ncatted-appended) _FillValue (-9.99999979021477e+33) and is usually zero because it is time-mean area-fraction of sea ice which only exists in polar regions. MPAS-Seaice sgs_frc_out is zero in all gridcells without sea-ice.
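Worked example (editor's addition, invented numbers): if destination cell j receives
   links from two ELM source cells with landfrac 1.0 and 0.5 and overlap weights 0.3
   and 0.7, the accumulation below yields sgs_frc_out[j] = 1.0*0.3 + 0.5*0.7 = 0.65,
   the effective land fraction of cell j; source cells whose sgs_frc_in equals
   mss_val_cmp_dbl contribute nothing.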
Regardless of input source, following blocks guarantee that sgs_frc_out is defined everywhere, is never a missing value (sgs_frc_out is zero where sgs_frc_in may have been _FillValue), and is always safe to multiply and normalize by sgs_frc_out in main regridding loop */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) sgs_frc_out[dst_idx]=0.0; for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) if((var_val_crr=sgs_frc_in[col_src_adr[lnk_idx]]) != mss_val_cmp_dbl) sgs_frc_out[row_dst_adr[lnk_idx]]+=var_val_crr*wgt_raw[lnk_idx]; /* Sanity check sgs_frc_out */ if(nco_dbg_lvl_get() >= nco_dbg_fl){ /* 20190326: sgs_frc expressed as a fraction must never exceed sgs_nrm CICE expresses sgs_frc (aice) in percent, i.e., sgs_nrm=100.0 Sum total value of sgs_frc (as opposed to gridcell_area) depends on grid resolution */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ /* 20190907: Approximate comparison because rounding causes frequent exceedances of sgs_nrm by epsilon ~ 1.0e-15 */ if((float)sgs_frc_out[dst_idx] > sgs_nrm) (void)fprintf(stdout,"%s: INFO %s reports sgs_frc_out[%lu] = %19.15f > %g = sgs_nrm\n",nco_prg_nm_get(),fnc_nm,dst_idx,sgs_frc_out[dst_idx],sgs_nrm); } /* !dst_idx */ } /* !dbg */ // for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ // (void)fprintf(stdout,"%s: INFO %s reports sgs_frc_out[%lu] = %19.15f\n",nco_prg_nm_get(),fnc_nm,dst_idx,sgs_frc_out[dst_idx]); // } /* !dst_idx */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); } /* !sgs_frc_nm */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"Regridding progress: # means regridded, ~ means copied\n"); /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped as shared in parallel clause */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ /* OpenMP notes: default(none): GCC9.x does not accept this (https://github.com/nco/nco/issues/114) perhaps because of fp_stdout/stderr? Intel accepts it. firstprivate(): Pointers that could be inadvertently free()'d if they lost their NULL-initialization private(): Almost everything else shared(): uggh...shared clause depends on both compiler and compiler-version 1. Const variables (e.g., flg_rnr,fnc_nm,wgt_vld_thr) are default shared for gcc >= 4.9.2, 2. fnc_nm (only!) must be explicit shared for g++ 4.6.3 (travis) 3. flg_rnr,fnc_nm,wgt_vld_thr must be explicit shared for icc 13.1.3 (rhea) 4. assert() cannot be used in OpenMP blocks 5. 
Good discussion of "const" variables in shared() clause here http://jakascorner.com/blog/2016/07/omp-default-none-and-const.html 20200221: fxm Revisit default(none) in light of above article */ #ifdef __GNUG__ # define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ ) # if GCC_LIB_VERSION < 490 # define GXX_OLD_OPENMP_SHARED_TREATMENT 1 # endif /* 480 */ # if GCC_LIB_VERSION >= 900 # define GXX_WITH_OPENMP5_GPU_SUPPORT 1 # endif /* 900 */ #endif /* !__GNUC__ */ #if defined( __INTEL_COMPILER) # pragma omp parallel for default(none) firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_cmp_dbl,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_add_fll,flg_frc_nrm,flg_msk_apl,flg_msk_out,flg_rnr,fnc_nm,frc_out,lnk_nbr,msk_out,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw,wgt_vld_thr) #else /* !__INTEL_COMPILER */ # ifdef GXX_OLD_OPENMP_SHARED_TREATMENT # pragma omp parallel for default(none) firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_cmp_dbl,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_add_fll,flg_frc_nrm,flg_msk_apl,flg_msk_out,flg_rnr,fnc_nm,frc_out,lnk_nbr,msk_out,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw) # else /* !old g++ */ # if defined(GXX_WITH_OPENMP5_GPU_SUPPORT) && 0 # pragma omp target teams distribute parallel for firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_cmp_dbl,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_add_fll,flg_frc_nrm,flg_msk_apl,flg_msk_out,flg_rnr,frc_out,lnk_nbr,msk_out,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw) # else # pragma omp parallel for firstprivate(dmn_cnt_in,dmn_cnt_out,dmn_srt,dmn_id_in,dmn_id_out,tally,var_val_dbl_in,var_val_dbl_out,wgt_vld_out) private(dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dst_idx,has_mss_val,idx,idx_in,idx_out,idx_tbl,in_id,lnk_idx,lvl_idx,lvl_nbr,mss_val_cmp_dbl,mss_val_dbl,rcd,thr_idx,trv,val_in_fst,val_out_fst,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ_out,var_typ_rgr,var_val_crr) shared(col_src_adr,dmn_nbr_hrz_crd,flg_add_fll,flg_frc_nrm,flg_msk_apl,flg_msk_out,frc_out,lnk_nbr,msk_out,out_id,row_dst_adr,sgs_frc_nm,sgs_frc_in,sgs_frc_out,sgs_msk_nm,wgt_raw) # endif /* !GCC >= 9.0 */ # endif /* !GCC < 4.9 */ #endif /* !__INTEL_COMPILER */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; thr_idx=omp_get_thread_num(); in_id=trv_tbl->in_id_arr[thr_idx]; #ifdef _OPENMP if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d 
thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? "s" : ""); if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm); #endif /* !_OPENMP */ if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm); if(trv.flg_rgr){ /* Regrid variable */ var_nm=trv.nm; var_typ_rgr=NC_DOUBLE; /* NB: Perform regridding in double precision */ var_typ_out=trv.var_typ; /* NB: Output type in file is same as input type */ var_sz_in=1L; var_sz_out=1L; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid(out_id,var_nm,&var_id_out); rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in); rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? dmn_nbr_in : dmn_nbr_out; dmn_id_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_id_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(in_id,var_id_in,dmn_id_in); rcd=nco_inq_vardimid(out_id,var_id_out,dmn_id_out); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_id_in[dmn_idx],dmn_cnt_in+dmn_idx); var_sz_in*=dmn_cnt_in[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ rcd=nco_inq_dimlen(out_id,dmn_id_out[dmn_idx],dmn_cnt_out+dmn_idx); if(dmn_cnt_out[dmn_idx] == 0L){ /* No records have been written, so overwrite zero output record size with input record size */ char dmn_rec_nm[NC_MAX_NAME]; /* [sng] Record dimension name */ int dmn_rec_id_in; rcd=nco_inq_dimname(out_id,dmn_id_out[dmn_idx],dmn_rec_nm); rcd=nco_inq_dimid(in_id,dmn_rec_nm,&dmn_rec_id_in); rcd=nco_inq_dimlen(in_id,dmn_rec_id_in,dmn_cnt_out+dmn_idx); } /* !dmn_cnt_out */ var_sz_out*=dmn_cnt_out[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ /* Compute number and size of non-lat/lon or non-col dimensions (e.g., level, time, species, wavelength) Denote their convolution by level or 'lvl' for shorthand There are lvl_nbr elements for each lat/lon or col position 20151011: Until today assume lat/lon and col are most-rapidly varying dimensions 20151011: Until today lvl_nbr missed last non-spatial dimension for 1D output */ lvl_nbr=1; /* Simple prescription of lvl_nbr works when horizontal dimension(s) is/are MRV */ for(dmn_idx=0;dmn_idx<dmn_nbr_out-dmn_nbr_hrz_crd;dmn_idx++) lvl_nbr*=dmn_cnt_out[dmn_idx]; /* Determining whether an individual field _uses_ missing values is important because memory requirements of next four malloc's (i.e., exclusive of wgt_raw) can sum to ~7*sizeof(uncompressed var) for NC_FLOAT and ~3.5*sizeof(uncompressed var) for NC_DOUBLE. Traditionally has_mss_val answers "does this variable _have_ and explicit missing value?" As of 20210909, we expand the meaning of has_mss_val, though only in nco_rgr_wgt() Now has_mss_val means does the variable use the explicitly defined missing value, or, failing that, does it use the implicitly defined missing value? 
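For example (editor's addition): an NC_FLOAT variable lacking an explicit _FillValue
   attribute is scanned against the netCDF default fill value NC_FILL_FLOAT
   (9.9692099683868690e+36); only when some input element equals that default does the
   variable "use" missing values and incur the tally/wgt_vld_out memory overhead.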
Only variables that _use_ a missing value need tally and wgt_vld_out arrays mss_val_dbl is what nco_mss_val_get_dbl() returns---its meaning has not changed However, it is no longer intended to be used Instead we create mss_val_cmp_dbl, a more general value for comparison and assignment */ var_val_dbl_in=(double *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() input value buffer"); var_val_dbl_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output value buffer"); /* Obtain input variable */ rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_dbl_in,var_typ_rgr); /* 20210909: Begin new missing value treatment */ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); /* NB: mss_val_cmp_dbl must be defined since it is now always used by regridder (even when has_mss_val is False) For instance flg_msk_apl block, below, uses mss_val_cmp_dbl for masked fields And test for _usage_ of missing values, below, necessarily compares to mss_val_cmp_dbl If missing value is not explicitly declared, use default missing value */ if(has_mss_val) mss_val_cmp_dbl=mss_val_dbl; else mss_val_cmp_dbl=NC_FILL_DOUBLE; /* Override float/double value with appropriate default missing value for integers */ if(!has_mss_val){ switch(var_typ_out){ case NC_BYTE: mss_val_cmp_dbl=NC_FILL_BYTE; break; case NC_CHAR: mss_val_cmp_dbl=NC_FILL_CHAR; break; case NC_SHORT: mss_val_cmp_dbl=NC_FILL_SHORT; break; case NC_INT: mss_val_cmp_dbl=NC_FILL_INT; break; case NC_FLOAT: mss_val_cmp_dbl=NC_FILL_FLOAT; break; case NC_DOUBLE: mss_val_cmp_dbl=NC_FILL_DOUBLE; break; case NC_UBYTE: mss_val_cmp_dbl=NC_FILL_UBYTE; break; case NC_USHORT: mss_val_cmp_dbl=NC_FILL_USHORT; break; case NC_UINT: mss_val_cmp_dbl=NC_FILL_UINT; break; /* 20210909: Implicit type conversion generates warnings: 'long long' to 'double' changes value from -9223372036854775806 to -9223372036854775808 'unsigned long long' to 'double' changes value from 18446744073709551614 to 18446744073709551616 Warnings can be fixed with -Wimplicit-const-int-float-conversion */ case NC_INT64: mss_val_cmp_dbl=NC_FILL_INT64; break; case NC_UINT64: mss_val_cmp_dbl=NC_FILL_UINT64; break; case NC_STRING: default: nco_dfl_case_nc_type_err(); break; } /* !var_typ_in */ } /* !has_mss_val */ /* Re-initialize Boolean to True and override with False if variable _uses_ missing values */ has_mss_val=True; for(idx_in=0;idx_in<var_sz_in;idx_in++){ if(var_val_dbl_in[idx_in] == mss_val_cmp_dbl) break; } /* !idx_in */ /* If neither implicit nor explicit missing value is present, treat all values as valid */ if(idx_in == var_sz_in) has_mss_val=False; /* 20210909: End new missing value treatment */ /* Memory allocation that depends on _FillValue and input variable contents */ if(has_mss_val) tally=(int *)nco_malloc_dbg(var_sz_out*nco_typ_lng(NC_INT),fnc_nm,"Unable to malloc() tally buffer"); if(has_mss_val && flg_rnr) wgt_vld_out=(double *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ_rgr),fnc_nm,"Unable to malloc() output renormalization weight buffer"); /* Initialize output */ (void)memset(var_val_dbl_out,0,var_sz_out*nco_typ_lng(var_typ_rgr)); if(has_mss_val) (void)memset(tally,0,var_sz_out*nco_typ_lng(NC_INT)); if(wgt_vld_out) (void)memset(wgt_vld_out,0,var_sz_out*nco_typ_lng(var_typ_rgr)); /* 20150914: Intensive variables require normalization, extensive do not Intensive variables (temperature, wind speed, mixing ratio) do not depend on gridcell boundaries Extensive variables (population, counts, numbers of things) 
depend on gridcell boundaries Extensive variables are the exception in models, yet are commonly used for sampling information, e.g., number of photons, number of overpasses Pass extensive variable list to NCO with, e.g., --xtn=TSurfStd_ct,... 20190420: Remove languishing, unfinished intensive variable code */ clock_t tm_srt; /* [us] Microseconds at start */ clock_t tm_end; /* [us] Microseconds at end */ float tm_drn; /* [s] Seconds elapsed */ if(nco_dbg_lvl_get() >= nco_dbg_var) tm_srt=clock(); /* This first block is for "normal" variables without sub-gridscale fractions */ if(!sgs_frc_out){ /* Apply weights */ if(!has_mss_val){ if(lvl_nbr == 1){ /* Weight single-level fields without missing values */ #ifdef ENABLE_GPU # pragma omp target data map(to:col_src_adr[0:lnk_nbr],row_dst_adr[0:lnk_nbr],var_val_dbl_in[0:var_sz_in],wgt_raw[0:lnk_nbr]) map(tofrom:var_val_dbl_out[0:var_sz_out]) # pragma omp target teams distribute parallel for simd schedule(static,1) #else /* !ENABLE_GPU */ # if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 ) # pragma omp simd # endif /* !__GNUC__ */ #endif /* !ENABLE_GPU */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]]+=var_val_dbl_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx]; }else{ val_in_fst=0L; val_out_fst=0L; /* Weight multi-level fields without missing values */ #ifdef ENABLE_GPU # pragma omp target data map(to:col_src_adr[0:lnk_nbr],row_dst_adr[0:lnk_nbr],var_val_dbl_in[0:var_sz_in],wgt_raw[0:lnk_nbr]) map(tofrom:var_val_dbl_out[0:var_sz_out]) # pragma omp parallel for reduction(+:val_in_fst,val_out_fst) #endif /* !ENABLE_GPU */ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ //if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(fp_stdout,"%s lvl_idx = %d val_in_fst = %li, val_out_fst = %li\n",trv.nm,lvl_idx,val_in_fst,val_out_fst); #ifdef ENABLE_GPU # pragma omp target teams distribute parallel for simd schedule(static,1) #else /* !ENABLE_GPU */ # if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 ) # pragma omp simd # endif /* !__GNUC__ */ #endif /* !ENABLE_GPU */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]+val_out_fst]+=var_val_dbl_in[col_src_adr[lnk_idx]+val_in_fst]*wgt_raw[lnk_idx]; val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ }else{ /* has_mss_val */ if(lvl_nbr == 1){ /* Weight single-level fields with missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]; if(wgt_vld_out) wgt_vld_out[idx_out]+=wgt_raw[lnk_idx]; tally[idx_out]++; } /* !mss_val_cmp_dbl */ } /* !lnk_idx */ }else{ /* lvl_nbr > 1 */ val_in_fst=0L; val_out_fst=0L; /* Weight multi-level fields with missing values */ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]+val_in_fst; idx_out=row_dst_adr[lnk_idx]+val_out_fst; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]; if(wgt_vld_out) wgt_vld_out[idx_out]+=wgt_raw[lnk_idx]; tally[idx_out]++; } /* !mss_val_cmp_dbl */ } /* !lnk_idx */ val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ } /* !has_mss_val */ if(!has_mss_val){ /* frc_dst = frc_out = dst_frac = frac_b contains non-unity elements and normalization type is "destarea" or "dstarea" or "none" When this occurs for conservative remapping, follow "destarea" 
normalization procedure See SCRIP manual p. 11 and http://www.earthsystemmodeling.org/esmf_releases/public/last, specifically http://www.earthsystemmodeling.org/esmf_releases/public/last/ESMF_refdoc/node3.html#SECTION03029000000000000000 "frac_a: When a conservative regridding method is used, this contains the fraction of each source cell that participated in the regridding. When a non-conservative regridding method is used, this array is set to 0.0. frac_b: When a conservative regridding method is used, this contains the fraction of each destination cell that participated in the regridding. When a non-conservative regridding method is used, this array is set to 1.0 where the point participated in the regridding (i.e. was within the unmasked source grid), and 0.0 otherwise. If the first-order conservative interpolation method is specified ("-m conserve") then the destination field may need to be adjusted by the destination fraction (frac_b). This should be done if the normalization type is ``dstarea'' (sic, really "destarea") and if the destination grid extends outside the unmasked source grid. If it isn't known if the destination extends outside the source, then it doesn't hurt to apply the destination fraction. (If it doesn't extend outside, then the fraction will be 1.0 everywhere anyway.) The following code shows how to adjust an already interpolated destination field (dst_field) by the destination fraction. The variables n_b, and frac_b are from the weight file: ! Adjust destination field by fraction do i=1, n_b if (frac_b(i) .ne. 0.0) then dst_field(i)=dst_field(i)/frac_b(i) endif enddo" NB: Non-conservative interpolation methods (e.g., bilinear) should NOT apply this normalization (theoretically there is no danger in doing so because frc_out == 1 always for all gridcells that participate in bilinear remapping and frc_out == 0 otherwise) NCO's renormalization procedure below is similar to the ESMF-recommended procedure above. However, users can control NCO renormalization with, e.g., --rnr_thr=0.1, or override it completely with --rnr_thr=none. Moreover, frac_b == frc_dst is determined solely by solely by gridcell binary mask overlaps during weight generation. It is time-invariant and 2D. Missing values (e.g., AOD) can vary in time and can be 3D (or N-D) and so can wgt_vld_out. Hence NCO renormalization is more flexible. flg_frc_nrm (i.e., ESMF-recommended) normalization makes fields pretty for graphics, yet is non-conservative because e.g., MPAS Ocean gridcells projected onto global uniform grids would have their SSTs normalized for prettiness on coastal gridpoints, which is inherently non-conservative. 20190912: Make "ESMF renormalization" of fields without missing values (i.e., "destarea") opt-in rather than default "destarea" and frac_b = frc_dst together set flg_frc_nrm Formerly flg_frc_nrm triggered ESMF renormalization by default Now flg_frc_nrm and user-explicitly-set --rnr_thr to [0.0,1.0] must both be true to trigger it This keep conservative maps conservative by default NB: This "ESMF renormalization" normalizes by frac_b == frc_dst (not by wgt_vld_out) regardless of rnr_thr 20151018: Avoid double-normalizing by only executing fractional normalization (flg_frc_nrm) block when !has_mss_val, and valid area normalization when has_mss_val */ if(flg_frc_nrm){ /* Only renormalize when frac_b < 1.0 (because frac_b == 1.0 does nothing) */ if(flg_rnr){ /* 20190912: Only renormalize when user explicitly requests it (because renormalization is non-conservative). 
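(Editor's illustration: a coastal destination cell with frc_out = 0.5 accumulates only
   half of a conservatively weighted SST; dividing by frc_out restores a representative
   cell-mean value there at the cost of global conservation.)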
Prior to today, renormalization was by default, henceforth it is opt-in. */ if(lvl_nbr == 1){ /* Fractionally renormalize single-level fields without missing values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=frc_out[dst_idx]; }else{ /* Fractionally renormalize multi-level fields without missing values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ if(frc_out[dst_idx] != 0.0){ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]/=frc_out[dst_idx]; } /* !lvl_idx */ } /* !frc_out */ } /* !dst_idx */ } /* lvl_nbr > 1 */ } /* !flg_rnr */ } /* !flg_frc_nrm */ } /* !has_mss_val */ if(has_mss_val){ /* NCL and ESMF treatment of weights and missing values described at https://www.ncl.ucar.edu/Applications/ESMF.shtml#WeightsAndMasking http://earthsystemmodeling.org/esmf_releases/non_public/ESMF_6_1_1/ESMF_refdoc/node5.html#SECTION05012600000000000000 NCO implements one of two procedures: "conservative" or "renormalized" The "conservative" algorithm uses all valid data from the input grid on the output grid Destination cells receive the weighted valid values of the source cells This is conservative because the global integrals of the source and destination fields are equal The "renormalized" algorithm divides the destination value by the sum of the valid weights This returns "reasonable" values, i.e., the mean of the valid input values However, renormalization is equivalent to extrapolating valid data to missing regions Hence the input and output integrals are unequal and the regridding is not conservative */ /* In fields with missing values, destination cells with no accumulated weight are missing value */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(!tally[dst_idx]) var_val_dbl_out[dst_idx]=mss_val_cmp_dbl; if(flg_rnr){ // if(nco_dbg_lvl_get() >= nco_dbg_quiet) (void)fprintf(fp_stdout,"%s: DEBUG renormalization for %s uses flg_rnr block\n",nco_prg_nm_get(),var_nm); if(wgt_vld_thr == 0.0){ /* Renormalize cells with no threshold by valid accumulated weight */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(tally[dst_idx]) var_val_dbl_out[dst_idx]/=wgt_vld_out[dst_idx]; }else{ /* Renormalize cells with threshold by valid accumulated weight if weight exceeds threshold */ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(wgt_vld_out[dst_idx] >= wgt_vld_thr){var_val_dbl_out[dst_idx]/=wgt_vld_out[dst_idx];}else{var_val_dbl_out[dst_idx]=mss_val_cmp_dbl;} } /* !wgt_vld_thr */ } /* !flg_rnr */ } /* !has_mss_val */ } /* !sgs_frc_out */ /* Variables with sub-gridscale fractions require "double-weighting" and normalization */ if(sgs_frc_out){ if(!strcmp(var_nm,sgs_frc_nm)){ /* Copy shared variable sgs_frc_out that was regridded before OpenMP loop 20190911: Reasons to copy sgs_frc_out into sgs_frc_nm data include speed, consistency, and well-definedness of sgs_frc_out. One reason to regrid sgs_frc_nm here is consistency with original, raw dataset: ELM landfrac is masked so regridding it here (rather than using sgs_frc_out) would produce a regridded dataset more identical to raw ELM output. The same can be said for CICE (I think). MPAS cellMask and timeMonthly_avg_iceAreaCell are not masked, and so should produce the same values as sgs_frc_out if regridded here. 
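Editor's summary (schematic) of the double-weighting in the else-branch below: for an
   SGS field x and each destination cell j,
   x_out[j] = ( SUM over links k with row_dst_adr[k]==j of
                x_in[col_src_adr[k]] * wgt_raw[k] * sgs_frc_in[col_src_adr[k]] ) / sgs_frc_out[j]
   with x_in values equal to mss_val_cmp_dbl skipped and the division elided where
   sgs_frc_out[j] == 0.0.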
*/ memcpy(var_val_dbl_out,sgs_frc_out,grd_sz_out*nco_typ_lng(var_typ_rgr)); }else if(sgs_msk_nm && !strcmp(var_nm,sgs_msk_nm)){ /* Compute binary mask directly from shared sgs_frc_out (guaranteed to be all valid values) */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]=1.0; }else{ /* !sgs_msk_nm */ /* "Double-weight" all other sub-gridscale input values by sgs_frc_in and overlap weight, normalize by sgs_frc_out */ if(!has_mss_val){ if(lvl_nbr == 1){ /* SGS-regrid single-level fields without missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++) var_val_dbl_out[row_dst_adr[lnk_idx]]+=var_val_dbl_in[col_src_adr[lnk_idx]]*wgt_raw[lnk_idx]*sgs_frc_in[col_src_adr[lnk_idx]]; /* NB: MPAS-Seaice dataset sgs_frc_out is usually zero in non-polar regions */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=sgs_frc_out[dst_idx]; }else{ /* lvl_nbr > 1 */ /* SGS-regrid multi-level fields without missing values */ val_in_fst=0L; val_out_fst=0L; for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; var_val_dbl_out[idx_out+val_out_fst]+=var_val_dbl_in[idx_in+val_in_fst]*wgt_raw[lnk_idx]*sgs_frc_in[idx_in]; } /* !lnk_idx */ /* Normalize current level values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx+val_out_fst]/=sgs_frc_out[dst_idx]; val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ }else{ /* !has_mss_val */ if(lvl_nbr == 1){ /* SGS-regrid single-level fields with missing values */ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]; idx_out=row_dst_adr[lnk_idx]; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]*sgs_frc_in[idx_in]; tally[idx_out]++; } /* !mss_val_cmp_dbl */ } /* !lnk_idx */ /* NB: Normalization clause is complex to support sgs_frc_out from both ELM and MPAS-Seaice */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++) if(!tally[dst_idx]){var_val_dbl_out[dst_idx]=mss_val_cmp_dbl;}else{if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[dst_idx]/=sgs_frc_out[dst_idx];} }else{ /* lvl_nbr > 1 */ /* SGS-regrid multi-level fields with missing values */ val_in_fst=0L; val_out_fst=0L; for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ for(lnk_idx=0;lnk_idx<lnk_nbr;lnk_idx++){ idx_in=col_src_adr[lnk_idx]+val_in_fst; idx_out=row_dst_adr[lnk_idx]+val_out_fst; if((var_val_crr=var_val_dbl_in[idx_in]) != mss_val_cmp_dbl){ var_val_dbl_out[idx_out]+=var_val_crr*wgt_raw[lnk_idx]*sgs_frc_in[col_src_adr[lnk_idx]]; tally[idx_out]++; } /* !mss_val_cmp_dbl */ } /* !lnk_idx */ /* Normalize current level values */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ idx_out=dst_idx+val_out_fst; if(!tally[idx_out]){var_val_dbl_out[idx_out]=mss_val_cmp_dbl;}else{if(sgs_frc_out[dst_idx] != 0.0) var_val_dbl_out[idx_out]/=sgs_frc_out[dst_idx];} } /* dst_idx */ val_in_fst+=grd_sz_in; val_out_fst+=grd_sz_out; } /* !lvl_idx */ } /* lvl_nbr > 1 */ } /* !has_mss_val */ } /* !sgs_msk_nm */ } /* !sgs_frc_out */ if(nco_typ_ntg(var_typ_out)){ /* 20210407: Round, with rint(), integer fields before sending to netCDF for output Otherwise implicit type conversion will truncate (rather than round) output values This is critical for masks where rounding errors produce near integer values (e.g., 0.999...) that could then be truncated to zero by implicit conversion instead of rounded up to 1. 
*/ if(has_mss_val){ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) if(var_val_dbl_out[dst_idx] != mss_val_cmp_dbl) var_val_dbl_out[dst_idx]=rint(var_val_dbl_out[dst_idx]); }else{ for(dst_idx=0;dst_idx<var_sz_out;dst_idx++) var_val_dbl_out[dst_idx]=rint(var_val_dbl_out[dst_idx]); } /* !has_mss_val */ } /* !nco_typ_ntg() */ if(flg_add_fll && !has_mss_val){ /* 20210604: Initialize fields without _FillValue in input file to default missing value in unmapped destination cells Otherwise empty destination cells will be zero (not _FillValue) in output file Fields with input _FillValue are already _FillValue in output where tally is zero */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ if(frc_out[dst_idx] == 0.0){ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]=NC_FILL_DOUBLE; } /* !lvl_idx */ } /* !frc_out */ } /* !dst_idx */ } /* !flg_add_fll */ if(flg_msk_apl){ /* 20210607: Overwrite output values with _FillValue where destination cell is masked Same procedure regardless of whether input variables already have _FillValue NB: This is separate, and presumably independent, from above flg_add_fll loop Fields with flg_msk_apl will (harmlessly?) go through both loops */ double mss_val_msk; /* [frc] Missing value to apply where mask is false */ //if(has_mss_val) mss_val_msk=mss_val_dbl; else mss_val_msk=NC_FILL_DOUBLE; mss_val_msk=mss_val_cmp_dbl; /* [frc] Missing value to apply where mask is false */ for(dst_idx=0;dst_idx<grd_sz_out;dst_idx++){ if(msk_out[dst_idx] == 0){ for(lvl_idx=0;lvl_idx<lvl_nbr;lvl_idx++){ var_val_dbl_out[dst_idx+lvl_idx*grd_sz_out]=mss_val_msk; } /* !lvl_idx */ } /* !msk_out */ } /* !dst_idx */ } /* !flg_msk_apl */ if(nco_dbg_lvl_get() >= nco_dbg_var){ tm_end=clock(); tm_drn=(float)(tm_end-tm_srt)/CLOCKS_PER_SEC; (void)fprintf(fp_stdout,"%s: INFO Compute time for %s (thread %d/%d): %g s\n",nco_prg_nm_get(),trv.nm,thr_idx,omp_get_num_threads(),tm_drn); } /* !dbg */ #pragma omp critical { /* begin OpenMP critical */ // rcd=nco_put_var(out_id,var_id_out,var_val_dbl_out,var_typ_rgr); rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_dbl_out,var_typ_rgr); } /* end OpenMP critical */ if(dmn_id_in) dmn_id_in=(int *)nco_free(dmn_id_in); if(dmn_id_out) dmn_id_out=(int *)nco_free(dmn_id_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(tally) tally=(int *)nco_free(tally); if(var_val_dbl_out) var_val_dbl_out=(double *)nco_free(var_val_dbl_out); if(var_val_dbl_in) var_val_dbl_in=(double *)nco_free(var_val_dbl_in); if(wgt_vld_out) wgt_vld_out=(double *)nco_free(wgt_vld_out); }else{ /* !trv.flg_rgr */ /* Use standard NCO copy routine for variables that are not regridded */ #pragma omp critical { /* begin OpenMP critical */ (void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl); } /* end OpenMP critical */ } /* !flg_rgr */ } /* !xtr */ } /* end (OpenMP parallel for) loop over idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n"); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables regridded = %d (%d extensive), copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_xtn_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr); /* Free memory allocated for grid reading/writing */ if(area_out) area_out=(double *)nco_free(area_out); if(col_src_adr) col_src_adr=(int *)nco_free(col_src_adr); if(dmn_sz_in_int)
dmn_sz_in_int=(int *)nco_free(dmn_sz_in_int); if(dmn_sz_out_int) dmn_sz_out_int=(int *)nco_free(dmn_sz_out_int); if(frc_out) frc_out=(double *)nco_free(frc_out); if(lat_bnd_out) lat_bnd_out=(double *)nco_free(lat_bnd_out); if(lat_crn_out) lat_crn_out=(double *)nco_free(lat_crn_out); if(lat_ctr_out) lat_ctr_out=(double *)nco_free(lat_ctr_out); if(lat_ntf_out) lat_ntf_out=(double *)nco_free(lat_ntf_out); if(lat_wgt_out) lat_wgt_out=(double *)nco_free(lat_wgt_out); if(lon_bnd_out) lon_bnd_out=(double *)nco_free(lon_bnd_out); if(lon_crn_out) lon_crn_out=(double *)nco_free(lon_crn_out); if(lon_ctr_out) lon_ctr_out=(double *)nco_free(lon_ctr_out); if(lon_ntf_out) lon_ntf_out=(double *)nco_free(lon_ntf_out); if(msk_out) msk_out=(int *)nco_free(msk_out); if(row_dst_adr) row_dst_adr=(int *)nco_free(row_dst_adr); if(sgs_frc_nm) sgs_frc_nm=(char *)nco_free(sgs_frc_nm); if(sgs_frc_in) sgs_frc_in=(double *)nco_free(sgs_frc_in); if(sgs_frc_out) sgs_frc_out=(double *)nco_free(sgs_frc_out); if(sgs_msk_nm) sgs_msk_nm=(char *)nco_free(sgs_msk_nm); if(wgt_raw) wgt_raw=(double *)nco_free(wgt_raw); return rcd; } /* end nco_rgr_wgt() */ void nco_bsl_zro /* Return Bessel function zeros */ (const int bsl_zro_nbr, /* I [nbr] Number of Bessel function zeros to return */ double * const bsl_zro) /* O [frc] Bessel zero */ { /* Purpose: Return Bessel function zeros Source: CCM code /fs/cgd/csm/models/atm/ccm3.5.8/src/ccmlsm_share/bsslzr.F Return bsl_zro_nbr zeros (or if bsl_zro_nbr > 50, approximate zeros) of the Bessel function j0 First 50 zeros are given exactly, and remaining zeros are computed by extrapolation, and therefore are not exact Original version: CCM1 Standardized: J. Rosinski, June 1992 Reviewed: J. Hack, D. Williamson, August 1992 Reviewed: J. Hack, D. Williamson, April 1996 Modified 19970123 by Jim Rosinski to use double precision arithmetic ~2000: Converted to Fortran9X by C. Zender, changed all real*16 statements to double precision (real*8) 20150530: Converted to C99 by C.
Zender */ const char fnc_nm[]="nco_bsl_zro()"; /* [sng] Function name */ const double pi=M_PI; // [frc] 3 const double bsl_zro_tbl[]={ // First 50 zeros of the Bessel function j0 (element zero is an unused sentinel) -1.e36, 2.4048255577, 5.5200781103, 8.6537279129, 11.7915344391, 14.9309177086, 18.0710639679, 21.2116366299, 24.3524715308, 27.4934791320, 30.6346064684, 33.7758202136, 36.9170983537, 40.0584257646, 43.1997917132, 46.3411883717, 49.4826098974, 52.6240518411, 55.7655107550, 58.9069839261, 62.0484691902, 65.1899648002, 68.3314693299, 71.4729816036, 74.6145006437, 77.7560256304, 80.8975558711, 84.0390907769, 87.1806298436, 90.3221726372, 93.4637187819, 96.6052679510, 99.7468198587, 102.8883742542, 106.0299309165, 109.1714896498, 112.3130502805, 115.4546126537, 118.5961766309, 121.7377420880, 124.8793089132, 128.0208770059, 131.1624462752, 134.3040166383, 137.4455880203, 140.5871603528, 143.7287335737, 146.8703076258, 150.0118824570, 153.1534580192, 156.2950342685}; const int bsl_zro_tbl_nbr_max=50; /* [nbr] */ int bsl_idx; /* [idx] Counting index */ /* Main Code */ if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stdout,"%s: DEBUG Entering %s\n",nco_prg_nm_get(),fnc_nm); /* NB: Initialize bsl_zro[0] but (in C) never use it Initialization prevents uninitialized memory warnings */ for(bsl_idx=0;bsl_idx<=bsl_zro_nbr;bsl_idx++) if(bsl_idx <= bsl_zro_tbl_nbr_max) bsl_zro[bsl_idx]=bsl_zro_tbl[bsl_idx]; if(bsl_zro_nbr > bsl_zro_tbl_nbr_max) for(bsl_idx=bsl_zro_tbl_nbr_max+1;bsl_idx<=bsl_zro_nbr;bsl_idx++) bsl_zro[bsl_idx]=bsl_zro[bsl_idx-1]+pi; if(nco_dbg_lvl_get() == nco_dbg_old){ (void)fprintf(stdout,"%s: DEBUG %s reports bsl_zro_nbr = %d\n",nco_prg_nm_get(),fnc_nm,bsl_zro_nbr); (void)fprintf(stdout,"idx\tbsl_zro\n"); for(bsl_idx=1;bsl_idx<=bsl_zro_nbr;bsl_idx++) (void)fprintf(stdout,"%d\t%g\n",bsl_idx,bsl_zro[bsl_idx]); } /* endif dbg */ return; } /* end nco_bsl_zro() */ void nco_lat_wgt_gss /* [fnc] Compute and return sine of Gaussian latitudes and their weights */ (const int lat_nbr, /* I [nbr] Latitude number */ const nco_bool flg_s2n, /* I [enm] Latitude grid-direction is South-to-North */ double * const lat_sin, /* O [frc] Sine of latitudes */ double * const wgt_Gss) /* O [frc] Gaussian weights */ { /* Purpose: Compute and return sine of Gaussian latitudes and their weights Returned arrays are ordered south-to-north (S->N), not (N->S) Source: CCM /fs/cgd/csm/models/atm/ccm3.5.8/src/ccmlsm_share/gauaw.F Calculate sine of latitudes lat_sin(lat_nbr) and weights wgt_Gss(lat_nbr) for Gaussian quadrature Algorithm described in Davis and Rabinowitz, Journal of Research of the NBS, V 56, Jan 1956 Zeros of Bessel function j0, obtained from nco_bsl_zro(), are first guess for abscissae Original version: CCM1 Standardized: L. Bath, Jun 1992 L. Buja, Feb 1996 Reviewed: D. Williamson, J. Hack, Aug 1992 D. Williamson, J. Hack, Feb 1996 19970123 Modified by Jim Rosinski to use real*16 arithmetic in order to achieve (nearly) identical weights and latitudes on all machines. ~2000: Converted to Fortran9X by C. Zender, changed all real*16 statements to double precision (real*8) 20150530: Converted to C99 by C.
Zender 20150725: Verified against tabulation at http://pomax.github.io/bezierinfo/legendre-gauss.html#n64 */ const char fnc_nm[]="nco_lat_wgt_gss()"; /* [sng] Function name */ const double eps_rlt=1.0e-16; // Convergence criterion (NB: Threshold was 1.0d-27 in real*16, 1.0e-15 fine for real*8, 1.0e-16 pushes double precision to the brink) const double pi=M_PI; // [frc] 3 const int itr_nbr_max=20; // [nbr] Maximum number of iterations double c_cff; // Constant combination coefficient double lat_idx_dbl; // Latitude index, double precision double lat_nnr_idx_dbl; // Inner latitude index, double precision double lat_nbr_dbl; // [nbr] Number of latitudes, double precision double pk=double_CEWI; // Polynomial double pkm1; // Polynomial double pkm2; // Polynomial double pkmrk; // Polynomial double sp; // Current iteration latitude increment double xz; // Abscissa estimate double cos_arg; // Intermediate parameter introduced while attempting to eliminate valgrind "uninitialised value" warnings int itr_cnt; // Iteration counter int lat_idx; // [idx] Counting index (latitude) int lat_sym_idx; // [idx] Counting index (symmetric latitude) int lat_nnr_idx; // [idx] Counting index (inner latitude loop) int lat_nbr_rcp2; // lat_nbr/2 (number of latitudes in hemisphere) double *lat_sin_p1; // Sine of Gaussian latitudes double precision double *wgt_Gss_p1; // Gaussian weights double precision /* Main Code */ if(nco_dbg_lvl_get() >= nco_dbg_sbr) (void)fprintf(stdout,"%s: DEBUG Entering %s\n",nco_prg_nm_get(),fnc_nm); /* Arrays with Fortran indexing (indicated by "plus one" = "_p1") keep numerical algorithm in C identical to Fortran */ lat_sin_p1=(double *)nco_malloc((lat_nbr+1)*sizeof(double)); // Sine of Gaussian latitudes double precision wgt_Gss_p1=(double *)nco_malloc((lat_nbr+1)*sizeof(double)); // Gaussian weights double precision /* Use Newton iteration to find abscissae */ c_cff=0.25*(1.0-4.0/(pi*pi)); lat_nbr_dbl=lat_nbr; lat_nbr_rcp2=lat_nbr/2; // NB: Integer arithmetic (void)nco_bsl_zro(lat_nbr_rcp2,lat_sin_p1); for(lat_idx=1;lat_idx<=lat_nbr_rcp2;lat_idx++){ // NB: Loop starts at 1 // 20150713: Introduce intermediate parameter cos_arg in attempt to eliminate valgrind "uninitialised value" warnings emitted by cos() (actually __cos_sse()) // Warnings occur with gcc-compiled code, not with clang-compiled code cos_arg=lat_sin_p1[lat_idx]/sqrt((lat_nbr_dbl+0.5)*(lat_nbr_dbl+0.5)+c_cff); xz=cos(cos_arg); /* First approximation to xz */ itr_cnt=0; /* goto label_73 */ label_73: pkm2=1.0; pkm1=xz; if(++itr_cnt > itr_nbr_max){ (void)fprintf(stdout,"%s: ERROR %s reports convergence only %g after %d iterations for lat_idx = %d\n",nco_prg_nm_get(),fnc_nm,fabs(sp),itr_nbr_max,lat_idx); nco_exit(EXIT_FAILURE); } /* endif */ /* Compute Legendre polynomial */ for(lat_nnr_idx=2;lat_nnr_idx<=lat_nbr;lat_nnr_idx++){ lat_nnr_idx_dbl=lat_nnr_idx; pk=((2.0*lat_nnr_idx_dbl-1.0)*xz*pkm1-(lat_nnr_idx_dbl-1.0)*pkm2)/lat_nnr_idx_dbl; pkm2=pkm1; pkm1=pk; } /* end inner loop over lat_nnr */ pkm1=pkm2; pkmrk=(lat_nbr_dbl*(pkm1-xz*pk))/(1.0-xz*xz); sp=pk/pkmrk; xz=xz-sp; /* NB: Easy to introduce bug here by not replacing Fortran abs() with C fabs() */ if(fabs(sp) > eps_rlt) goto label_73; lat_sin_p1[lat_idx]=xz; wgt_Gss_p1[lat_idx]=(2.0*(1.0-xz*xz))/((lat_nbr_dbl*pkm1)*(lat_nbr_dbl*pkm1)); } /* end outer loop over lat */ if(lat_nbr != lat_nbr_rcp2*2){ /* When lat_nbr is odd, compute weight at Equator */ lat_sin_p1[lat_nbr_rcp2+1]=0.0; pk=2.0/(lat_nbr_dbl*lat_nbr_dbl); for(lat_idx=2;lat_idx<=lat_nbr;lat_idx+=2){ 
lat_idx_dbl=lat_idx; pk=pk*lat_idx_dbl*lat_idx_dbl/((lat_idx_dbl-1.0)*(lat_idx_dbl-1.0)); } /* end loop over lat */ wgt_Gss_p1[lat_nbr_rcp2+1]=pk; } /* endif lat_nbr is odd */ /* Complete sets of abscissas and weights, using symmetry properties */ for(lat_idx=1;lat_idx<=lat_nbr_rcp2;lat_idx++){ lat_sym_idx=lat_nbr-lat_idx+1; lat_sin_p1[lat_sym_idx]=-lat_sin_p1[lat_idx]; wgt_Gss_p1[lat_sym_idx]=wgt_Gss_p1[lat_idx]; } /* end loop over lat */ /* Shift by one to remove Fortran offset in p1 arrays */ //memcpy(lat_sin,lat_sin_p1,lat_nbr*sizeof(double)); //memcpy(wgt_Gss,wgt_Gss_p1,lat_nbr*sizeof(double)); /* Reverse and shift arrays because original CCM code algorithm computes latitudes from north-to-south Shift by one to remove Fortran offset in p1 arrays */ if(flg_s2n){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ lat_sin[lat_idx]=lat_sin_p1[lat_nbr-lat_idx]; wgt_Gss[lat_idx]=wgt_Gss_p1[lat_nbr-lat_idx]; } /* end loop over lat */ }else{ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ lat_sin[lat_idx]=lat_sin_p1[lat_idx+1]; wgt_Gss[lat_idx]=wgt_Gss_p1[lat_idx+1]; } /* end loop over lat */ } /* !flg_s2n */ if(nco_dbg_lvl_get() == nco_dbg_old){ (void)fprintf(stdout,"%s: DEBUG %s reports lat_nbr = %d\n",nco_prg_nm_get(),fnc_nm,lat_nbr); (void)fprintf(stdout,"idx\tasin\tngl_rad\tngl_dgr\tgw\n"); for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) (void)fprintf(stdout,"%d\t%g\t%g\t%g\t%g\n",lat_idx,lat_sin[lat_idx],asin(lat_sin[lat_idx]),180.0*asin(lat_sin[lat_idx])/pi,wgt_Gss[lat_idx]); } /* endif dbg */ if(wgt_Gss_p1) wgt_Gss_p1=(double *)nco_free(wgt_Gss_p1); if(lat_sin_p1) lat_sin_p1=(double *)nco_free(lat_sin_p1); return; } /* end nco_lat_wgt_gss() */ void nco_sph_plg_area /* [fnc] Compute area of spherical polygon */ (rgr_sct * const rgr, /* I [sct] Regridding structure */ const double * const lat_bnd, /* [dgr] Latitude boundaries of rectangular grid */ const double * const lon_bnd, /* [dgr] Longitude boundaries of rectangular grid */ const long col_nbr, /* [nbr] Number of columns in grid */ const int bnd_nbr, /* [nbr] Number of bounds in gridcell */ double * const area) /* [sr] Gridcell area */ { /* Purpose: Compute area of spherical polygon */ /* Computing triangular area accurately is hard in corner cases Spherical triangles suffer from at least as many issues as planar ones, which are described by "Miscalculating Area and Angles of a Needle-like Triangle" by W.
Kahan, UC Berkeley In particular, the Law of Cosines and Heron's formula can be ill-conditioned For spherical triangles L'Huilier's Theorem is superior to Girard's Formula: http://mathworld.wolfram.com/LHuiliersTheorem.html Girard's formula depends on pi-minus-angle and angle is usually quite small in our applications so precision would be lost L'Huilier's theorem depends only on angles (a,b,c) and semi-perimeter (s) and is well-conditioned for small angles: tan(SE/4)=sqrt(tan(0.5*s)*tan(0.5*(s-a))*tan(0.5*(s-b))*tan(0.5*(s-c))) semi-perimeter = half-perimeter of triangle = 0.5*(a+b+c) Spherical Excess (SE) is the difference between the sum of the interior angles of a spherical triangle and that of a planar triangle (whose angles sum to pi) SE is also the solid angle subtended by the spherical triangle and that's, well, astonishing and pretty cool Wikipedia shows a better SE formula for triangles that are ill-conditioned for L'Huilier's formula because a = b ~ 0.5c https://en.wikipedia.org/wiki/Spherical_trigonometry#Area_and_spherical_excess See also interesting discussion of L'Huilier by Charles Karney who suggests his own alternative: http://osgeo-org.1560.x6.nabble.com/Area-of-a-spherical-polygon-td3841625.html The discussion mentions Mil94 Robert D. Miller, Computing the area of a spherical polygon, Graphic Gems IV, chapter II.4, pages 132-137. http://books.google.com/books?id=CCqzMm_-WucC&pg=PA132&lpg=PA132&dq=miller+area+spherical+polygon+gems&source=bl&ots=mrnvZ6NJcm&sig=CMg8eaD8dzP5snMaPeCQzgoFWUk&hl=sv&ei=4G-YTKv5GsWZOI-mmZQP&sa=X&oi=book_result&ct=result&resnum=1&ved=0CBQQ6AEwAA#v=onepage&q&f=false Mil94 contains similar ideas to my method for spherical polygons (decomposing into adjacent multiple triangles from single vertex) However, his method places single vertex at pole, then adds signed areas to obtain full polygon area His method may suffer from degraded precision because of roundoff error and long side-lengths So-called "proper" spherical triangles are those for which all angles are less than pi, so a+b+c<3*pi Cartesian coordinates of (lat,lon)=(theta,phi) are (x,y,z)=(cos(theta)*cos(phi),cos(theta)*sin(phi),sin(theta)) Dot-product rule for vectors gives interior angle/arc length between two points: cos(a)=u dot v=cos(theta1)*cos(phi1)*cos(theta2)*cos(phi2)+cos(theta1)*sin(phi1)*cos(theta2)*sin(phi2)+sin(theta1)*sin(theta2) Spherical law of cosines relates interior angles/arc-lengths (a,b,c) to surface angles (A,B,C) in spherical triangle: https://en.wikipedia.org/wiki/Spherical_law_of_cosines cos(a)=cos(b)*cos(c)+sin(b)*sin(c)*cos(A) cos(b)=cos(c)*cos(a)+sin(c)*sin(a)*cos(B) cos(c)=cos(a)*cos(b)+sin(a)*sin(b)*cos(C) cos(A)=[cos(a)-cos(b)*cos(c)]/[sin(b)*sin(c)] cos(B)=[cos(b)-cos(c)*cos(a)]/[sin(c)*sin(a)] cos(C)=[cos(c)-cos(a)*cos(b)]/[sin(a)*sin(b)] Bounds information on unstructured grids will use bounds_nbr=maximum(vertice_nbr) Unused vertices are stored as either repeated points (ACME does this) or, conceivably, as missing values Given (lat,lon) for N-points algorithm to find area of spherical polygon is: 1. Any decomposition, Girard areas: Loses precision due to mismatch between pi and small spherical excesses A. Find interior angles/arc-lengths (a,b,c,d...) using spherical law of cosines along each edge B. Apply generalized Girard formula SE_n = Sum(A_n) - (N-2)*pi 2.
CSZ decomposition (N-2 triangles) with L'Huilier areas, Convert polygon into triangles by cycling spoke through all sides from common apex This method requires computation of N-2 (not N) triangles, though fewer sides due to optimization It works on all convex polygons (interior angles less than 180) but not, in general, concave polygons Whether it works or not on concave polygons depends upon their exact shape and the choice of apex point A. First three non-identical points form first triangle with sides A,B,C (first+second point define A, etc.) i. First vertice anchors all triangles ii. Third vertice of preceding triangle becomes second vertice of next triangle iii. Next non-identical point becomes last vertice of next triangle iv. Side C of previous triangle is side A of next triangle B. For each triangle, compute area with L'Huilier formula unless A = B ~ 0.5*C then use SAS formula 3. centroidal decomposition, N triangle version by Taylor, L'Huilier areas: Compute polygon centroid and treat this as hub from which spokes are drawn to all vertices This method requires computation of N triangles, though fewer sides due to optimization Moreover, it works on all convex polygons and on slightly concave polygons Centroid/hub has clear view of interior of most simple concave polygons 4. Any decomposition but with exact RLL grids by Zender and Agress 20160918 A. Decompose polygon into triangles via any method (e.g., method 2 or 3 above) B. Determine whether triangle is spherical or contains RLL (constant latitude) C. Spherical triangles use L'Huilier, RLL triangles use series expansion */ const char fnc_nm[]="nco_sph_plg_area()"; const double dgr2rdn=M_PI/180.0; int bnd_nbr_ttl; /* [nbr] Number of bounds in gridcell accounting for possibility of centroid information */ long idx; /* [idx] Counting index for unrolled grids */ short int bnd_idx; /* Shift to this method once we pass rgr into nco_sph_plg_area() */ nco_bool flg_mth_csz=False; /* [flg] Use CSZ's advancing polygon bisector method */ nco_bool flg_mth_ctr=False; /* [flg] Use centroid method to compute polygon area */ nco_edg_typ_enm edg_typ; /* [enm] Arc-type for triangle edges */ nco_ply_tri_mth_typ_enm ply_tri_mth; /* [enm] Polygon decomposition method */ if(rgr->edg_typ == nco_edg_nil) rgr->edg_typ=nco_edg_gtc; edg_typ=rgr->edg_typ; /* [enm] Arc-type for triangle edges */ ply_tri_mth=rgr->ply_tri_mth; /* [enm] Polygon decomposition method */ if(ply_tri_mth == nco_ply_tri_mth_csz) flg_mth_csz=True; if(ply_tri_mth == nco_ply_tri_mth_ctr) flg_mth_ctr=True; assert(flg_mth_ctr != flg_mth_csz); bnd_nbr_ttl=bnd_nbr; // Allocate space for one extra boundary to store centroid information if necessary if(flg_mth_ctr) bnd_nbr_ttl=bnd_nbr+1; double *lat_bnd_rdn=NULL_CEWI; /* [rdn] Latitude boundaries of rectangular destination grid */ double *lon_bnd_rdn=NULL_CEWI; /* [rdn] Longitude boundaries of rectangular destination grid */ double *lat_bnd_sin=NULL_CEWI; /* [frc] Sine of latitude boundaries of rectangular destination grid */ double *lon_bnd_sin=NULL_CEWI; /* [frc] Sine of longitude boundaries of rectangular destination grid */ double *lat_bnd_cos=NULL_CEWI; /* [frc] Cosine of latitude boundaries of rectangular destination grid */ double *lon_bnd_cos=NULL_CEWI; /* [frc] Cosine of longitude boundaries of rectangular destination grid */ /* Allocate one extra space for some arrays to store polygon centroid values for each column for ply_tri_mth=ctr */ lon_bnd_rdn=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lat_bnd_rdn=(double 
*)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lon_bnd_cos=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); lat_bnd_cos=(double *)nco_malloc(col_nbr*bnd_nbr_ttl*sizeof(double)); lon_bnd_sin=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); lat_bnd_sin=(double *)nco_malloc(col_nbr*bnd_nbr*sizeof(double)); memcpy(lat_bnd_rdn,lat_bnd,col_nbr*bnd_nbr*sizeof(double)); memcpy(lon_bnd_rdn,lon_bnd,col_nbr*bnd_nbr*sizeof(double)); for(idx=0;idx<col_nbr*bnd_nbr;idx++){ lon_bnd_rdn[idx]*=dgr2rdn; lat_bnd_rdn[idx]*=dgr2rdn; lon_bnd_cos[idx]=cos(lon_bnd_rdn[idx]); lat_bnd_cos[idx]=cos(lat_bnd_rdn[idx]); lon_bnd_sin[idx]=sin(lon_bnd_rdn[idx]); lat_bnd_sin[idx]=sin(lat_bnd_rdn[idx]); } /* !idx */ double area_smc_crc; /* [sr] Small-circle correction to spherical triangle area */ double area_smc; /* [sr] Gridcell area allowing for latitude-triangles */ double area_ttl; /* [sr] Total area of input polygon list assuming spherical triangles */ double area_smc_ttl; /* [sr] Total area of input polygon list allowing for latitude-triangles */ double area_smc_crc_ttl; /* [sr] Latitude-triangle correction (should be small) to total area of input polygon list */ double area_smc_crc_abs_ttl; /* [sr] Latitude-triangle absolute correction (no compensation of positive/negative contributions, should be no smaller than above) to total area of input polygon list */ double lat_ctr; /* [dgr] Latitude of polygon centroid */ double lon_ctr; /* [dgr] Longitude of polygon centroid */ double lat_ctr_rdn; /* [rdn] Latitude of polygon centroid */ double lon_ctr_rdn; /* [rdn] Longitude of polygon centroid */ double lat_ctr_cos; /* [frc] Cosine latitude of polygon centroid */ double lat_dlt; /* [rdn] Latitudinal difference */ double lon_dlt; /* [rdn] Longitudinal difference */ double ngl_a; /* [rdn] Interior angle/great circle arc a */ double ngl_b; /* [rdn] Interior angle/great circle arc b */ double ngl_c; /* [rdn] Interior angle/great circle arc c */ double ngl_ltr_a; /* [rdn] Interior angle/small circle arc a, canonical latitude-triangle geometry */ double ngl_ltr_b; /* [rdn] Interior angle/great circle arc b, canonical latitude-triangle geometry */ double ngl_ltr_c; /* [rdn] Interior angle/great circle arc c, canonical latitude-triangle geometry */ double prm_smi; /* [rdn] Semi-perimeter of triangle */ double sin_hlf_tht; /* [frc] Sine of half angle/great circle arc theta connecting two points */ double xcs_sph; /* [sr] Spherical excess */ int tri_nbr; /* [nbr] Number of triangles in polygon */ long bnd_vld_nbr=NC_MIN_INT; /* [idx] Number of valid (non-duplicative) vertices in each polygon */ long *a_idx; /* [idx] Point A 1-D indices for each triangle in polygon */ long *b_idx; /* [idx] Point B 1-D indices for each triangle in polygon */ long *c_idx; /* [idx] Point C 1-D indices for each triangle in polygon */ long *vrt_vld=NULL; /* [idx] Absolute 1-D indices of valid vertices */ long idx_a; /* [idx] Point A 1-D index */ long idx_b; /* [idx] Point B 1-D index */ long idx_c; /* [idx] Point C 1-D index */ nco_bool flg_sas_ndl=False; /* [flg] L'Huilier's formula will fail due to needle where one side exceeds semi-perimeter */ nco_bool flg_sas_isc=False; /* [flg] L'Huilier's formula is ill-conditioned due to flat, near-isosceles triangle */ nco_bool flg_sas_a=False; /* [flg] Use SAS triangle formula with central angle a */ nco_bool flg_sas_b=False; /* [flg] Use SAS triangle formula with central angle b */ nco_bool flg_sas_c=False; /* [flg] Use SAS triangle formula with central angle c */ nco_bool flg_ply_has_smc; /*
[flg] Any triangle in polygon has small-circle edge */ nco_bool flg_tri_crr_smc; /* [flg] Current triangle has small-circle edge */ /* Initialize global accumulators */ area_ttl=0.0; area_smc_ttl=0.0; area_smc_crc_ttl=0.0; area_smc_crc_abs_ttl=0.0; for(long col_idx=0;col_idx<col_nbr;col_idx++){ /* Initialize local properties and accumulators for this cell/polygon */ flg_ply_has_smc=False; ngl_c=double_CEWI; /* Otherwise compiler unsure ngl_c is initialized before first use */ area[col_idx]=0.0; area_smc=0.0; tri_nbr=0; if(col_idx == 0){ a_idx=(long *)nco_calloc(bnd_nbr,sizeof(long)); b_idx=(long *)nco_calloc(bnd_nbr,sizeof(long)); c_idx=(long *)nco_calloc(bnd_nbr,sizeof(long)); vrt_vld=(long *)nco_calloc(bnd_nbr,sizeof(long)); } /* !col_idx */ /* Safety re-initialization to ease debugging, not strictly necessary */ for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++){ vrt_vld[bnd_idx]=NC_MIN_INT; a_idx[bnd_idx]=NC_MIN_INT; b_idx[bnd_idx]=NC_MIN_INT; c_idx[bnd_idx]=NC_MIN_INT; } /* !bnd_idx */ if(flg_mth_ctr){ double lon_dff; /* [dgr] Longitude difference */ long bnd_srt_idx; /* [idx] Absolute starting index of vertices in polygon */ long bnd_idx; /* [idx] Offset of current valid vertex index from starting index */ long bnd_vld_idx; /* [idx] Absolute index of last valid vertex */ /* First vertice is always valid */ bnd_srt_idx=bnd_nbr*col_idx; bnd_vld_idx=bnd_srt_idx; vrt_vld[0]=bnd_vld_idx; lat_ctr=lat_bnd[bnd_srt_idx]; lon_ctr=lon_bnd[bnd_srt_idx]; bnd_vld_nbr=1; /* First guess for next valid index */ bnd_idx=1; /* bnd_idx labels offset from first vertex of next valid (i.e., non-duplicative) vertex */ while(bnd_idx<bnd_nbr){ /* Skip repeated points that must occur when polygon has fewer than allowed vertices */ while(lon_bnd[bnd_vld_idx] == lon_bnd[bnd_srt_idx+bnd_idx] && lat_bnd[bnd_vld_idx] == lat_bnd[bnd_srt_idx+bnd_idx]){ /* Next valid vertice must not duplicate previous valid vertex */ bnd_idx++; /* Have we already found all valid vertices? */ if(bnd_idx == bnd_nbr) break; } /* !while */ /* Jump to normalization when all valid vertices found */ if(bnd_idx == bnd_nbr) break; /* Current vertex is valid (non-duplicative) */ bnd_vld_idx=bnd_srt_idx+bnd_idx; vrt_vld[bnd_vld_nbr]=bnd_vld_idx; bnd_vld_nbr++; if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG %s reports centroidal decomposition col_idx=%lu, bnd_nbr=%d, bnd_idx=%ld, bnd_vld_idx=%ld, bnd_vld_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,col_idx,bnd_nbr,bnd_idx,bnd_vld_idx,bnd_vld_nbr); assert(bnd_vld_nbr <= bnd_nbr); lat_ctr+=lat_bnd[bnd_vld_idx]; lon_ctr+=lon_bnd[bnd_vld_idx]; lon_dff=lon_bnd[bnd_vld_idx]-lon_bnd[bnd_srt_idx]; if(lon_dff >= 180.0){ lon_ctr-=360.0; }else if(lon_dff <= -180.0){ lon_ctr+=360.0; } /* !lon_dff */ /* Search for next valid vertice in next iteration */ bnd_idx++; } /* !bnd_idx */ /* Compute centroid */ lat_ctr/=bnd_vld_nbr; lon_ctr/=bnd_vld_nbr; /* Centroid can become point A of bnd_nbr triangles or optimize algorithm: 1. Skip sub-dividing polygon into centroid-based triangles for bnd_vld_nbr == 3 2. Split quadrilaterals into two (non-centroid) triangles for bnd_vld_nbr == 4 3.
Use full centroid-based triangle algorithm for bnd_vld_nbr >= 5 */ lat_ctr_rdn=lat_ctr*dgr2rdn; lon_ctr_rdn=lon_ctr*dgr2rdn; lat_ctr_cos=cos(lat_ctr_rdn); /* Place centroid values in extended arrays for easy access */ lat_bnd_rdn[(col_idx+1)*bnd_nbr_ttl-1L]=lat_ctr_rdn; lon_bnd_rdn[(col_idx+1)*bnd_nbr_ttl-1L]=lon_ctr_rdn; lat_bnd_cos[(col_idx+1)*bnd_nbr_ttl-1L]=lat_ctr_cos; /* Polygon centroid and valid vertices are now known */ assert(bnd_vld_nbr > 2); if(bnd_vld_nbr == 3){ /* Three vertices only means polygon is already decomposed into a triangle */ tri_nbr=1; a_idx[0]=vrt_vld[0]; b_idx[0]=vrt_vld[1]; c_idx[0]=vrt_vld[2]; }else if(bnd_vld_nbr == 4){ /* Bisect quadrilateral into two triangles rather than use centroid and have four triangles */ tri_nbr=2; a_idx[0]=vrt_vld[0]; b_idx[0]=vrt_vld[1]; c_idx[0]=vrt_vld[2]; a_idx[1]=vrt_vld[0]; /* NB: Order is important. This way side C of triangle[0] = side A of triangle[1] */ b_idx[1]=vrt_vld[2]; c_idx[1]=vrt_vld[3]; }else if(bnd_vld_nbr >= 5){ /* Centroid method has as many triangles as valid vertices */ tri_nbr=bnd_vld_nbr; for(int tri_idx=0;tri_idx<tri_nbr;tri_idx++){ a_idx[tri_idx]=(col_idx+1)*bnd_nbr_ttl-1L; /* A is always centroid, store values at end of arrays */ b_idx[tri_idx]=vrt_vld[tri_idx]; c_idx[tri_idx]=vrt_vld[(tri_idx+1)%tri_nbr]; } /* !tri_idx */ } /* !bnd_vld_nbr */ } /* !flg_mth_ctr */ if(flg_mth_csz){ /* A is always first vertice of all triangles */ idx_a=bnd_nbr*col_idx; /* Start search for B at next vertice */ bnd_idx=1; /* bnd_idx labels offset from point A of potential location of triangle points B and C We know that bnd_idx(A) == 0, bnd_idx(B) < bnd_nbr-1, bnd_idx(C) < bnd_nbr */ while(bnd_idx<bnd_nbr-1){ /* Only first triangle must search for B, subsequent triangles recycle previous C as current B */ if(tri_nbr == 0){ /* Skip repeated points that must occur when polygon has fewer than allowed vertices */ /* 20200115: Prior to today we never skipped polar points (same latitudes but different longitudes) That worked fine in practice for spherical triangles partly because triangles from CSZ decomposition (aka hub-and-spoke decomposition) are additive, even with multiple points on the same great circle, and partly due to luck (a starting vertex surrounded by points on the same geodesic would break it). Moreover, repeated polar points pose no issues for L'Huilier's (or Girard's) method which depends only on the interior angles and side lengths, not the longitudes of polar points. Small circles change that last part, and we must now eliminate repeated polar points.
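For example, vertices (lat,lon) = (90,0) and (90,120) are numerically distinct yet geometrically identical (both are the North Pole), so a small-circle edge test between them would be spurious.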
*/ if(edg_typ == nco_edg_smc){ /* Skip repeated numerically identical points */ while(lon_bnd[idx_a] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_a] == lat_bnd[idx_a+bnd_idx]){ /* Next vertice may not duplicate A */ bnd_idx++; /* If there is no room for C then all triangles found */ if(bnd_idx == bnd_nbr-1) break; } /* !while */ /* Skip geometrically identical (i.e., repeated polar) points */ while((fabs(lat_bnd[idx_a]) == 90.0) && (fabs(lat_bnd[idx_a+bnd_idx]) == 90.0)){ bnd_idx++; if(bnd_idx == bnd_nbr-1) break; } /* !while */ }else if(edg_typ != nco_edg_smc){ /* Spherical polygons can use simpler, pre-20200116 algorithm to eliminate repeated points */ while(lon_bnd[idx_a] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_a] == lat_bnd[idx_a+bnd_idx]){ /* Next vertice may not duplicate A */ bnd_idx++; /* If there is no room for C then all triangles found */ if(bnd_idx == bnd_nbr-1) break; } /* !while */ }else{ abort(); } /* !edg_typ */ /* Jump to next column when all triangles found */ if(bnd_idx == bnd_nbr-1) break; } /* !tri_nbr */ idx_b=idx_a+bnd_idx; /* Search for C at next vertice */ bnd_idx++; /* fxm */ while(lon_bnd[idx_b] == lon_bnd[idx_a+bnd_idx] && lat_bnd[idx_b] == lat_bnd[idx_a+bnd_idx]){ /* Next vertice may not duplicate B */ bnd_idx++; /* If there is no room for C then all triangles found */ if(bnd_idx == bnd_nbr) break; } /* !while */ /* Jump to next column when all triangles found */ if(bnd_idx == bnd_nbr) break; idx_c=idx_a+bnd_idx; /* Valid triangle, vertices are known and labeled */ a_idx[tri_nbr]=idx_a; b_idx[tri_nbr]=idx_b; c_idx[tri_nbr]=idx_c; tri_nbr++; /* Begin search for next B at current C */ bnd_idx=idx_c-idx_a; } /* !bnd_idx */ } /* !flg_mth_csz */ /* Triangles are known for requested decomposition method Compute and accumulate their area Optimized algorithm recycles previous arc c as current arc a (after first triangle) */ for(int tri_idx=0;tri_idx<tri_nbr;tri_idx++){ idx_a=a_idx[tri_idx]; idx_b=b_idx[tri_idx]; idx_c=c_idx[tri_idx]; if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG %s reports triangle vertices: col_idx=%lu, tri_idx=%d, idx_a=%ld, idx_b=%ld, idx_c=%ld\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,idx_a,idx_b,idx_c); /* Compute interior angle/great circle arc a for first triangle; subsequent triangles recycle previous arc c */ if(tri_idx == 0){ /* 20150831: Test by computing ncol=0 area in conus chevrons grid, compare to MAT results ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/257x512_SCRIP.20150901.nc -m ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.20150901.nc ncremap -s ${DATA}/grids/257x512_SCRIP.20150901.nc -g ${DATA}/grids/conusx4v1np4_chevrons_scrip_c150815.nc -m ${DATA}/maps/map_fv257x512_to_conusx4v1np4_chevrons_bilin.20150901.nc ncks -O -D 5 -v FSNT --map ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.150418.nc ${DATA}/ne30/raw/famipc5_ne30_v0.3_00003.cam.h0.1979-01.nc ${DATA}/ne30/rgr/fv_FSNT.nc ncks -O -D 5 -v FSNT --rgr diagnose_area --map ${DATA}/maps/map_fv257x512_to_conusx4v1np4_chevrons_bilin.20150901.nc ${DATA}/ne30/rgr/fv_FSNT.nc ${DATA}/ne30/rgr/dogfood.nc ncks -O -D 1 --rgr infer#diagnose_area --rgr grid=${HOME}/grd.nc ${DATA}/ne30/rgr/dogfood.nc ~/foo.nc ncks -H -s %20.15e, -v area -d ncol,0 ${DATA}/ne30/rgr/dogfood.nc ncks -H -s %20.15e, -v grid_area -d grid_size,0 ${DATA}/grids/conusx4v1np4_chevrons_scrip_c150815.nc ncks -H -s %20.15e, -v grid_area -d grid_size,0 ${HOME}/grd.nc ncol=0 on conus chevrons file: 3.653857995295246e-05 raw GLL weight 3.653857995294305e-05 ESMF weight (area_b from
map-file) 3.653857995294302e-05 matlab CSZ decomposition (N-2 triangles) computed at SNL by MAT 3.653857995294301e-05 matlab centroidal decomposition (N triangles) computed at SNL by MAT 3.653857995294258e-05 NCO CSZ _and_ centroidal decompositions (new haversine) 3.653857995289623e-05 NCO CSZ decomposition (old acos) 20191011: Tested this same polygon in ESMF and NCO weight-generator NCO maps begin with first destination gridcell, find next ESMF gridcell by searching for first col: ncks --trd -C -v col ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc | egrep "=1 " ncks -H --trd -s %20.15e -C -d n_b,0 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc 3.653857995294305e-05 ncks -H --trd -s '%20.15e, ' -C -d n_b,0 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 3.653857995295246e-05 ESMF and NCO weight-generators produce nearly identical S results to double-precision: ncks -H --trd -s '%20.15e, ' -C -d n_s,0,1 -v S ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 2.181999640069480e-03, 1.309571213636605e-02 ncks -H --trd -s %20.15e -C -d n_s,207436 -d n_s,209617 -v S ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc 2.181999640069454e-03, 1.309571213636510e-02 Compare first five polygon areas: ncks --trd -H -C -s '%20.15e, ' -d n_b,0,4 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc ncks --trd -H -C -s '%20.15e, ' -d n_b,0,4 -v area_b ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc 3.653857995294305e-05, 1.250459284052488e-04, 1.448204605591709e-04, 8.223598867312266e-05, 8.585831933875070e-05, # aave 3.653857995294258e-05, 1.250459284052470e-04, 1.448204605591675e-04, 8.223598867312247e-05, 8.585831933875186e-05, Compare total areas: ncwa -O -y ttl -v area.? ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_aave.20191001.nc ~/foo_aave.nc ncwa -O -y ttl -v area.? ${DATA}/maps/map_cmip6_180x360_to_conusx4v1np4_chevrons_nco.20191001.nc ~/foo_nco.nc ncks --trd -H -C -s '%20.15e, ' -v area.? ~/foo_aave.nc ncks --trd -H -C -s '%20.15e, ' -v area.? ~/foo_nco.nc aave: 1.256637061435867e+01, 1.256637061435973e+01 nco: 1.256637061435857e+01, 1.256637061435955e+01 4*pi: 1.25663706143591729538e+01 Does (tru_glb_ttl/NCO_glb_ttl)*NCO_lcl = ESMF_lcl ? 
(1.25663706143591729538/1.256637061435857)*3.653857995294258=3.6538579952944333 No, normalization alone does not explain differences between ESMF and NCO It does not appear that ESMF does a global normalization of areas/weights */ /* Computing great circle arcs over small arcs requires care since central angle is near 0 degrees The cosine of small angles changes slowly, which leads to precision loss Use haversine formula instead of spherical law of cosines formula https://en.wikipedia.org/wiki/Great-circle_distance */ /* Interior angle/great circle arc a, spherical law of cosines formula (loses precision): cos_a=lat_bnd_cos[idx_a]*lon_bnd_cos[idx_a]*lat_bnd_cos[idx_b]*lon_bnd_cos[idx_b]+ lat_bnd_cos[idx_a]*lon_bnd_sin[idx_a]*lat_bnd_cos[idx_b]*lon_bnd_sin[idx_b]+ lat_bnd_sin[idx_a]*lat_bnd_sin[idx_b];ngl_a=acos(cos_a); */ /* Interior angle/great circle arc a, haversine formula: */ // 20160918: Use branch cut rules for longitude lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_a],lon_bnd_rdn[idx_b])); lat_dlt=fabs(lat_bnd_rdn[idx_a]-lat_bnd_rdn[idx_b]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_a]*lat_bnd_cos[idx_b]*pow(sin(0.5*lon_dlt),2)); ngl_a=2.0*asin(sin_hlf_tht); }else{ /* !tri_idx == 0 */ ngl_a=ngl_c; } /* !tri_idx == 0 */ /* Interior angle/great circle arc b */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_b],lon_bnd_rdn[idx_c])); lat_dlt=fabs(lat_bnd_rdn[idx_b]-lat_bnd_rdn[idx_c]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_b]*lat_bnd_cos[idx_c]*pow(sin(0.5*lon_dlt),2)); ngl_b=2.0*asin(sin_hlf_tht); /* Interior angle/great circle arc c */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_c],lon_bnd_rdn[idx_a])); lat_dlt=fabs(lat_bnd_rdn[idx_c]-lat_bnd_rdn[idx_a]); sin_hlf_tht=sqrt(pow(sin(0.5*lat_dlt),2)+lat_bnd_cos[idx_c]*lat_bnd_cos[idx_a]*pow(sin(0.5*lon_dlt),2)); ngl_c=2.0*asin(sin_hlf_tht); /* Semi-perimeter */ prm_smi=0.5*(ngl_a+ngl_b+ngl_c); /* L'Huilier's formula results in NaN if any side exceeds semi-perimeter This can occur in needle-shaped triangles due to rounding errors in derived arc lengths a, b, c 20200203: Problematic needles occur a few dozen times in ne120pg2 -> cmip6 maps Problematic isosceles triangles are much rarer than problematic needles Therefore look for needle-issues first, then, if none found, look for isosceles issues Wikipedia recommends treating ill-conditioned triangles by Side-Angle-Side (SAS) formula https://en.wikipedia.org/wiki/Spherical_trigonometry Diagnose needles beforehand and call SAS routines as above to avoid NaN in L'Huilier Label problematic needle triangles by shortest side, e.g., "flg_sas_a" means (b ~ c) and a ~ 0.0 */ flg_sas_ndl=flg_sas_isc=flg_sas_a=flg_sas_b=flg_sas_c=False; if(ngl_a > prm_smi){if(ngl_b > ngl_c) flg_sas_c=True; else flg_sas_b=True;} /* a exceeds semi-perimeter */ else if(ngl_b > prm_smi){if(ngl_c > ngl_a) flg_sas_a=True; else flg_sas_c=True;} /* b exceeds semi-perimeter */ else if(ngl_c > prm_smi){if(ngl_a > ngl_b) flg_sas_b=True; else flg_sas_a=True;} /* c exceeds semi-perimeter */ if(flg_sas_a || flg_sas_b || flg_sas_c) flg_sas_ndl=True; if(!flg_sas_ndl){ /* L'Huilier's formula becomes ill-conditioned when two sides are one half the third side This occurs for flat, isosceles-shaped triangles Label problematic isosceles triangles by longest side, e.g., "flg_sas_a" means (b ~ c) ~ 0.5*a */ /* Sensitivity tests on ~20191014 showed that triangular ill-conditioning treatment (i.e., switching to SAS method) does not improve (and may degrade) accuracy for eps_ill_cnd >
1.0e-15 */ const double eps_ill_cnd=1.0e-15; /* [frc] Ill-conditioned tolerance for interior angle/great circle arcs in triangle */ const double eps_ill_cnd_dbl=2.0*eps_ill_cnd; /* [frc] Ill-conditioned tolerance for interior angle/great circle arcs in triangle */ if((fabs(ngl_a-ngl_b) < eps_ill_cnd) && (fabs(ngl_a-0.5*ngl_c) < eps_ill_cnd_dbl)) flg_sas_c=True; /* c is twice a and b */ else if((fabs(ngl_b-ngl_c) < eps_ill_cnd) && (fabs(ngl_b-0.5*ngl_a) < eps_ill_cnd_dbl)) flg_sas_a=True; /* a is twice b and c */ else if((fabs(ngl_c-ngl_a) < eps_ill_cnd) && (fabs(ngl_c-0.5*ngl_b) < eps_ill_cnd_dbl)) flg_sas_b=True; /* b is twice c and a */ if(flg_sas_a || flg_sas_b || flg_sas_c) flg_sas_isc=True; } /* !flg_sas_ndl */ if(flg_sas_isc || flg_sas_ndl){ /* Compute area using SAS formula */ double cos_hlf_C; /* [frc] Cosine of half of canonical surface angle C */ //double sin_hlf_C; /* [frc] Sine of half of canonical surface angle C */ double ngl_sfc_ltr_C; /* [rdn] Canonical surface angle/great circle arc C */ double tan_hlf_a_tan_hlf_b; /* [frc] Product of tangents of one-half of nearly equal canonical sides */ double xcs_sph_hlf_tan; /* [frc] Tangent of one-half the spherical excess */ /* Transform sides into canonical order for formula where C is surface angle between arcs a and b */ if(flg_sas_c){ ngl_ltr_a=ngl_a; ngl_ltr_b=ngl_b; ngl_ltr_c=ngl_c; } /* !flg_sas_c */ if(flg_sas_a){ ngl_ltr_a=ngl_b; ngl_ltr_b=ngl_c; ngl_ltr_c=ngl_a; } /* !flg_sas_a */ if(flg_sas_b){ ngl_ltr_a=ngl_c; ngl_ltr_b=ngl_a; ngl_ltr_c=ngl_b; } /* !flg_sas_b */ if(flg_sas_ndl && (nco_dbg_lvl_get() >= nco_dbg_scl)) (void)fprintf(stdout,"%s: INFO %s reports col_idx = %li triangle %d is needle-shaped triangle with a side that exceeds semi-perimeter = %0.16e. Eschew L'Huilier's formula for spherical excess to avoid NaN. Could use SAS formula with canonical central interior arc c = %0.16e.\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,prm_smi,ngl_ltr_c); if(flg_sas_isc && (nco_dbg_lvl_get() >= nco_dbg_scl)) (void)fprintf(stdout,"%s: INFO %s reports col_idx = %li triangle %d is nearly flat isosceles-shaped triangle. Canonical arcs a and b differ by %0.16e. Eschew L'Huilier's formula for spherical excess to avoid low precision. Could use SAS formula.\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,fabs(ngl_ltr_a-ngl_ltr_b)); /* Determine canonical surface angle C To find any angle given three spherical triangle sides, Wikipedia opines: "The cosine rule may be used to give the angles A, B, and C but, to avoid ambiguities, the half-angle formulae are preferred." Half-angle formulae include two applicable variants that yield the sine or cosine of half C Then C is determined as twice the asin() or acos() function, respectively For needle-shaped triangles, RHS sin formula is ~ sin^2(0)/sin(a)*sin(b) ~ 0.0 For needle-shaped triangles, RHS cos formula is ~ sin^2(s)/sin(a)*sin(b) ~ 0.5 For flat isosceles triangles, RHS sin formula is ~ sin^2(0)/sin(a)*sin(b) ~ 0.0 For flat isosceles triangles, RHS cos formula is ~ sin(s)*sin(0)/sin(a)*sin(b) ~ 0.0 Use sin formula since both needle- and isosceles-shaped triangles have RHS ~ 0.0 where arcsin() is most precise 20200203: Half-angle sine formula gives NaNs, and half-angle cosine formula works on ne120pg2->cmip. Why?
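A plausible explanation (a conjecture, not verified against the code or literature): for the needle triangles flagged above the canonical sides satisfy a ~ b ~ s, so the sine-formula radicand sin(s-a)*sin(s-b)/(sin(a)*sin(b)) multiplies two differences that are zero to within rounding, and roundoff can drive the radicand slightly negative, whence sqrt() returns NaN; the cosine-formula radicand sin(s)*sin(s-c)/(sin(a)*sin(b)) contains no such near-zero differences because canonical c is the short side.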
Adopting cosine formula because it works */ //sin_hlf_C=sqrt(sin(prm_smi-ngl_ltr_a)*sin(prm_smi-ngl_ltr_b)/(sin(ngl_ltr_a)*sin(ngl_ltr_b))); // Half-angle sine formula cos_hlf_C=sqrt(sin(prm_smi)*sin(prm_smi-ngl_ltr_c)/(sin(ngl_ltr_a)*sin(ngl_ltr_b))); // Half-angle cosine formula //ngl_sfc_ltr_C=2.0*asin(sin_hlf_C); ngl_sfc_ltr_C=2.0*acos(cos_hlf_C); /* SAS formula */ tan_hlf_a_tan_hlf_b=tan(0.5*ngl_ltr_a)*tan(0.5*ngl_ltr_b); xcs_sph_hlf_tan=tan_hlf_a_tan_hlf_b*sin(ngl_sfc_ltr_C)/(1.0+tan_hlf_a_tan_hlf_b*cos(ngl_sfc_ltr_C)); assert(fabs(xcs_sph_hlf_tan) != M_PI_2); xcs_sph=2.0*atan(xcs_sph_hlf_tan); if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO SAS area formula for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e). Spherical excess = %0.16e.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c,xcs_sph); // Single-line version // xcs_sph=2.0*atan(tan(0.5*ngl_ltr_a)*tan(0.5*ngl_ltr_b)*sin(2.0*acos(sqrt(sin(prm_smi)*sin(prm_smi-ngl_c)/(sin(ngl_a)*sin(ngl_b)))))/(1.0+tan_hlf_a_tan_hlf_b*cos(2.0*acos(sqrt(sin(prm_smi)*sin(prm_smi-ngl_c)/(sin(ngl_a)*sin(ngl_b))))))); /* Above procedure for problematic needle-shaped and isosceles-shaped triangles degrades statistics For ne30pg2, ne120pg2 -> cmip, setting area = 0.0 _greatly_ improves area statistics (Why?) Set spherical excess to zero for problematic needle-shaped and isosceles-shaped triangles */ /* fxm: Make zeroing skinny needles/isosceles-shaped triangle-areas a command-line option? */ if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO Setting SAS area = 0.0\n",nco_prg_nm_get()); xcs_sph=0.0; /* !flg_sas */ }else{ double xcs_sph_qtr_tan; /* [frc] Tangent of one-quarter the spherical excess */ xcs_sph_qtr_tan=sqrt(tan(0.5*prm_smi)*tan(0.5*(prm_smi-ngl_a))*tan(0.5*(prm_smi-ngl_b))*tan(0.5*(prm_smi-ngl_c))); assert(fabs(xcs_sph_qtr_tan) != M_PI_2); xcs_sph=4.0*atan(xcs_sph_qtr_tan); /* 20191014: Aggregate all previous area-related commands into one, gigantic, unreadable, possibly more precise command (tested and it is more obfuscated but not more precise) */ // xcs_sph=4.0*atan(sqrt(tan(0.5*0.5*(ngl_a+ngl_b+ngl_c))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_a))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_b))*tan(0.5*(0.5*(ngl_a+ngl_b+ngl_c)-ngl_c)))); } /* !flg_sas */ if(isnan(xcs_sph)){ const double eps_ngl_skn=1.0e-13; /* [frc] Angles skinnier than this form needles whose area ~ 0.0 */ /* Categorize reason for NaN */ (void)fprintf(stdout,"%s: WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING\nUnexpected NaN polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e).\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); if( /* Side exceeds semi-perimeter */ (ngl_a > prm_smi) || (ngl_b > prm_smi) || (ngl_c > prm_smi) ){ (void)fprintf(stdout,"%s: WARNING Triangle side exceeds semi-perimeter = %0.16e polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16e, %0.16e, %0.16e).
Assigned triangle area = 0.0.\n",nco_prg_nm_get(),prm_smi,col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); }else if( /* Are angles too skinny? Quite often on ne30pg2, ne120pg2 */ (ngl_a < eps_ngl_skn) || (ngl_b < eps_ngl_skn) || (ngl_c < eps_ngl_skn) ){ (void)fprintf(stdout,"%s: WARNING Triangle has at least one skinny angle < %g [rdn] for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16f, %0.16f, %0.16f). Assigned triangle area = 0.0.\n",nco_prg_nm_get(),eps_ngl_skn,col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); }else if( /* Are two vertices identical to double-precision? Never on ne30pg2, ne120pg2 */ ((lat_bnd[idx_a] == lat_bnd[idx_b]) && (lon_bnd[idx_a] == lon_bnd[idx_b])) || ((lat_bnd[idx_b] == lat_bnd[idx_c]) && (lon_bnd[idx_b] == lon_bnd[idx_c])) || ((lat_bnd[idx_c] == lat_bnd[idx_a]) && (lon_bnd[idx_c] == lon_bnd[idx_a])) ){ (void)fprintf(stdout,"%s: WARNING Triangle has repeated points for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g). Assigned triangle area = 0.0.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); }else{ (void)fprintf(stdout,"%s: WARNING Triangle area formula yields NaN for polygon col_idx = %li, triangle %d, vertices A, B, C at (lat,lon) [dgr] = (%0.16f, %0.16f), (%0.16f, %0.16f), (%0.16f, %0.16f). Interior angles/great circle arcs (a, b, c) [rdn] = (%0.16f, %0.16f, %0.16f). Are points co-linear? Assigned triangle area = 0.0.\n",nco_prg_nm_get(),col_idx,tri_idx,lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c],ngl_a,ngl_b,ngl_c); } /* !co-linear */ xcs_sph=0.0; } /* !NaN */ area[col_idx]+=xcs_sph; /* Accumulate spherical triangle area into reported polygon area and adjust below */ area_smc+=xcs_sph; /* Accumulate spherical triangle area into small-circle polygon area and adjust below */ area_ttl+=xcs_sph; /* Accumulate spherical triangle area into spherical polygon area */ area_smc_ttl+=xcs_sph; /* Accumulate spherical triangle area into total polygon area and adjust below */ /* 20160918 from here to end of loop is non-spherical work 20170217: Temporarily turn-off latitude circle diagnostics because Sungduk's POP case breaks them Canonical latitude-triangle geometry has point A at apex and points B and C at same latitude ncremap --dbg=1 --alg_typ=nco --grd_src=${DATA}/grids/ne30np4_pentagons.091226.nc --grd_dst=${DATA}/grids/cmip6_180x360_scrip.20181001.nc --map=${DATA}/maps/map_ne30np4_to_cmip6_180x360_nco.20190601.nc ncremap --dbg=1 -R 'edg_typ=smc' --alg_typ=nco --grd_src=${DATA}/grids/ne30np4_pentagons.091226.nc --grd_dst=${DATA}/grids/cmip6_180x360_scrip.20181001.nc --map=${DATA}/maps/map_ne30np4_to_cmip6_180x360_smc.20190601.nc */ flg_tri_crr_smc=False; if(lat_bnd_rdn[idx_a] == lat_bnd_rdn[idx_b] || lat_bnd_rdn[idx_b] == lat_bnd_rdn[idx_c] || lat_bnd_rdn[idx_c] == lat_bnd_rdn[idx_a]){ /* Set flag only if triangle is not degenerate.
Degenerate triangles (3 points on a geodesic) have zero area */ if(xcs_sph != 0.0) flg_ply_has_smc=flg_tri_crr_smc=True; if(nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: DEBUG Found small circle triangle with vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); } /* endif */ if((edg_typ == nco_edg_smc) && flg_tri_crr_smc){ double ngl_plr; /* [rdn] Polar angle (co-latitude) */ long idx_ltr_a; /* [idx] Point A (apex) of canonical latitude-triangle geometry, 1-D index */ long idx_ltr_b; /* [idx] Point B (base) of canonical latitude-triangle geometry, 1-D index */ long idx_ltr_c; /* [idx] Point C (base) of canonical latitude-triangle geometry, 1-D index */ /* Rotate labels to standard position with vertex A, equi-latitude points B and C */ if(lat_bnd_rdn[idx_a] == lat_bnd_rdn[idx_b]){ idx_ltr_a=idx_c; idx_ltr_b=idx_a; idx_ltr_c=idx_b; ngl_ltr_a=ngl_c; ngl_ltr_b=ngl_a; ngl_ltr_c=ngl_b; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_a]); }else if(lat_bnd_rdn[idx_b] == lat_bnd_rdn[idx_c]){ idx_ltr_a=idx_a; idx_ltr_b=idx_b; idx_ltr_c=idx_c; ngl_ltr_a=ngl_a; ngl_ltr_b=ngl_b; ngl_ltr_c=ngl_c; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_b]); }else if(lat_bnd_rdn[idx_c] == lat_bnd_rdn[idx_a]){ idx_ltr_a=idx_b; idx_ltr_b=idx_c; idx_ltr_c=idx_a; ngl_ltr_a=ngl_b; ngl_ltr_b=ngl_c; ngl_ltr_c=ngl_a; ngl_plr=fabs(M_PI_2-lat_bnd_rdn[idx_c]); }else{ (void)fprintf(stdout,"%s: ERROR latitudes not equal in small circle section. Vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_a],lon_bnd[idx_a],lat_bnd[idx_b],lon_bnd[idx_b],lat_bnd[idx_c],lon_bnd[idx_c]); abort(); } /* endif */ /* 20160918: Compute exact area of latitude triangle wedge */ double xpn_x; /* [frc] Expansion parameter */ lon_dlt=fabs(nco_lon_dff_brnch_rdn(lon_bnd_rdn[idx_ltr_b],lon_bnd_rdn[idx_ltr_c])); assert(lon_dlt != 0.0); // Latitude triangles must have bases with distinct longitudes if(lon_dlt != M_PI){ /* Normal clause executed for small-circle triangles */ /* Numeric conditioning requires care: analytically (1.0-cos(lon_dlt))/sin(lon_dlt) = tan(0.5*lon_dlt), but the quotient form loses precision when lon_dlt << 1 because 1.0-cos(lon_dlt) suffers cancellation */ xpn_x=lat_bnd_sin[idx_ltr_b]*(1.0-cos(lon_dlt))/sin(lon_dlt); assert(fabs(xpn_x) != M_PI_2); area_smc_crc=2.0*atan(xpn_x); /* 20170217: Sungduk's POP regrid triggers following abort(): ncremap -D 1 -i ~/pop_g16.nc -d ~/cam_f19.nc -o ~/foo.nc */ //assert(xpn_x >= 0.0); //if(lat_bnd[idx_ltr_b] > 0.0) area_smc_crc+=-lon_dlt*lat_bnd_sin[idx_ltr_b]; else area_smc_crc+=+lon_dlt*lat_bnd_sin[idx_ltr_b]; area_smc_crc+=-lon_dlt*lat_bnd_sin[idx_ltr_b]; }else{ /* 20200228: Latitude triangles may have bases with longitudes that differ by 180 degrees Consider a quadrilateral with four equidistant vertices in longitude, and that caps a pole: CSZ decomposition technique divides this into two triangles each with three co-latitudinal points and no vertex at pole Solution candidates: 1. Divide such quadrilaterals using centroid technique Just realized current implementation of centroid decomposition fails on polar caps Failure occurs because the true centroid latitude is +/- ~90, not the mean of the vertices' latitudes Must impute "pseudo-centroid" with latitude +/- 90 instead of averaging vertex latitudes Requires testing each polygon to determine if it contains pole <- Too difficult/expensive 2.
Assume latitude triangles whose base is 180 degrees are at pole
Compute area exactly using analytic formula for annular lune */
(void)fprintf(stdout,"%s: INFO longitudes differ by pi in small circle section. Vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_ltr_a],lon_bnd[idx_ltr_a],lat_bnd[idx_ltr_b],lon_bnd[idx_ltr_b],lat_bnd[idx_ltr_c],lon_bnd[idx_ltr_c]);
(void)fprintf(stdout,"%s: DEBUG col_nbr=%lu, bnd_nbr=%d, col_idx=%ld, area=%g. Vertices [0..bnd_nbr-1] in format idx (lat,lon)\n",nco_prg_nm_get(),col_nbr,bnd_nbr,col_idx,xcs_sph);
for(int bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%2d (%g, %g)\n",bnd_idx,lat_bnd[bnd_nbr*col_idx+bnd_idx],lon_bnd[bnd_nbr*col_idx+bnd_idx]);
(void)fprintf(stdout,"%s: INFO Assuming this triangle is decomposed from polar cap polygon. Treating area with analytic formula for annular lune\n",nco_prg_nm_get());
/* Compute small circle correction as difference between spherical triangle area and standard annular lune formula
Small circle correction is positive-definite for polar triangles so use fabs(sin(lat_bnd_sin)) */
area_smc_crc=lon_dlt*fabs(lat_bnd_sin[idx_ltr_b])-area_smc;
} /* !lon_dlt */
// Adjust diagnostic areas by small-circle area correction
area_smc+=area_smc_crc;
area_smc_ttl+=area_smc_crc;
area_smc_crc_ttl+=area_smc_crc;
area_smc_crc_abs_ttl+=fabs(area_smc_crc);
// 20200109: Adjust area reported to calling code by small-circle area correction
area[col_idx]+=area_smc_crc;
if(0){
/* 20160918: Approximate area of latitude triangle wedge. Use truncated power expansion of exact formula. */
double xpn_x_sqr; /* [frc] Expansion parameter squared */
double xpn_sum=0.0; /* [frc] Expansion sum (initialized here so terms do not accumulate into uninitialized storage) */
double xpn_nmr; /* [frc] Expansion term numerator */
double xpn_trm; /* [frc] Expansion term */
double xpn_dnm; /* [frc] Expansion term denominator */
const unsigned short int rdr_xpn=3; /* [nbr] Order of N in trigonometric series expansion */
unsigned short int idx_xpn; /* [idx] Index in series expansion */
xpn_x=cos(ngl_plr)*(1.0-cos(lon_dlt))/sin(lon_dlt);
xpn_x_sqr=xpn_x*xpn_x;
xpn_nmr=xpn_x;
xpn_dnm=1.0;
xpn_trm=xpn_nmr/xpn_dnm;
xpn_sum+=xpn_trm;
for(idx_xpn=3;idx_xpn<=rdr_xpn;idx_xpn+=2){
xpn_nmr*=xpn_x_sqr;
xpn_dnm*=(idx_xpn-1)*idx_xpn;
xpn_trm=xpn_nmr/xpn_dnm;
xpn_sum+=xpn_trm;
} /* !idx_xpn */
(void)fprintf(stdout,"%s: Small-circle area using series approximation...not implemented yet\n",nco_prg_nm_get());
} /* !0 */
if(nco_dbg_lvl_get() >= nco_dbg_scl){
(void)fprintf(stdout,"%s: INFO %s col_idx = %li triangle %d spherical area, latitude-triangle area, %% difference: %g, %g, %g%%\n",nco_prg_nm_get(),fnc_nm,col_idx,tri_idx,xcs_sph,xcs_sph+area_smc_crc,100.0*area_smc_crc/xcs_sph);
if(fabs(area_smc_crc/xcs_sph) > 0.1){
(void)fprintf(stdout,"%s: DEBUG Non-spherical correction exceeds 10%% for current triangle with vertices A, B, C at (lat,lon) [dgr] = (%g, %g), (%g, %g), (%g, %g)\n",nco_prg_nm_get(),lat_bnd[idx_ltr_a],lon_bnd[idx_ltr_a],lat_bnd[idx_ltr_b],lon_bnd[idx_ltr_b],lat_bnd[idx_ltr_c],lon_bnd[idx_ltr_c]);
} /* !fabs */
} /* !dbg */
} /* !edg_typ && flg_tri_crr_smc */
} /* !tri_idx */
if(edg_typ == nco_edg_smc && flg_ply_has_smc){
/* Current gridcell contained at least one latitude-triangle */
if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO %s col_idx = %li spherical area, small circle area, %% difference: %g, %g, %g%%\n",nco_prg_nm_get(),fnc_nm,col_idx,area[col_idx],area_smc,100.0*(area_smc-area[col_idx])/area[col_idx]);
} /* !edg_typ && !flg_ply_has_smc */
} /*
!col_idx */ if(edg_typ == nco_edg_smc && nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: INFO %s total spherical area, small circle area, %% difference, crc_ttl, crc_abs_ttl: %g, %g, %g%%, %g, %g\n",nco_prg_nm_get(),fnc_nm,area_ttl,area_smc_ttl,100.0*(area_smc_ttl-area_ttl)/area_ttl,area_smc_crc_ttl,area_smc_crc_abs_ttl); if(vrt_vld) vrt_vld=(long *)nco_free(vrt_vld); if(a_idx) a_idx=(long *)nco_free(a_idx); if(b_idx) b_idx=(long *)nco_free(b_idx); if(c_idx) c_idx=(long *)nco_free(c_idx); if(lat_bnd_rdn) lat_bnd_rdn=(double *)nco_free(lat_bnd_rdn); if(lon_bnd_rdn) lon_bnd_rdn=(double *)nco_free(lon_bnd_rdn); if(lat_bnd_cos) lat_bnd_cos=(double *)nco_free(lat_bnd_cos); if(lon_bnd_cos) lon_bnd_cos=(double *)nco_free(lon_bnd_cos); if(lat_bnd_sin) lat_bnd_sin=(double *)nco_free(lat_bnd_sin); if(lon_bnd_sin) lon_bnd_sin=(double *)nco_free(lon_bnd_sin); } /* !nco_sph_plg_area() */ int /* O [enm] Return code */ nco_rgr_tps /* [fnc] Regrid using TempestRemap library */ (rgr_sct * const rgr) /* I/O [sct] Regridding structure */ { /* Purpose: Regrid fields using TempestRemap "library" (more precisely, executables) Routine was originally written to call Tempest executables However, that functionality was all placed into the ncremap shell script Thus this C-interface is currently unused TempestRemap2 has a library that may be accessed on-line Test Tempest library: no way to activate yet export DATA_TEMPEST='/data/zender/rgr';ncks -O --rgr=Y ${DATA}/rgr/essgcm14_clm.nc ~/foo.nc */ const char fnc_nm[]="nco_rgr_tps()"; const int fmt_chr_nbr=6; const char *cmd_rgr_fmt; char *cmd_rgr; char fl_grd_dst[]="/tmp/foo_outRLLMesh.g"; char *fl_grd_dst_cdl; int rcd_sys; int lat_nbr_rqs=180; int lon_nbr_rqs=360; nco_rgr_tps_cmd nco_tps_cmd; /* [enm] TempestRemap command enum */ char *nvr_DATA_TEMPEST; /* [sng] Directory where Tempest grids, meshes, and weights are stored */ nvr_DATA_TEMPEST=getenv("DATA_TEMPEST"); rgr->drc_tps= (nvr_DATA_TEMPEST && strlen(nvr_DATA_TEMPEST) > 0L) ? (char *)strdup(nvr_DATA_TEMPEST) : (char *)strdup("/tmp"); if(nco_dbg_lvl_get() >= nco_dbg_crr){ (void)fprintf(stderr,"%s: INFO %s reports\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stderr,"drc_tps = %s, ",rgr->drc_tps ? 
rgr->drc_tps : "NULL"); (void)fprintf(stderr,"\n"); } /* endif dbg */ /* Allow for whitespace characters in fl_grd_dst Assume CDL translation results in acceptable name for shell commands */ fl_grd_dst_cdl=nm2sng_fl(fl_grd_dst); /* Construct and execute regridding command */ nco_tps_cmd=nco_rgr_GenerateRLLMesh; cmd_rgr_fmt=nco_tps_cmd_fmt_sng(nco_tps_cmd); cmd_rgr=(char *)nco_malloc((strlen(cmd_rgr_fmt)+strlen(fl_grd_dst_cdl)-fmt_chr_nbr+1UL)*sizeof(char)); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stderr,"%s: %s reports generating %d by %d RLL mesh in %s...\n",nco_prg_nm_get(),fnc_nm,lat_nbr_rqs,lon_nbr_rqs,fl_grd_dst); (void)sprintf(cmd_rgr,cmd_rgr_fmt,lat_nbr_rqs,lon_nbr_rqs,fl_grd_dst_cdl); rcd_sys=system(cmd_rgr); if(rcd_sys == -1){ (void)fprintf(stdout,"%s: ERROR %s unable to complete TempestRemap regridding command \"%s\"\n",nco_prg_nm_get(),fnc_nm,cmd_rgr); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"done\n"); /* Clean-up memory */ if(fl_grd_dst_cdl) fl_grd_dst_cdl=(char *)nco_free(fl_grd_dst_cdl); if(cmd_rgr) cmd_rgr=(char *)nco_free(cmd_rgr); return NCO_NOERR; } /* end nco_rgr_tps() */ const char * /* O [sng] String describing two-dimensional grid-type */ nco_grd_2D_sng /* [fnc] Convert two-dimensional grid-type enum to string */ (const nco_grd_2D_typ_enm nco_grd_2D_typ) /* I [enm] Two-dimensional grid-type enum */ { /* Purpose: Convert two-dimensional grid-type enum to string */ switch(nco_grd_2D_typ){ case nco_grd_2D_unk: return "Unknown, unclassified, or unrepresentable 2D grid type (e.g., unstructured, curvilinear, POP displaced-pole)"; case nco_grd_2D_gss: return "Gaussian latitude grid. Used by spectral transform models, e.g., CCM 1-3, CAM 1-3, ECMWF Forecast, LSM, MATCH, NCEP (R1, R2), UCICTM."; case nco_grd_2D_fv: return "Cap-latitude grid, aka FV-scalar grid (in Lin-Rood representation). When global (not regional) in extent and with odd number of latitudes, poles are considered at (and labeled as) centers of first and last gridcells. For example lat_ctr=-90,-89,-88,... and lat_crn=-89.5,-88.5,-87.5,... Thus pole-gridcells span half the equi-angular latitude increment of the rest of the grid. Used by CAM FV (i.e., CAM 4-6), ECMWF (ERA-I, ERA40, ERA5), GEOS-CHEM, UCICTM, UKMO."; case nco_grd_2D_eqa: return "Uniform/Equi-Angular latitude grid. Uniform/Equi-angle (everywhere) latitude grid. When global (not regional) in extent and with even number of latitudes, poles are at corners/edges of first and last gridcells. For example lat_ctr=-89.5,-88.5,-87.5,... and lat_crn=-90,-89,-88,.... When global, forms valid FV-staggered (aka FV-velocity, aka offset) grid (for Lin-Rood representation). 
Used by CIESIN/SEDAC, IGBP-DIS, NASA CMG, TOMS AAI, WOCE."; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_2D_sng() */ const char * /* O [sng] String describing latitude grid-type */ nco_grd_lat_sng /* [fnc] Convert latitude grid-type enum to string */ (const nco_grd_lat_typ_enm nco_grd_lat_typ) /* I [enm] Latitude grid-type enum */ { /* Purpose: Convert latitude grid-type enum to string */ switch(nco_grd_lat_typ){ case nco_grd_lat_unk: return "Unknown, unclassified, or unrepresentable latitude grid type (e.g., unstructured, curvilinear, POP3)"; case nco_grd_lat_gss: return "Gaussian latitude grid used by global spectral models: CCM 1-3, CAM 1-3, ECMWF Forecast, LSM, MATCH, NCEP (R1, R2), UCICTM."; case nco_grd_lat_fv: return "Cap-latitude grid, aka FV-scalar grid (in Lin-Rood representation). When global (not regional) in extent and with odd number of latitudes, poles are considered at (and labeled as) centers of first and last gridcells. For example lat_ctr=-90,-89,-88,... and lat_crn=-89.5,-88.5,-87.5,... Thus pole-gridcells span half the equi-angular latitude increment of the rest of the grid. Used by CAM FV (i.e., CAM 4-6), ECMWF (ERA-I, ERA40, ERA5), GEOS-CHEM, UCICTM, UKMO."; case nco_grd_lat_eqa: return "Uniform/Equi-Angular latitude grid. Uniform/Equi-angle (everywhere) latitude grid. When global (not regional) in extent and with even number of latitudes, poles are at corners/edges of first and last gridcells. For example lat_ctr=-89.5,-88.5,-87.5,... and lat_crn=-90,-89,-88,.... When global, forms valid FV-staggered (aka FV-velocity, aka offset) grid (for Lin-Rood representation). Used by CIESIN/SEDAC, IGBP-DIS, NASA CMG, TOMS AAI, WOCE."; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_lat_sng() */ const char * /* O [sng] String describing longitude grid-type */ nco_grd_lon_sng /* [fnc] Convert longitude grid-type enum to string */ (const nco_grd_lon_typ_enm nco_grd_lon_typ) /* I [enm] Longitude grid-type enum */ { /* Purpose: Convert longitude grid-type enum to string */ switch(nco_grd_lon_typ){ case nco_grd_lon_unk: return "Unknown, unclassified, or unrepresentable longitude grid type (e.g., unstructured, curvilinear)"; case nco_grd_lon_180_wst: return "Date line at west edge of first longitude cell"; case nco_grd_lon_180_ctr: return "Date line at center of first longitude cell"; case nco_grd_lon_Grn_wst: return "Greenwich at west edge of first longitude cell"; case nco_grd_lon_Grn_ctr: return "Greenwich at center of first longitude cell"; case nco_grd_lon_bb: return "Longitude grid determined by bounding box (lon_wst/lon_est) and gridcell number (lon_nbr)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_lon_sng() */ const char * /* O [sng] String describing grid extent */ nco_grd_xtn_sng /* [fnc] Convert two-dimensional grid-extent enum to string */ (const nco_grd_xtn_enm nco_grd_xtn) /* I [enm] Grid-extent enum */ { /* Purpose: Convert grid-extent enum to string */ switch(nco_grd_xtn){ case nco_grd_xtn_nil: return "Unknown"; case nco_grd_xtn_glb: return "Global"; case nco_grd_xtn_rgn: return "Regional"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* 
Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_grd_xtn_sng() */ const char * /* O [sng] String describing grid conversion */ nco_rgr_grd_sng /* [fnc] Convert grid conversion enum to string */ (const nco_rgr_typ_enm nco_rgr_typ) /* I [enm] Grid conversion enum */ { /* Purpose: Convert grid conversion enum to string */ switch(nco_rgr_typ){ case nco_rgr_grd_1D_to_1D: return "1D_to_1D"; case nco_rgr_grd_1D_to_2D: return "1D_to_2D"; case nco_rgr_grd_2D_to_1D: return "2D_to_1D"; case nco_rgr_grd_2D_to_2D: return "2D_to_2D"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_grd_sng() */ const char * /* O [sng] String describing regridding method */ nco_rgr_mth_sng /* [fnc] Convert regridding method enum to string */ (const nco_rgr_mth_typ_enm nco_rgr_mth_typ) /* I [enm] Regridding method enum */ { /* Purpose: Convert regridding method enum to string */ switch(nco_rgr_mth_typ){ case nco_rgr_mth_conservative: return "Conservative remapping"; case nco_rgr_mth_bilinear: return "Bilinear remapping"; case nco_rgr_mth_none: return "none"; case nco_rgr_mth_unknown: return "Unknown (TempestRemap or ESMF_weight_only)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_mth_sng() */ const char * /* O [sng] String describing mapfile generator */ nco_rgr_mpf_sng /* [fnc] Convert mapfile generator enum to string */ (const nco_rgr_mpf_typ_enm nco_rgr_mpf_typ) /* I [enm] Mapfile generator enum */ { /* Purpose: Convert mapfile generator enum to string */ switch(nco_rgr_mpf_typ){ case nco_rgr_mpf_ESMF: return "ESMF Offline Regridding Weight Generator (ERWG), either from ESMF_RegridWeightGen directly or via NCL"; case nco_rgr_mpf_SCRIP: return "SCRIP (original LANL package)"; case nco_rgr_mpf_Tempest: return "TempestRemap (GenerateOfflineMap)"; case nco_rgr_mpf_ESMF_weight_only: return "ESMF Offline Regridding Weight Generator (ERWG), either from ESMF_RegridWeightGen directly or via NCL, with --weight_only option from ERWG 7.1+"; case nco_rgr_mpf_NCO: return "netCDF Operators (NCO) Offline Regridding Weight Generator"; case nco_rgr_mpf_MBTR: return "MOAB-TempestRemap Online Regridding Weight Generator"; case nco_rgr_mpf_unknown: return "Unknown Weight Generator"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_mpf_sng() */ const char * /* O [sng] String describing regridding normalization */ nco_rgr_nrm_sng /* [fnc] Convert regridding normalization enum to string */ (const nco_rgr_nrm_typ_enm nco_rgr_nrm_typ) /* I [enm] Regridding normalization enum */ { /* Purpose: Convert regridding normalization enum to string */ switch(nco_rgr_nrm_typ){ case nco_rgr_nrm_fracarea: return "fracarea"; case nco_rgr_nrm_destarea: return "destarea"; case nco_rgr_nrm_none: return "none"; case nco_rgr_nrm_unknown: return "Unknown (possibilities include ESMF_weight_only, NCO, and TempestRemap)"; default: nco_dfl_case_generic_err(); break; } /* end switch */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* end nco_rgr_nrm_sng() */ const char * /* O [sng] String containing regridding command and format */ 
nco_tps_cmd_fmt_sng /* [fnc] Convert TempestRemap command enum to command string */
(const nco_rgr_tps_cmd nco_tps_cmd) /* I [enm] TempestRemap command enum */
{
/* Purpose: Convert TempestRemap command enum to command string and format */
switch(nco_tps_cmd){
case nco_rgr_ApplyOfflineMap: return "ApplyOfflineMap";
case nco_rgr_CalculateDiffNorms: return "CalculateDiffNorms";
case nco_rgr_GenerateCSMesh: return "GenerateCSMesh --res %d --file %s";
case nco_rgr_GenerateGLLMetaData: return "GenerateGLLMetaData";
case nco_rgr_GenerateICOMesh: return "GenerateICOMesh";
case nco_rgr_GenerateLambertConfConicMesh: return "GenerateLambertConfConicMesh";
case nco_rgr_GenerateOfflineMap: return "GenerateOfflineMap --in_mesh %s --out_mesh %s --ov_mesh %s --in_data %s --out_data %s";
case nco_rgr_GenerateOverlapMesh: return "GenerateOverlapMesh --a %s --b %s --out %s";
case nco_rgr_GenerateRLLMesh: return "GenerateRLLMesh --lat %d --lon %d --file %s";
case nco_rgr_GenerateTestData: return "GenerateTestData --mesh %s --np %d --test %d --out %s";
case nco_rgr_MeshToTxt: return "MeshToTxt";
case nco_rgr_AAA_nil:
case nco_rgr_ZZZ_last:
default: nco_dfl_case_generic_err(); break;
} /* end switch */
/* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
return (char *)NULL;
} /* end nco_tps_cmd_fmt_sng() */

const char * /* O [sng] String containing regridding command name */
nco_tps_cmd_sng /* [fnc] Convert TempestRemap command enum to command name */
(const nco_rgr_tps_cmd nco_tps_cmd) /* I [enm] TempestRemap command enum */
{
/* Purpose: Convert TempestRemap command enum to command string */
switch(nco_tps_cmd){
case nco_rgr_ApplyOfflineMap: return "ApplyOfflineMap";
case nco_rgr_CalculateDiffNorms: return "CalculateDiffNorms";
case nco_rgr_GenerateCSMesh: return "GenerateCSMesh";
case nco_rgr_GenerateGLLMetaData: return "GenerateGLLMetaData";
case nco_rgr_GenerateICOMesh: return "GenerateICOMesh";
case nco_rgr_GenerateLambertConfConicMesh: return "GenerateLambertConfConicMesh";
case nco_rgr_GenerateOfflineMap: return "GenerateOfflineMap";
case nco_rgr_GenerateOverlapMesh: return "GenerateOverlapMesh";
case nco_rgr_GenerateRLLMesh: return "GenerateRLLMesh";
case nco_rgr_GenerateTestData: return "GenerateTestData";
case nco_rgr_MeshToTxt: return "MeshToTxt";
case nco_rgr_AAA_nil:
case nco_rgr_ZZZ_last:
default: nco_dfl_case_generic_err(); break;
} /* end switch */
/* Some compilers: e.g., SGI cc, need return statement to end non-void functions */
return (char *)NULL;
} /* end nco_tps_cmd_sng() */

int /* O [enm] Return code */
nco_grd_mk /* [fnc] Create SCRIP-format grid file */
(rgr_sct * const rgr) /* I/O [sct] Regridding structure */
{
/* Purpose: Use grid information to create SCRIP-format grid file
Spherical geometry terminology:
spherical cap = spherical dome = volume cut-off by plane
spherical lune = digon = area bounded by two half-great circles = base of spherical wedge
spherical segment = volume defined by cutting sphere with pair parallel planes
spherical sector = volume subtended by lat1
spherical wedge = ungula = volume subtended by lon2-lon1
spherical zone = area of spherical segment excluding bases
spherical quadrangle = area of intersection of spherical zone and lune (i.e., area of latitude-longitude gridcell on sphere)
bearing = angle from true north
geodesic = shortest path between points on a surface
great circle = orthodrome = "straight path" = geodesic of the sphere
convergency = difference (in azimuth?)
between great circle tracks at two different positions conversion angle = angle between geodesic and rhumb line rhumb line = loxodrome = "oblique (or slanted) path" = line of constant azimuth Formulae: http://www.movable-type.co.uk/scripts/latlong.html # On-line Javascript implementation http://williams.best.vwh.net/avform.htm ACME: https://acme-svn2.ornl.gov/acme-repo/acme/mapping/grids https://acme-svn2.ornl.gov/acme-repo/acme/inputdata/cpl/gridmaps NCAR: yellowstone.ucar.edu:/glade/p/cesm/cseg/mapping/grids yellowstone.ucar.edu:/glade/p_old/cesm/cseg/mapping/grids Global RLL grids: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr grid=${DATA}/grids/180x360_SCRIP.20150901.nc --rgr latlon=180,360 --rgr lat_typ=eqa --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Equiangular grid 90x180' --rgr grid=${DATA}/grids/90x180_SCRIP.20150901.nc --rgr latlon=90,180 --rgr lat_typ=eqa --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc Maps for global RLL grids: ESMF_RegridWeightGen -s ${DATA}/grids/180x360_SCRIP.20150901.nc -d ${DATA}/grids/90x180_SCRIP.20150901.nc -w ${DATA}/maps/map_180x360_to_90x180.20150901.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/90x180_SCRIP.20150901.nc -d ${DATA}/grids/180x360_SCRIP.20150901.nc -w ${DATA}/maps/map_90x180_to_180x360.20150901.nc --method conserve ACME grids: ncks -O -D 1 --rgr ttl='FV-scalar grid 129x256' --rgr grid=${DATA}/grids/129x256_SCRIP.20150910.nc --rgr latlon=129,256 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='FV-scalar grid 257x512' --rgr grid=${DATA}/grids/257x512_SCRIP.20150910.nc --rgr latlon=257,512 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='FV-scalar grid 801x1600' --rgr grid=${DATA}/grids/801x1600_SCRIP.20150910.nc --rgr latlon=801,1600 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ACME maps: ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/129x256_SCRIP.20150910.nc -w ${DATA}/maps/map_ne30np4_to_fv129x256_aave.20150910.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/257x512_SCRIP.20150910.nc -w ${DATA}/maps/map_ne30np4_to_fv257x512_bilin.20150910.nc --method bilinear ESMF_RegridWeightGen -s ${DATA}/grids/ne120np4_pentagons.100310.nc -d ${DATA}/grids/257x512_SCRIP.20150910.nc -w ${DATA}/maps/map_ne120np4_to_fv257x512_aave.20150910.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne120np4_pentagons.100310.nc -d ${DATA}/grids/801x1600_SCRIP.20150910.nc -w ${DATA}/maps/map_ne120np4_to_fv801x1600_bilin.20150910.nc --method bilinear AMWG grids: AMWG diagnostics (until ~2016) mis-diagnose FV grids with odd numbers of latitudes as Gaussian Grids ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 96x144 for horizontal resolution 1.9x2.5 degrees' --rgr grid=${DATA}/grids/96x144_SCRIP.20160301.nc --rgr latlon=96,144 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 192x288 for horizontal resolution 0.9x1.25 degrees' --rgr grid=${DATA}/grids/192x288_SCRIP.20160301.nc --rgr latlon=192,288 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 128x256 for horizontal resolution 1.4x1.4 degrees' --rgr grid=${DATA}/grids/128x256_SCRIP.20160301.nc --rgr latlon=128,256 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 256x512 for horizontal 
resolution 0.7x0.7 degrees' --rgr grid=${DATA}/grids/256x512_SCRIP.20160301.nc --rgr latlon=256,512 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='CAM FV-scalar grid 800x1600 for horizontal resolution 0.225x0.225 degrees' --rgr grid=${DATA}/grids/800x1600_SCRIP.20160301.nc --rgr latlon=800,1600 --rgr lat_typ=cap --rgr lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Equiangular grid 360x720 produced by RTM' --rgr grid=${DATA}/grids/360x720rtm_SCRIP.20160301.nc --rgr latlon=360,720 --rgr lat_typ=eqa --rgr lon_typ=180_wst ~/nco/data/in.nc ~/foo.nc AMWG maps old method (no provenance archived): ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/128x256_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/256x512_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv256x512_bilin.20160301.nc --method bilinear ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/256x512_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv256x512_aave.20160301.nc --method conserve ESMF_RegridWeightGen -s ${DATA}/grids/ne30np4_pentagons.091226.nc -d ${DATA}/grids/800x1600_SCRIP.20160301.nc -w ${DATA}/maps/map_ne30np4_to_fv800x1600_bilin.20160301.nc --method bilinear AMWG maps with ncremap (preferred method): ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/128x256_SCRIP.20160301.nc -m ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/ne30np4_pentagons.091226.nc -g ${DATA}/grids/256x512_SCRIP.20160301.nc -m ${DATA}/maps/map_ne30np4_to_fv256x512_bilin.20160301.nc -w esmf -a bilinear ncremap -s ${DATA}/grids/ne120np4_pentagons.100310.nc -g ${DATA}/grids/256x512_SCRIP.20160301.nc -m ${DATA}/maps/map_ne120np4_to_fv256x512_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/ne120np4_pentagons.100310.nc -g ${DATA}/grids/800x1600_SCRIP.20160301.nc -m ${DATA}/maps/map_ne120np4_to_fv800x1600_bilin.20160301.nc -w esmf -a bilinear MPAS grids: NCO cannot yet generate MPAS grids, but given an MPAS grid it can generate appropriate maps MPAS maps: ncremap -s ${DATA}/grids/oEC60to30.SCRIP.150729.nc -g ${DATA}/grids/t62_SCRIP.20150901.nc -m ${DATA}/maps/map_oEC60to30_to_t62_aave.20160301.nc -w esmf -a conserve ncremap -s ${DATA}/grids/oEC60to30.SCRIP.150729.nc -g ${DATA}/grids/t62_SCRIP.20150901.nc -m ${DATA}/maps/map_oEC60to30_to_t62_bilin.20160301.nc -w esmf -a bilinear Regional RLL grids: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr grid=${DATA}/sld/rgr/grd_dst.nc --rgr latlon=100,100 --rgr snwe=30.0,70.0,-120.0,-90.0 ~/nco/data/in.nc ~/foo.nc Global RLL skeleton: ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr skl=${DATA}/sld/rgr/skl_180x360.nc --rgr grid=${DATA}/grids/180x360_SCRIP.20150901.nc --rgr latlon=180,360#lat_typ=eqa#lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc Curvilinear grids: ncks -O -D 1 --rgr ttl='Curvilinear grid 10x20. Degenerate case.' --rgr crv --rgr lon_crv=0.0 --rgr skl=${DATA}/sld/rgr/skl_crv.nc --rgr grid=${DATA}/sld/rgr/grd_crv.nc --rgr latlon=10,20 --rgr snwe=-5.0,5.0,-10.0,10.0 ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Curvilinear grid 10x20. 
Curvilinearity = 1.0 lon' --rgr lon_crv=1.0 --rgr skl=${DATA}/sld/rgr/skl_crv.nc --rgr grid=${DATA}/sld/rgr/grd_crv.nc --rgr latlon=10,20 --rgr snwe=-5.0,5.0,-10.0,10.0 ~/nco/data/in.nc ~/foo.nc 1-D Latitude (no longitude) grids: ncks -O -D 1 --rgr ttl='Latitude-only zonal grid' --rgr skl=${DATA}/sld/rgr/skl_lat_10dgr_uni.nc --rgr grid=${DATA}/sld/rgr/grd_lat_10dgr_uni.nc --rgr latlon=18,1 --rgr snwe=-90,90,0,360 ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr ttl='Latitude-only zonal grid' --rgr skl=${DATA}/sld/rgr/skl_lat_05dgr_cap.nc --rgr grid=${DATA}/sld/rgr/grd_lat_05dgr_cap.nc --rgr latlon=37,1 --rgr snwe=-90,90,0,360 ~/nco/data/in.nc ~/foo.nc ncremap -i ${DATA}/sld/rgr/skl_lat_10dgr_uni.nc -d ${DATA}/sld/rgr/skl_lat_05dgr_cap.nc -m ${DATA}/maps/map_lat10uni_to_lat05cap_aave.nc -o ~/rgr/lat10to05.nc ESMF_RegridWeightGen -s ${DATA}/sld/rgr/grd_lat_10dgr_uni.nc -d ${DATA}/sld/rgr/grd_lat_05dgr_cap.nc -w ${DATA}/maps/map_lat10uni_to_lat05cap_aave.nc --method conserve */ const char fnc_nm[]="nco_grd_mk()"; /* [sng] Function name */ const double rdn2dgr=180.0/M_PI; const double dgr2rdn=M_PI/180.0; const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ const int dmn_nbr_3D=3; /* [nbr] Rank of 3-D grid variables */ const int dmn_nbr_grd_max=dmn_nbr_3D; /* [nbr] Maximum rank of grid variables */ const int itr_nbr_max=20; // [nbr] Maximum number of iterations const nc_type crd_typ=NC_DOUBLE; char *fl_out_tmp=NULL_CEWI; char *fl_out; char grd_area_nm[]="grid_area"; /* 20150830: NB ESMF_RegridWeightGen --user_areas looks for variable named "grid_area" */ char dmn_sz_nm[]="grid_dims"; char grd_crn_lat_nm[]="grid_corner_lat"; char grd_crn_lon_nm[]="grid_corner_lon"; char grd_crn_nm[]="grid_corners"; char grd_ctr_lat_nm[]="grid_center_lat"; char grd_ctr_lon_nm[]="grid_center_lon"; char grd_rnk_nm[]="grid_rank"; char grd_sz_nm[]="grid_size"; char msk_nm[]="grid_imask"; double *grd_ctr_lat; /* [dgr] Latitude centers of grid */ double *grd_ctr_lon; /* [dgr] Longitude centers of grid */ double *grd_crn_lat; /* [dgr] Latitude corners of grid */ double *grd_crn_lon; /* [dgr] Longitude corners of grid */ double *area; /* [sr] Area of grid */ double *lat_bnd=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular grid */ double *lat_crn=NULL; /* [dgr] Latitude corners of rectangular grid */ double *lat_ctr=NULL_CEWI; /* [dgr] Latitude centers of rectangular grid */ double *lat_ntf=NULL; /* [dgr] Latitude interfaces of rectangular grid */ double *lat_wgt=NULL; /* [dgr] Latitude weights of rectangular grid */ double *lon_bnd=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular grid */ double *lon_crn=NULL; /* [dgr] Longitude corners of rectangular grid */ double *lon_ctr=NULL_CEWI; /* [dgr] Longitude centers of rectangular grid */ double *lon_ntf=NULL; /* [dgr] Longitude interfaces of rectangular grid */ double area_ttl=0.0; /* [frc] Exact sum of area */ double lat_crv; /* [dgr] Latitudinal curvilinearity */ double lon_crv; /* [dgr] Longitudinal curvilinearity */ double lat_nrt; /* [dgr] Latitude of northern edge of grid */ double lat_sth; /* [dgr] Latitude of southern edge of grid */ double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */ double lat_wgt_gss; /* [frc] Latitude weight estimated from interface latitudes */ double lon_est; /* [dgr] Longitude of eastern edge of grid */ double lon_wst; /* [dgr] Longitude of western edge of grid */ double lon_ncr; /* [dgr] Longitude increment */ double lat_ncr; /* [dgr] Latitude 
increment */ double lon_spn; /* [dgr] Longitude span */ double lat_spn; /* [dgr] Latitude span */ double *wgt_Gss=NULL; // [frc] Gaussian weights double precision int *msk=NULL; /* [flg] Mask of grid */ int *dmn_sz_int; /* [nbr] Array of dimension sizes of grid */ int dmn_ids[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NC_FORMAT_CLASSIC; /* [enm] Output file format */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int area_id; /* [id] Area variable ID */ int dmn_id_grd_crn; /* [id] Grid corners dimension ID */ int dmn_id_grd_rnk; /* [id] Grid rank dimension ID */ int dmn_id_grd_sz; /* [id] Grid size dimension ID */ int dmn_sz_int_id; /* [id] Grid dimension sizes ID */ int grd_crn_lat_id; /* [id] Grid corner latitudes variable ID */ int grd_crn_lon_id; /* [id] Grid corner longitudes variable ID */ int grd_ctr_lat_id; /* [id] Grid center latitudes variable ID */ int grd_ctr_lon_id; /* [id] Grid center longitudes variable ID */ int itr_cnt; /* Iteration counter */ int msk_id; /* [id] Mask variable ID */ long dmn_srt[dmn_nbr_grd_max]; long dmn_cnt[dmn_nbr_grd_max]; long bnd_nbr; /* [nbr] Number of bounds in gridcell */ long col_nbr; /* [nbr] Number of columns in grid */ long crn_idx; /* [idx] Counting index for corners */ long grd_crn_nbr; /* [nbr] Number of corners in gridcell */ long grd_rnk_nbr; /* [nbr] Number of dimensions in grid */ long grd_sz_nbr; /* [nbr] Number of gridcells in grid */ long idx2; /* [idx] Counting index for unrolled grids */ long idx; /* [idx] Counting index for unrolled grids */ long lat_idx2; /* [idx] Counting index for unrolled latitude */ long lat_idx; long lat_nbr; /* [nbr] Number of latitudes in grid */ long lon_idx2; /* [idx] Counting index for unrolled longitude */ long lon_idx; long lon_nbr; /* [nbr] Number of longitudes in grid */ nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=True; /* Option O */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=rgr->flg_uio; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool WRT_TMP_FL=False; /* [flg] Write output to temporary file */ nco_bool flg_grd_1D=False; nco_bool flg_grd_2D=False; nco_bool flg_grd_crv=False; nco_bool flg_s2n=True; /* I [enm] Latitude grid-direction is South-to-North */ nco_grd_2D_typ_enm grd_typ; /* [enm] Grid-type enum */ nco_grd_lat_drc_enm lat_drc; /* [enm] Latitude grid-direction enum */ nco_grd_lat_typ_enm lat_typ; /* [enm] Latitude grid-type enum */ nco_grd_lon_typ_enm lon_typ; /* [enm] Longitude grid-type enum */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ dfl_lvl=rgr->dfl_lvl; grd_typ=rgr->grd_typ; /* [enm] Grid type */ fl_out=rgr->fl_grd; fl_out_fmt=rgr->fl_out_fmt; lat_drc=rgr->lat_drc; /* [enm] Latitude grid direction */ lat_typ=rgr->lat_typ; /* [enm] Latitude grid type */ lon_typ=rgr->lon_typ; /* [enm] Longitude grid type */ lat_nbr=rgr->lat_nbr; /* [nbr] Number of latitudes in grid */ lon_nbr=rgr->lon_nbr; /* [nbr] Number of longitudes in grid */ lat_crv=rgr->lat_crv; /* [dgr] Latitude curvilinearity */ lon_crv=rgr->lon_crv; /* [dgr] Longitude curvilinearity */ lat_sth=rgr->lat_sth; /* [dgr] Latitude of southern edge of grid */ lon_wst=rgr->lon_wst; /* [dgr] Longitude of western edge of grid */ 
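/* Worked example of the centering/bounding-box rules applied below (numbers mirror the 20150827 comment that follows): a global 1x1 degree grid requested as --lon_nbr=360 --lon_typ=Grn_ctr has lon_spn=360.0 and lon_ncr=360.0/360=1.0, so centering shifts lon_wst from 0.0 to 0.0-1.0/2=-0.5 and re-derivation yields lon_est=-0.5+1.0*360=359.5, i.e., first gridcell centered on Greenwich. Equivalent explicit request: --lon_nbr=360 --lon_wst=-0.5 --lon_est=359.5 */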
lat_nrt=rgr->lat_nrt; /* [dgr] Latitude of northern edge of grid */
lon_est=rgr->lon_est; /* [dgr] Longitude of eastern edge of grid */
/* Use curvilinear coordinates (lat and lon are 2D arrays) if flg_crv already set or if lat_crv or lon_crv set */
if(lat_crv != 0.0 || lon_crv != 0.0 || rgr->flg_crv) flg_grd_crv=True;
if(lat_drc == nco_grd_lat_drc_n2s) flg_s2n=False;
/* Assume 2D grid */
flg_grd_2D=True;
grd_rnk_nbr=dmn_nbr_2D;
/* Assume quadrilaterals */
grd_crn_nbr=4;
/* Assume rectangles */
bnd_nbr=2;
col_nbr=lat_nbr*lon_nbr;
grd_sz_nbr=lat_nbr*lon_nbr;
/* Allocate space for output data */
area=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
dmn_sz_int=(int *)nco_malloc(grd_rnk_nbr*nco_typ_lng((nc_type)NC_INT));
msk=(int *)nco_malloc(grd_sz_nbr*nco_typ_lng((nc_type)NC_INT));
lat_bnd=(double *)nco_malloc(lat_nbr*bnd_nbr*nco_typ_lng(crd_typ));
lat_crn=(double *)nco_malloc(lat_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
lat_ctr=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ));
lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
lon_bnd=(double *)nco_malloc(lon_nbr*bnd_nbr*nco_typ_lng(crd_typ));
lon_crn=(double *)nco_malloc(lon_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
lon_ctr=(double *)nco_malloc(lon_nbr*nco_typ_lng(crd_typ));
lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ));
wgt_Gss=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ));
grd_ctr_lat=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
grd_ctr_lon=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ));
grd_crn_lat=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ));
grd_crn_lon=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ));
/* Define variable values */
int lon_psn=int_CEWI; /* [idx] Ordinal position of longitude in rectangular grid dimension-size array */
int lat_psn=int_CEWI; /* [idx] Ordinal position of latitude in rectangular grid dimension-size array */
if(grd_rnk_nbr == dmn_nbr_2D){
lon_psn=0; /* SCRIP introduced [lon,lat] convention because more natural for Fortran */
lat_psn=1;
} /* !flg_grd_in_2D */
dmn_sz_int[lon_psn]=lon_nbr;
dmn_sz_int[lat_psn]=lat_nbr;
for(idx=0;idx<grd_sz_nbr;idx++) msk[idx]=1;
/* Compute rectangular arrays
NB: Much is a more-generic rewrite of map/map_grd.F90:map_grd_mk() */
/* 20150827:
Old rule: Longitude grid was entirely specified by one of four longitude map tokens: Grn_ctr,Grn_wst,180_ctr,180_wst
New rule: User may specify bounds (lon_wst,lon_est,lat_sth,lat_nrt) independently of grid token
Such bounds ALWAYS refer to bounding box interface edges, NEVER to centers of first or last gridcells
Bounds and number of gridcells completely determine uniform grid so former longitude-type tokens have no effect when bounds specified (so letting grid-type tokens affect grid would over-determine grid and lead to errors)
Hence, grid-type tokens may be used as short-hand to specify grids but may not be required to exist later (because regional grids would not have specified them)
Grid-type tokens lon_bb/lat_bb imply bounding box was originally used to specify bounds
1x1 degree global grid with first longitude centered at Greenwich: --lon_nbr=360 --lon_typ Grn_ctr --lon_nbr=360 --lon_wst=-0.5 --lon_est=359.5
1x1 degree global grid with Greenwich at west edge of first longitude: --lon_nbr=360 --lon_typ Grn_wst --lon_nbr=360 --lon_wst=0.0 --lon_est=360.0
1x1 degree regional grid, total size 9x9 degrees, Greenwich at center of middle gridcell: --lon_nbr=9 --lon_wst=-4.5 --lon_est=4.5
1x1
degree regional grid, total size 10x10 degrees, Greenwich at east/west edges of middle two gridcells --lon_nbr=10 --lon_wst=-5.0 --lon_est=5.0 */ /* Were east/west longitude bounds set explicitly or implicitly? NB: This is redundant since it was done in nco_rgr_ini(), yet better safe than sorry */ if(lon_wst != NC_MAX_DOUBLE || lon_est != NC_MAX_DOUBLE) lon_typ=rgr->lon_typ=nco_grd_lon_bb; if(lon_wst == NC_MAX_DOUBLE){ /* Precomputed longitude grids begin with longitude 0.0 or -180.0 degrees */ switch(lon_typ){ case nco_grd_lon_bb: case nco_grd_lon_Grn_ctr: case nco_grd_lon_Grn_wst: lon_wst=0.0; break; case nco_grd_lon_180_ctr: case nco_grd_lon_180_wst: lon_wst=-180.0; break; default: nco_dfl_case_generic_err(); break; } /* !lon_typ */ } /* !lon */ if(lon_est == NC_MAX_DOUBLE){ /* Precomputed longitude grids end with longitude 360.0 or 180.0 degrees */ switch(lon_typ){ case nco_grd_lon_bb: case nco_grd_lon_Grn_ctr: case nco_grd_lon_Grn_wst: lon_est=360.0; break; case nco_grd_lon_180_ctr: case nco_grd_lon_180_wst: lon_est=180.0; break; default: nco_dfl_case_generic_err(); break; } /* !lon_typ */ } /* !lon */ /* Determine longitude increment from span of pre-centered bounding box (centering will not change span) */ lon_spn=lon_est-lon_wst; lon_ncr=lon_spn/lon_nbr; /* Centering: If user did not set explicit longitude bounds then... */ if(lon_typ != nco_grd_lon_bb) /* map_lon_ctr_typ determines whether lon_wst refers to cell center or Western edge */ if((lon_typ == nco_grd_lon_Grn_ctr) || (lon_typ == nco_grd_lon_180_ctr)) lon_wst=lon_wst-(lon_ncr/2.0); /* Re-derive lon_est from lon_wst and lon_nbr (more fundamental properties) */ lon_est=lon_wst+lon_ncr*lon_nbr; /* lon_wst and lon_est have been set and will not change */ assert(lon_wst < lon_est); lon_ntf[0L]=lon_wst; lon_ntf[lon_nbr]=lon_est; for(lon_idx=1L;lon_idx<lon_nbr;lon_idx++) lon_ntf[lon_idx]=lon_ntf[0L]+lon_idx*lon_ncr; /* Ensure rounding errors do not produce unphysical grid */ lon_ntf[lon_nbr]=lon_ntf[0L]+lon_spn; /* Finished with longitude, now tackle latitude */ /* Were south/north latitude bounds set explicitly or implicitly? */ // if(lat_sth != NC_MAX_DOUBLE || lat_nrt != NC_MAX_DOUBLE) lon_typ=rgr->lat_typ=nco_grd_lat_bb; if(lat_sth == NC_MAX_DOUBLE) lat_sth=-90.0; if(lat_nrt == NC_MAX_DOUBLE) lat_nrt=90.0; /* Determine latitude increment from span of pre-centered bounding box (centering will not change span) */ lat_spn=lat_nrt-lat_sth; lat_ncr=lat_spn/lat_nbr; const long lat_nbr_hlf=lat_nbr/2L; // [nbr] Half number of latitudes (e.g., lat_nbr_hlf=32 for lat_nbr=64 and 65) double *lat_sin=NULL; // [frc] Sine of Gaussian latitudes double precision /* Create S->N grid. 
If user requested N->S, flip grid at end */ // if(flg_s2n) lat_ntf[0L]=lat_sth; else lat_ntf[0L]=lat_nrt; lat_ntf[0L]=lat_sth; switch(lat_typ){ case nco_grd_lat_fv: lat_ncr=lat_spn/(lat_nbr-1L); lat_ntf[1L]=lat_ntf[0L]+0.5*lat_ncr; for(lat_idx=2L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=lat_ntf[1L]+(lat_idx-1L)*lat_ncr; break; case nco_grd_lat_eqa: lat_ncr=lat_spn/lat_nbr; for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=lat_ntf[0L]+lat_idx*lat_ncr; break; case nco_grd_lat_gss: lat_sin=(double *)nco_malloc(lat_nbr*sizeof(double)); (void)nco_lat_wgt_gss(lat_nbr,True,lat_sin,wgt_Gss); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); /* First guess for lat_ntf is midway between Gaussian abscissae */ for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++) lat_ntf[lat_idx]=0.5*(lat_ctr[lat_idx-1L]+lat_ctr[lat_idx]); /* Iterate guess until area between interfaces matches Gaussian weight (compute for one hemisphere, make other symmetric) */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++){ double fofx_at_x0; /* [frc] Function to iterate evaluated at current guess */ double dfdx_at_x0; /* [frc] Derivative of equation evaluated at current guess */ const double eps_rlt_cnv=1.0e-15; // Convergence criterion (1.0e-16 pushes double precision to the brink) itr_cnt=0; lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; while(fabs(fofx_at_x0) > eps_rlt_cnv){ /* Newton-Raphson iteration: Let x=lat_ntf[lat_idx], y0=lat_ntf[lat_idx-1L], gw = Gaussian weight (exact solution) f(x)=sin(dgr2rdn*x)-sin(dgr2rdn*y0)-gw=0 # s2n grid f(x)=sin(dgr2rdn*y0)-sin(dgr2rdn*x)-gw=0 # n2s grid dfdx(x)= dgr2rdn*cos(dgr2rdn*x) # s2n grid dfdx(x)=-dgr2rdn*cos(dgr2rdn*x) # n2s grid x_better=x0-f(x0)/f'(x0) */ dfdx_at_x0=dgr2rdn*cos(dgr2rdn*lat_ntf[lat_idx]); /* 20190613: n2s latitudes are constructed s2n and flipped to n2s later Hence next line is commented-out in construction mode but used in infer mode */ // if(!flg_s2n) dfdx_at_x0=-dfdx_at_x0; lat_ntf[lat_idx]+=fofx_at_x0/dfdx_at_x0; /* NB: not sure why this is minus not plus but it works :) */ lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L])); fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss; if(++itr_cnt > itr_nbr_max){ (void)fprintf(stdout,"%s: ERROR %s reports convergence only %g after %d iterations for lat_idx = %ld\n",nco_prg_nm_get(),fnc_nm,fabs(fofx_at_x0),itr_nbr_max,lat_idx); nco_exit(EXIT_FAILURE); } /* endif */ } /* !while */ } /* !lat_idx */ /* Use Gaussian grid symmetry to obtain same interfaces in both hemispheres (avoids cumulative rounding errors) */ if(lat_nbr%2){ /* lat_nbr is odd */ for(lat_idx=1L;lat_idx<=lat_nbr_hlf+1L;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx+1L]; }else{ /* lat_nbr is even */ for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++) lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx]; } /* !flg_lat_evn */ break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Ensure rounding errors do not produce unphysical grid */ lat_ntf[lat_nbr]=lat_nrt; if(nco_dbg_lvl_get() > nco_dbg_old){ (void)fprintf(stderr,"%s: DEBUG %s Gaussian abscissae/interfaces for lat_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,lat_nbr); (void)fprintf(stderr,"idx\tlat_ctr\tlat_ntf\tntf_p1\n"); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ (void)fprintf(stderr,"%ld\t%20.15f\t%20.15f\t%20.15f\n",lat_idx,lat_ctr[lat_idx],lat_ntf[lat_idx],lat_ntf[lat_idx+1L]); } /* !lat_idx */ } /* !dbg */ /* Always define longitude 
centers midway between interfaces */ for(lon_idx=0L;lon_idx<=lon_nbr-1L;lon_idx++) lon_ctr[lon_idx]=0.5*(lon_ntf[lon_idx]+lon_ntf[lon_idx+1L]); /* Many grids have center latitude equally spaced between interfaces */ if(lat_typ != nco_grd_lat_fv && lat_typ != nco_grd_lat_gss){ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=0.5*(lat_ntf[lat_idx]+lat_ntf[lat_idx+1L]); } /* !lat_typ */ /* Cap grids excepted---they place centers of first/last gridcells at poles */ if(lat_typ == nco_grd_lat_fv){ lat_ctr[0L]=lat_ntf[0L]; for(lat_idx=1L;lat_idx<lat_nbr-1L;lat_idx++) lat_ctr[lat_idx]=0.5*(lat_ntf[lat_idx]+lat_ntf[lat_idx+1L]); lat_ctr[lat_nbr-1L]=lat_ntf[lat_nbr]; } /* !cap */ /* Gaussian grid centerpoints are defined by solutions to Legendre polynomials */ if(lat_typ == nco_grd_lat_gss){ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]); } /* !Gaussian */ for(idx=0L;idx<lon_nbr;idx++){ lon_bnd[2*idx]=lon_ntf[idx]; lon_bnd[2*idx+1L]=lon_ntf[idx+1L]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ lat_bnd[2*idx]=lat_ntf[idx]; lat_bnd[2*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0L;idx<lat_nbr;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr[idx]); for(int bnd_idx=0L;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? "]\n" : ", "); } /* end loop over lat */ } /* endif dbg */ /* Use centers and boundaries to diagnose latitude weights */ switch(lat_typ){ case nco_grd_lat_eqa: case nco_grd_lat_fv: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); break; case nco_grd_lat_gss: for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=wgt_Gss[lat_idx]; break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Fuzzy test of latitude weight normalization 20180903 Tolerance threshold of eps_rlt_max=1.0e-14 is too strict for Gaussian grids somewhere lat_nbr >~ 150 20180904 Tolerance threshold of eps_rlt_max=1.0e-12 allows Gaussian grids like ECMWF O1280 Newton-Raphson method of interface determination may need improvement to fix that Tolerance threshold of 1.0e-14 works for all relevant E3SM Uniform and Cap grids */ //const double eps_rlt_max=1.0e-14; /* [frc] Round-off error tolerance: Used 1.0e-14 until 20180904 */ const double eps_rlt_max=1.0e-12; /* [frc] Round-off error tolerance: Used 1.0e-12 since 20180904 */ lat_wgt_ttl=0.0; for(idx=0L;idx<lat_nbr;idx++) lat_wgt_ttl+=lat_wgt[idx]; if(grd_typ == nco_grd_2D_fv || grd_typ == nco_grd_2D_eqa){ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd[2*(lat_nbr-1)+1L])-sin(dgr2rdn*lat_bnd[0L])); if(fabs(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc) > eps_rlt_max){ (void)fprintf(stdout,"%s: ERROR %s reports grid normalization does not meet precision tolerance eps_rlt_max = %20.15f\nlat_wgt_ttl = %20.15f, lat_wgt_ttl_xpc = %20.15f, lat_wgt_frc = %20.15f, eps_rlt = %20.15f\n",nco_prg_nm_get(),fnc_nm,eps_rlt_max,lat_wgt_ttl,lat_wgt_ttl_xpc,lat_wgt_ttl/lat_wgt_ttl_xpc,1.0-lat_wgt_ttl/lat_wgt_ttl_xpc); nco_exit(EXIT_FAILURE); } /* !imprecise */ } /* !nco_grd_lat_eqa, !nco_grd_lat_fv */ /* 20180831 Code above assumes grids run S->N User can request N->S grids with --rgr lat_drc=n2s If so, flip grid before unrolling into output arrays */ if(!flg_s2n){ double *lat_ctr_tmp=NULL_CEWI; /* [dgr] Temporary Latitude centers of rectangular grid */ double 
*lat_wgt_tmp=NULL; /* [dgr] Temporary Latitude weights of rectangular grid */ double *lat_ntf_tmp=NULL; /* [dgr] Temporary Latitude interfaces of rectangular grid */ lat_ctr_tmp=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lat_ntf_tmp=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt_tmp=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); long tmp_idx; /* [idx] Temporary index for swapping values */ for(idx=0L;idx<lat_nbr;idx++){ lat_ctr_tmp[idx]=lat_ctr[idx]; lat_wgt_tmp[idx]=lat_wgt[idx]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ tmp_idx=lat_nbr-idx-1L; lat_ctr[idx]=lat_ctr_tmp[tmp_idx]; lat_wgt[idx]=lat_wgt_tmp[tmp_idx]; } /* !idx */ for(idx=0L;idx<lat_nbr+1L;idx++){ lat_ntf_tmp[idx]=lat_ntf[idx]; } /* !idx */ for(idx=0L;idx<lat_nbr+1L;idx++){ tmp_idx=lat_nbr+1L-idx-1L; /* NB: Subtle index difference */ lat_ntf[idx]=lat_ntf_tmp[tmp_idx]; } /* !idx */ for(idx=0L;idx<lat_nbr;idx++){ lat_bnd[2*idx]=lat_ntf[idx]; lat_bnd[2*idx+1L]=lat_ntf[idx+1L]; } /* !idx */ if(lat_ctr_tmp) lat_ctr_tmp=(double *)nco_free(lat_ctr_tmp); if(lat_ntf_tmp) lat_ntf_tmp=(double *)nco_free(lat_ntf_tmp); if(lat_wgt_tmp) lat_wgt_tmp=(double *)nco_free(lat_wgt_tmp); } /* !flg_s2n */ assert(grd_crn_nbr == 4); for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_ntf[lon_idx]; lon_crn[idx+1L]=lon_ntf[lon_idx+1L]; lon_crn[idx+2L]=lon_ntf[lon_idx+1L]; lon_crn[idx+3L]=lon_ntf[lon_idx]; } /* !lon_idx */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_ntf[lat_idx]; lat_crn[idx+1L]=lat_ntf[lat_idx]; lat_crn[idx+2L]=lat_ntf[lat_idx+1L]; lat_crn[idx+3L]=lat_ntf[lat_idx+1L]; } /* !lat_idx */ /* Stuff rectangular arrays into unrolled arrays */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]=lat_ctr[lat_idx]; grd_ctr_lon[idx]=lon_ctr[lon_idx]; for(crn_idx=0L;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; grd_crn_lon[idx2]=lon_crn[lon_idx2]; } /* !crn */ } /* !lon */ } /* !lat */ if(flg_grd_crv){ /* Impose curvilinearity by adding lon_crv offset to each row relative to previous row, and lat_crv offset to each column relative to previous column */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]+=lon_idx*lat_crv; grd_ctr_lon[idx]+=lat_idx*lon_crv; for(crn_idx=0L;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; grd_crn_lon[idx2]=lon_crn[lon_idx2]; if(crn_idx == 0L || crn_idx == 1L){ grd_crn_lat[idx2]+=lat_idx*lat_crv; /* LL, LR */ grd_crn_lon[idx2]+=lat_idx*lon_crv; /* LL, LR */ }else if(crn_idx == 2L || crn_idx == 3L){ grd_crn_lat[idx2]+=(lat_idx+1L)*lat_crv; /* UL, UR */ grd_crn_lon[idx2]+=(lat_idx+1L)*lon_crv; /* UL, UR */ } /* !crn */ } /* !crn */ } /* !lon */ } /* !lat */ } /* !flg_grd_crv */ /* 20190613: Convert CW quadrilaterals to CCW quadrilaterals so TempestRemap accepts grids Default construction/inferral method orders corners CCW and CW for s2n and n2s grids, respectively */ if(!flg_s2n){ nco_bool flg_ccw; /* [flg] Gridcell is CCW */ const int rcr_lvl=1; /* [nbr] Recursion level (1 is top level, 2 and greater are recursed */ const int idx_ccw=0; /* [idx] Index of starting vertice for CCW check (Point A = tail side AB) 
*/ for(idx=0L;idx<grd_sz_nbr;idx++){ idx2=grd_crn_nbr*idx; flg_ccw=nco_ccw_chk(grd_crn_lat+idx2,grd_crn_lon+idx2,grd_crn_nbr,idx_ccw,rcr_lvl); if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_vec) (void)fprintf(stderr,"%s: DEBUG %s reports nco_ccw_chk() tried to change idx = %lu from CW to CCW\n",nco_prg_nm_get(),fnc_nm,idx); } /* !idx */ } /* !flg_s2n */ if(nco_dbg_lvl_get() >= nco_dbg_std){ long int idx_crn_ll; long int idx_crn_lr; long int idx_crn_ur; long int idx_crn_ul; long idx_dbg; idx_dbg=rgr->idx_dbg; idx_crn_ll=grd_crn_nbr*idx_dbg+0L; idx_crn_lr=grd_crn_nbr*idx_dbg+1L; idx_crn_ur=grd_crn_nbr*idx_dbg+2L; idx_crn_ul=grd_crn_nbr*idx_dbg+3L; (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,grd_ctr_lat[idx_dbg],grd_ctr_lon[idx_dbg],grd_crn_lat[idx_crn_ll],grd_crn_lon[idx_crn_ll],grd_crn_lat[idx_crn_lr],grd_crn_lon[idx_crn_lr],grd_crn_lat[idx_crn_ur],grd_crn_lon[idx_crn_ur],grd_crn_lat[idx_crn_ul],grd_crn_lon[idx_crn_ul]); } /* !dbg */ if(flg_grd_crv){ /* Area of arbitrary curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,grd_crn_lat,grd_crn_lon,grd_sz_nbr,grd_crn_nbr,area); }else{ /* Area of rectangular spherical zones from elementary calculus results 20150906: Half-angle formulae for better conditioning improve area normalization for 801x1600 by 2.0e-15 area[lat_idx*lon_nbr+lon_idx]=dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*2.0*(sin(0.5*dgr2rdn*lat_bnd[2*lat_idx+1L])*cos(0.5*dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(0.5*dgr2rdn*lat_bnd[2*lat_idx])*cos(0.5*dgr2rdn*lat_bnd[2*lat_idx])); Gain not worth the extra complexity */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++) /* fabs() ensures positive area in n2s grids */ area[lat_idx*lon_nbr+lon_idx]=fabs(dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*(sin(dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(dgr2rdn*lat_bnd[2*lat_idx]))); } /* !flg_grd_2D */ if(nco_dbg_lvl_get() >= nco_dbg_sbr){ lat_wgt_ttl=0.0; area_ttl=0.0; if(flg_grd_2D){ (void)fprintf(stderr,"%s: INFO %s reports destination rectangular latitude grid:\n",nco_prg_nm_get(),fnc_nm); for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) lat_wgt_ttl+=lat_wgt[lat_idx]; } /* !flg_grd_2D */ for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0L;lon_idx<lon_nbr;lon_idx++) area_ttl+=area[lat_idx*lon_nbr+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_ttl,area_ttl/(4.0*M_PI)); assert(area_ttl > 0.0); assert(area_ttl <= 4.0*M_PI); } /* endif dbg */ /* Open grid file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Define dimensions */ rcd=nco_def_dim(out_id,grd_crn_nm,grd_crn_nbr,&dmn_id_grd_crn); rcd=nco_def_dim(out_id,grd_sz_nm,grd_sz_nbr,&dmn_id_grd_sz); rcd=nco_def_dim(out_id,grd_rnk_nm,grd_rnk_nbr,&dmn_id_grd_rnk); int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; /* Define variables */ (void)nco_def_var(out_id,dmn_sz_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_rnk,&dmn_sz_int_id); /* NB: Too small to deflate */ (void)nco_def_var(out_id,grd_area_nm,(nc_type)crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); 
(void)nco_def_var(out_id,msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_sz,&msk_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lat_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lon_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lon_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_grd_sz; dmn_ids[1]=dmn_id_grd_crn; (void)nco_def_var(out_id,grd_crn_lat_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lat_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_grd_sz; dmn_ids[1]=dmn_id_grd_crn; (void)nco_def_var(out_id,grd_crn_lon_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lon_id,shuffle,deflate,dfl_lvl); /* Define global and "units" attributes */ char *att_val; rcd=nco_char_att_put(out_id,NULL,"title",rgr->grd_ttl); rcd=nco_char_att_put(out_id,NULL,"Conventions","SCRIP"); const char usr_cpp[]=TKN2SNG(USER); /* [sng] Hostname from C pre-processor */ rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO"); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ)); rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ)); rcd=nco_char_att_put(out_id,dmn_sz_nm,"long_name","Size(s) of horizontal dimensions (in Fortran storage order for historical reasons)"); rcd=nco_char_att_put(out_id,grd_area_nm,"long_name","Solid Angle Subtended on Source Grid"); rcd=nco_char_att_put(out_id,grd_area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,grd_area_nm,"units","steradian"); rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"standard_name","latitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"bounds",grd_crn_lat_nm); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"standard_name","longitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"bounds",grd_crn_lon_nm); rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"long_name","Latitude of Grid Cell Vertices"); rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"standard_name","latitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"long_name","Longitude of Grid Cell Vertices"); rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"standard_name","longitude"); if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees_east"); else 
rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees"); /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ rcd=nco_char_att_put(out_id,msk_nm,"long_name","Binary Integer Mask for Grid"); rcd=nco_char_att_put(out_id,msk_nm,"units","none"); /* Begin data mode */ (void)nco_enddef(out_id); /* Write variables */ dmn_srt[0]=0L; dmn_cnt[0]=grd_rnk_nbr; rcd=nco_put_vara(out_id,dmn_sz_int_id,dmn_srt,dmn_cnt,dmn_sz_int,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,msk_id,dmn_srt,dmn_cnt,msk,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ); dmn_srt[0]=0L; dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lat_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ); dmn_srt[0]=0L; dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lon_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ); /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); fl_out=rgr->fl_skl; if(fl_out){ /* Write skeleton data file on requested grid Skeleton file can then be populated with data for testing */ char *area_nm; char *bnd_nm; // char *bnd_tm_nm; char *col_nm_out; char *lat_nm_out; /* [sng] Name of output dimension for latitude */ char *lat_wgt_nm; char *lon_nm_out; /* [sng] Name of variable to recognize as longitude */ char *lat_bnd_nm; /* [sng] Name of latitude boundary variable */ char *lon_bnd_nm; /* [sng] Name of longitude boundary variable */ // int area_id; /* [id] Variable ID for area */ int dmn_id_bnd; /* [id] Dimension ID */ //int dmn_id_bnd_tm; /* [id] Dimension ID */ int dmn_id_col; /* [id] Dimension ID */ int dmn_id_lat; /* [id] Dimension ID */ int dmn_id_lon; /* [id] Dimension ID */ int lat_bnd_id; /* [id] Variable ID for lat_bnds/lat_vertices */ int lat_id; /* [id] Variable ID for latitude */ int lat_wgt_id; /* [id] Variable ID for latitude weight */ int lon_bnd_id; /* [id] Variable ID for lon_bnds/lon_vertices */ int lon_id; /* [id] Variable ID for longitude */ /* Use explicitly specified output names, if any, otherwise use input names (either explicitly specified or discovered by fuzzing) */ if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=(char *)strdup("lat"); if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=(char *)strdup("lon"); if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=(char *)strdup("ncol"); /* Name output dimensions */ area_nm=rgr->area_nm; bnd_nm=rgr->bnd_nm; //bnd_tm_nm=rgr->bnd_tm_nm; lat_bnd_nm=rgr->lat_bnd_nm; lat_wgt_nm=rgr->lat_wgt_nm; lon_bnd_nm=rgr->lon_bnd_nm; /* Use names discovered by fuzzing */ if(flg_grd_1D){ bnd_nm=rgr->vrt_nm; lat_bnd_nm=rgr->lat_vrt_nm; lon_bnd_nm=rgr->lon_vrt_nm; } /* !flg_grd_1D */ if(flg_grd_2D){ bnd_nm=rgr->bnd_nm; lat_bnd_nm=rgr->lat_bnd_nm; lon_bnd_nm=rgr->lon_bnd_nm; } /* !flg_grd_2D */ /* Open grid file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Define dimensions */ if(flg_grd_crv){ rcd=nco_def_dim(out_id,bnd_nm,grd_crn_nbr,&dmn_id_bnd); }else{ rcd=nco_def_dim(out_id,bnd_nm,bnd_nbr,&dmn_id_bnd); } /* !flg_grd_crv */ 
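/* For reference, an abridged CDL view (editor sketch) of the SCRIP gridfile closed above;
   these dimension and variable names are fixed by the SCRIP convention, as noted in the declarations:

   netcdf grd_scrip {
   dimensions:
     grid_rank=... ; grid_size=... ; grid_corners=... ;
   variables:
     int grid_dims(grid_rank) ;
     double grid_center_lat(grid_size) ;
     double grid_center_lon(grid_size) ;
     double grid_corner_lat(grid_size,grid_corners) ;
     double grid_corner_lon(grid_size,grid_corners) ;
     int grid_imask(grid_size) ;
     double grid_area(grid_size) ;
   }
*/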
if(flg_grd_1D){ rcd=nco_def_dim(out_id,col_nm_out,col_nbr,&dmn_id_col); } /* !flg_grd_1D */ if(flg_grd_2D){ rcd=nco_def_dim(out_id,lat_nm_out,lat_nbr,&dmn_id_lat); rcd=nco_def_dim(out_id,lon_nm_out,lon_nbr,&dmn_id_lon); } /* !flg_grd_2D */ /* Define new coordinates and variables in regridded file */ if(flg_grd_1D){ (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_col,&lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_col,&lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_col; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_col; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_1D,&dmn_id_col,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); } /* !flg_grd_1D */ if(flg_grd_crv){ dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_lon; (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_2D,dmn_ids,&lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_2D,dmn_ids,&lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_2D,dmn_ids,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_lon; dmn_ids[2]=dmn_id_bnd; (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_3D,dmn_ids,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_3D,dmn_ids,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); }else if(flg_grd_2D){ (void)nco_def_var(out_id,lat_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_lat,&lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lon_nm_out,crd_typ,dmn_nbr_1D,&dmn_id_lon,&lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lat_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lat_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lon; dmn_ids[1]=dmn_id_bnd; (void)nco_def_var(out_id,lon_bnd_nm,crd_typ,dmn_nbr_2D,dmn_ids,&lon_bnd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,lat_wgt_nm,crd_typ,dmn_nbr_1D,&dmn_id_lat,&lat_wgt_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_wgt_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_lat; dmn_ids[1]=dmn_id_lon; (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_2D,dmn_ids,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); } /* !flg_grd_2D */ /* Define attributes */ rcd=nco_char_att_put(out_id,NULL,"title",rgr->grd_ttl); rcd=nco_char_att_put(out_id,NULL,"Conventions","CF-1.6"); rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); 
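/* Editor note: the define-then-deflate idiom repeated above could be factored into a helper;
   hypothetical sketch (def_var_dfl is not an NCO API, shuffle/deflate values as set earlier):

   static void def_var_dfl // Define variable, then optionally attach shuffle+deflate filters
   (const int out_id,const char *var_nm,const nc_type typ,
    const int dmn_nbr,const int *dmn_id,const int dfl_lvl,int *var_id)
   {
     (void)nco_def_var(out_id,var_nm,typ,dmn_nbr,dmn_id,var_id);
     if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,*var_id,NC_SHUFFLE,(int)True,dfl_lvl);
   }
*/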
(void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ)); rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ)); rcd=nco_char_att_put(out_id,area_nm,"long_name","Solid angle subtended by gridcell"); rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm,"units","steradian"); char *crd_val_sng; /* CF-standard coordinates values string */ size_t crd_val_sng_lng=strlen(lat_nm_out)+strlen(lon_nm_out)+1L; crd_val_sng=(char *)nco_malloc(crd_val_sng_lng*sizeof(char)+1L); (void)sprintf(crd_val_sng,"%s %s",lat_nm_out,lon_nm_out); rcd=nco_char_att_put(out_id,area_nm,"coordinates",crd_val_sng); if(crd_val_sng) crd_val_sng=(char *)nco_free(crd_val_sng); rcd=nco_char_att_put(out_id,lat_nm_out,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lat_nm_out,"standard_name","latitude"); rcd=nco_char_att_put(out_id,lat_nm_out,"units","degrees_north"); rcd=nco_char_att_put(out_id,lat_nm_out,"axis","Y"); rcd=nco_char_att_put(out_id,lat_nm_out,"bounds",lat_bnd_nm); if(flg_grd_2D) att_val=strdup("Gridcell latitude interfaces"); else att_val=strdup("Gridcell latitude vertices"); rcd=nco_char_att_put(out_id,lat_bnd_nm,"long_name",att_val); if(flg_grd_2D) rcd=nco_char_att_put(out_id,lat_wgt_nm,"long_name","Latitude quadrature weights (normalized to sum to 2.0 on global grids)"); rcd=nco_char_att_put(out_id,lon_nm_out,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,lon_nm_out,"standard_name","longitude"); rcd=nco_char_att_put(out_id,lon_nm_out,"units","degrees_east"); rcd=nco_char_att_put(out_id,lon_nm_out,"axis","X"); rcd=nco_char_att_put(out_id,lon_nm_out,"bounds",lon_bnd_nm); if(flg_grd_2D) att_val=strdup("Gridcell longitude interfaces"); else att_val=strdup("Gridcell longitude vertices"); rcd=nco_char_att_put(out_id,lon_bnd_nm,"long_name",att_val); /* Begin data mode */ (void)nco_enddef(out_id); /* Write new coordinates and variables to regridded file */ if(flg_grd_1D){ dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=col_nbr; dmn_cnt[1]=bnd_nbr; (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=col_nbr; (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); } /* !flg_grd_1D */ if(flg_grd_crv){ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ); (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ); (void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); dmn_srt[0]=dmn_srt[1]=0L;dmn_srt[2]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr; dmn_cnt[2]=grd_crn_nbr; (void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ); (void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ); }else if(flg_grd_2D){ dmn_srt[0]=0L; dmn_cnt[0]=lat_nbr; (void)nco_put_vara(out_id,lat_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=lon_nbr; (void)nco_put_vara(out_id,lon_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=lat_nbr; (void)nco_put_vara(out_id,lat_wgt_id,dmn_srt,dmn_cnt,lat_wgt,crd_typ); 
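/* Editor note on the nco_put_vara() calls above and below: dmn_srt[] holds the start index
   and dmn_cnt[] the edge length of the hyperslab per dimension, so, e.g., dmn_srt={0L,0L},
   dmn_cnt={lat_nbr,bnd_nbr} writes the entire lat_bnd(lat,bnd) array in a single call. */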
dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=bnd_nbr;
(void)nco_put_vara(out_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ);
dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lon_nbr; dmn_cnt[1]=bnd_nbr;
(void)nco_put_vara(out_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ);
dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=lon_nbr;
(void)nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
} /* !flg_grd_2D */
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
} /* !fl_out */
/* Free memory associated with input file */
if(dmn_sz_int) dmn_sz_int=(int *)nco_free(dmn_sz_int);
if(msk) msk=(int *)nco_free(msk);
if(area) area=(double *)nco_free(area);
if(grd_ctr_lat) grd_ctr_lat=(double *)nco_free(grd_ctr_lat);
if(grd_ctr_lon) grd_ctr_lon=(double *)nco_free(grd_ctr_lon);
if(grd_crn_lat) grd_crn_lat=(double *)nco_free(grd_crn_lat);
if(grd_crn_lon) grd_crn_lon=(double *)nco_free(grd_crn_lon);
if(lat_bnd) lat_bnd=(double *)nco_free(lat_bnd);
if(lat_crn) lat_crn=(double *)nco_free(lat_crn);
if(lat_ctr) lat_ctr=(double *)nco_free(lat_ctr);
if(lat_ntf) lat_ntf=(double *)nco_free(lat_ntf);
if(lat_sin) lat_sin=(double *)nco_free(lat_sin);
if(lat_wgt) lat_wgt=(double *)nco_free(lat_wgt);
if(lon_bnd) lon_bnd=(double *)nco_free(lon_bnd);
if(lon_crn) lon_crn=(double *)nco_free(lon_crn);
if(lon_ctr) lon_ctr=(double *)nco_free(lon_ctr);
if(lon_ntf) lon_ntf=(double *)nco_free(lon_ntf);
if(wgt_Gss) wgt_Gss=(double *)nco_free(wgt_Gss);
return rcd;
} /* !nco_grd_mk() */

int /* O [enm] Return code */
nco_grd_nfr /* [fnc] Infer SCRIP-format grid file from input data file */
(rgr_sct * const rgr) /* I/O [sct] Regridding structure */
{
/* Purpose: Use grid information and guesswork to create SCRIP-format grid file from input data file
   Test curvilinear grids:
   ncks -O -D 1 --rgr infer --rgr grid=${DATA}/sld/rgr/grd_airs.nc ${DATA}/sld/raw/AIRS.2014.10.01.202.L2.TSurfStd.Regrid010.1DLatLon.nc ~/foo.nc
   ncks -O -D 1 --rgr infer --rgr grid=${DATA}/sld/rgr/grd_airs.nc ${DATA}/sld/raw/AIRS.2014.10.01.202.L2.TSurfStd.Regrid010.1DLatLon.hole.nc ~/foo.nc */
const char fnc_nm[]="nco_grd_nfr()"; /* [sng] Function name */
const double rdn2dgr=180.0/M_PI;
const double dgr2rdn=M_PI/180.0;
const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables */
const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */
const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */
const int dmn_nbr_grd_max=4; /* [nbr] Maximum rank of grid variables (msk_[src/dst] could be rank 4) */
const int itr_nbr_max=20; // [nbr] Maximum number of iterations
const int idx_ccw=0; /* [idx] Index of starting vertex for CCW check (Point A = tail side AB) */
const int rcr_lvl=1; /* [nbr] Recursion level (1 is top level, 2 and greater are recursed) */
const nc_type crd_typ=NC_DOUBLE;
char *area_nm_in=NULL;
char *fl_in;
char *fl_out;
char *fl_out_tmp=NULL_CEWI;
char *fl_pth_lcl=NULL;
char *msk_nm_in=NULL;
char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */
/* SCRIP-format grid names are non-negotiable and thus fixed not dynamic */
char area_nm[]="grid_area"; /* 20150830: NB ESMF_RegridWeightGen --user_areas looks for variable named "grid_area" */
char dmn_sz_nm[]="grid_dims";
char grd_crn_lat_nm[]="grid_corner_lat";
char grd_crn_lon_nm[]="grid_corner_lon";
char grd_crn_nm[]="grid_corners";
char grd_ctr_lat_nm[]="grid_center_lat";
char grd_ctr_lon_nm[]="grid_center_lon";
char grd_rnk_nm[]="grid_rank";
char grd_sz_nm[]="grid_size";
char msk_nm[]="grid_imask";
unt_sng[]="units"; /* netCDF-standard units attribute name */ double *grd_ctr_lat; /* [dgr] Latitude centers of grid */ double *grd_ctr_lon; /* [dgr] Longitude centers of grid */ double *grd_crn_lat; /* [dgr] Latitude corners of grid */ double *grd_crn_lon; /* [dgr] Longitude corners of grid */ double *area; /* [sr] Area of grid */ double *lat_bnd=NULL_CEWI; /* [dgr] Latitude boundaries of rectangular grid */ double *lat_crn=NULL; /* [dgr] Latitude corners of rectangular grid */ double *lat_ctr=NULL_CEWI; /* [dgr] Latitude centers of rectangular grid */ double *lat_ntf=NULL; /* [dgr] Latitude interfaces of rectangular grid */ double *lat_wgt=NULL; /* [dgr] Latitude weights of rectangular grid */ double *lon_bnd=NULL_CEWI; /* [dgr] Longitude boundaries of rectangular grid */ double *lon_crn=NULL; /* [dgr] Longitude corners of rectangular grid */ double *lon_ctr=NULL_CEWI; /* [dgr] Longitude centers of rectangular grid */ double *lon_ntf=NULL; /* [dgr] Longitude interfaces of rectangular grid */ double *vrt_lat=NULL; /* [rdn] MPAS latitude boundary variable latVertex */ double *vrt_lon=NULL; /* [rdn] MPAS longitude boundary variable lonVertex */ double area_ttl=0.0; /* [frc] Exact sum of area */ //double lat_nrt; /* [dgr] Latitude of northern edge of grid */ double lat_sth; /* [dgr] Latitude of southern edge of grid */ double lat_wgt_ttl=0.0; /* [frc] Actual sum of quadrature weights */ double lat_wgt_gss; /* [frc] Latitude weight estimated from interface latitudes */ // double lon_est; /* [dgr] Longitude of eastern edge of grid */ double lon_wst; /* [dgr] Longitude of western edge of grid */ double lon_ncr; /* [dgr] Longitude increment */ double lat_ncr; /* [dgr] Latitude increment */ double lon_spn; /* [dgr] Longitude span */ double lat_spn; /* [dgr] Latitude span */ double mss_val_area_dbl; double mss_val_ctr_dbl; double mss_val_msk_dbl; int *msk=NULL; /* [flg] Mask of grid */ int *vrt_cll=NULL; /* [enm] MPAS variable verticesOnCell */ int *dmn_sz_int; /* [nbr] Array of dimension sizes of grid */ int dmn_ids[dmn_nbr_grd_max]; /* [id] Dimension IDs array for output variable */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int dmn_idx; /* [idx] Dimension index */ int fl_out_fmt=NC_FORMAT_CLASSIC; /* [enm] Output file format */ int in_id; /* I [id] Input netCDF file ID */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int area_id=NC_MIN_INT; /* [id] Area variable ID */ int dmn_id_grd_crn; /* [id] Grid corners dimension ID */ int dmn_id_grd_rnk; /* [id] Grid rank dimension ID */ int dmn_id_grd_sz; /* [id] Grid size dimension ID */ int dmn_sz_int_id; /* [id] Grid dimension sizes ID */ int grd_crn_lat_id; /* [id] Grid corner latitudes variable ID */ int grd_crn_lon_id; /* [id] Grid corner longitudes variable ID */ int grd_ctr_lat_id; /* [id] Grid center latitudes variable ID */ int grd_ctr_lon_id; /* [id] Grid center longitudes variable ID */ int itr_cnt; /* Iteration counter */ int lat_rnk; /* [nbr] Rank of latitude coordinate */ int lon_rnk; /* [nbr] Rank of longitude coordinate */ int lat_ctr_id=NC_MIN_INT; /* [id] Latitude centers of rectangular grid variable ID */ int lon_ctr_id=NC_MIN_INT; /* [id] Longitude centers of rectangular grid variable ID */ int lat_bnd_id=NC_MIN_INT; /* [id] Latitude centers of rectangular grid variable ID */ int lon_bnd_id=NC_MIN_INT; /* [id] Longitude centers of rectangular grid variable ID */ int msk_id=NC_MIN_INT; /* [id] Mask variable ID */ int msk_rnk_nbr; /* [id] Mask 
rank */ int mss_val_int_out=NC_MIN_INT; /* [nbr] Value that can be non-erroneously pointed to */ int val_two=2; /* [nbr] Value that can be non-erroneously pointed to */ int val_zero=0; /* [nbr] Value that can be non-erroneously pointed to */ int var_id; /* [id] Current variable ID */ int vrt_cll_id=NC_MIN_INT; /* [id] MPAS variable verticesOnCell ID */ int vrt_lat_id=NC_MIN_INT; /* [id] MPAS latitude boundary variable latVertex ID */ int vrt_lon_id=NC_MIN_INT; /* [id] MPAS longitude boundary variable lonVertex ID */ long dmn_srt[dmn_nbr_grd_max]; long dmn_cnt[dmn_nbr_grd_max]; long bnd_idx; long bnd_nbr=NC_MIN_INT; /* [nbr] Number of bounds in gridcell */ long col_idx; long col_nbr; /* [nbr] Number of columns in grid */ long crn_idx; /* [idx] Counting index for corners */ long ttl_idx; /* [idx] Total (unrolled) counting index for grid+corners */ long dmn_sz; /* [nbr] Size of current dimension */ long grd_crn_nbr; /* [nbr] Number of corners in gridcell */ long grd_rnk_nbr=int_CEWI; /* [nbr] Number of dimensions in grid */ long grd_sz_nbr; /* [nbr] Number of gridcells in grid */ long idx2; /* [idx] Counting index for unrolled grids */ long idx; /* [idx] Counting index for unrolled grids */ long idx_crn; long idx_ctr; long idx_fst; /* [idx] Index offset */ long idx_tmp; /* [idx] Temporary index */ long lat_idx2; /* [idx] Counting index for unrolled latitude */ long lat_idx; long lat_nbr; /* [nbr] Number of latitudes in grid */ long lon_idx2; /* [idx] Counting index for unrolled longitude */ long lon_idx; long lon_nbr; /* [nbr] Number of longitudes in grid */ long vrt_idx; /* [idx] Counting index for vertices */ long vrt_nbr; /* [nbr] Number of vertices in MPAS grid */ long int idx_crn_ll; long int idx_crn_lr; long int idx_crn_ur; long int idx_crn_ul; nco_bool FL_RTR_RMT_LCN; nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=True; /* Option O */ nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=rgr->flg_uio; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool WRT_TMP_FL=False; /* [flg] Write output to temporary file */ nco_bool flg_1D_mpas_bnd=False; /* [flg] Unstructured input grid with MPAS bounds */ nco_bool flg_1D_psd_rct_bnd=False; /* [flg] Unstructured input grid with pseudo-rectangular bounds */ nco_bool flg_ccw; /* [flg] Gridcell is CCW */ nco_bool flg_grd_1D=False; nco_bool flg_grd_2D=False; nco_bool flg_grd_crv=False; nco_bool flg_s2n=True; /* [enm] Latitude grid-direction is South-to-North */ nco_bool flg_wrt_crn=True; nco_bool flg_crn_grd_lat_lon=False; /* [flg] Curvilinear corner array ordered non-canonically as grd_nbr,lat_nbr,lon_nbr */ nco_bool use_mss_val_area=False; nco_bool has_mss_val_area=False; nco_bool has_mss_val_bnd=False; nco_bool has_mss_val_ctr=False; nco_bool has_mss_val_msk=False; nco_grd_2D_typ_enm grd_typ; /* [enm] Grid-type enum */ nco_grd_lat_typ_enm lat_typ; /* [enm] Latitude grid-type enum */ nco_grd_lon_typ_enm lon_typ; /* [enm] Longitude grid-type enum */ nco_grd_xtn_enm nco_grd_xtn=nco_grd_xtn_nil; /* [enm] Grid-extent enum */ nc_type msk_typ; ptr_unn msk_unn; size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ /* Algorithm: Read grid information from input data file (aka *_in) Close input file Once 
grid dimensions known, allocate output grid arrays (aka *_out) Open output file (aka grid-file) Use guesswork and standard algorithms to fill-in output arrays */ /* Duplicate (because nco_fl_mk_lcl() free()'s fl_in) */ fl_in=(char *)strdup(rgr->fl_in); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); char *bnd_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as bounds */ char *col_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as column */ char *lat_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as latitude */ char *lon_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as longitude */ char *lat_nm_in=NULL_CEWI; /* [sng] Name of variable to recognize as latitude */ char *lon_nm_in=NULL_CEWI; /* [sng] Name of variable to recognize as longitude */ char *lat_bnd_nm=NULL_CEWI; /* [sng] Name of latitude boundary variable */ char *lon_bnd_nm=NULL_CEWI; /* [sng] Name of longitude boundary variable */ char *vrt_dmn_nm=NULL_CEWI; /* [sng] Name of MPAS vertices dimension nVertices */ char *vrt_cll_nm=NULL_CEWI; /* [sng] Name of MPAS variable verticesOnCell */ char *vrt_lat_nm=NULL_CEWI; /* [sng] Name of MPAS latitude boundary variable latVertex */ char *vrt_lon_nm=NULL_CEWI; /* [sng] Name of MPAS longitude boundary variable lonVertex */ int dmn_id_bnd=NC_MIN_INT; /* [id] Dimension ID for spatial bounds */ int dmn_id_col=NC_MIN_INT; /* [id] Dimension ID for unstructured grids */ int dmn_id_lat=NC_MIN_INT; /* [id] Dimension ID for latitude */ int dmn_id_lon=NC_MIN_INT; /* [id] Dimension ID for longitude */ int dmn_id_vrt=NC_MIN_INT; /* [id] Dimension ID for MPAS vertices */ /* Begin CF-coordinates block */ cf_crd_sct *cf=NULL; char *rgr_var; /* [sng] Variable for special regridding treatment */ nco_bool flg_cf=False; /* [flg] Follow CF Coordinates convention to find and infer grid */ rgr_var=rgr->var_nm; if(rgr_var){ /* Infer grid from special variable Intended to be variable that has both horizontal dimensions and "coordinates" attribute, e.g., ncks --cdl -m ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc | grep coordinates 4LFTX_221_SPDY_S113:coordinates = "gridlat_221 gridlon_221" ; Usage: ncks -O -D 3 --rgr infer --rgr_var=4LFTX_221_SPDY_S113 --rgr grid=~/grd_narr.nc ${DATA}/hdf/narrmon-a_221_20100101_0000_000.nc ~/foo.nc */ char crd_sng[]="coordinates"; /* CF-standard coordinates attribute name */ cf=(cf_crd_sct *)nco_malloc(sizeof(cf_crd_sct)); cf->crd=False; /* [flg] CF coordinates information is complete */ cf->crd_id[0]=NC_MIN_INT; /* [id] Coordinate ID, first */ cf->crd_id[1]=NC_MIN_INT; /* [id] Coordinate ID, second */ cf->crd_nm[0]=NULL; /* [sng] Coordinate name, first */ cf->crd_nm[1]=NULL; /* [sng] Coordinate name, second */ cf->crd_sng=NULL; /* [sng] Coordinates attribute value */ cf->dmn_id[0]=NC_MIN_INT; /* [id] Dimension ID, first */ cf->dmn_id[1]=NC_MIN_INT; /* [id] Dimension ID, second */ cf->dmn_nm[0]=NULL; /* [sng] Dimension name, first */ cf->dmn_nm[1]=NULL; /* [sng] Dimension name, second */ cf->unt_sng[0]=NULL; /* [sng] Units string, first coordinate */ cf->unt_sng[1]=NULL; /* [sng] Units string, second coordinate */ cf->var_id=NC_MIN_INT; /* [id] Coordinate variable ID */ cf->var_nm=NULL; /* [sng] Coordinates variable name */ 
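/* Editor illustration of the space-splitting parse performed below, using the example
   attribute from the header comment (standalone sketch, hedged):

   char crd_dpl[]="gridlat_221 gridlon_221"; // modifiable duplicate
   char *crd_nm[NCO_MAX_CRD_PER_VAR];
   char *spc_ptr;
   int crd_nbr=0;
   while((spc_ptr=strrchr(crd_dpl,' '))){ // scan backwards for spaces
     crd_nm[crd_nbr++]=spc_ptr+1L;        // name starts just after the space
     *spc_ptr='\0';                       // NUL-terminate so next search ends here
   }
   crd_nm[crd_nbr++]=crd_dpl;             // final name begins where the string starts

   This leaves crd_nm[0]="gridlon_221", crd_nm[1]="gridlat_221", crd_nbr=2,
   i.e., names are collected in reverse order of appearance. */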
cf->var_type=NC_NAT; /* [enm] Coordinates variable type */ if((rcd=nco_inq_varid_flg(in_id,rgr_var,&cf->var_id)) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports special \"coordinates\" variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd */ cf->crd_sng=nco_char_att_get(in_id,cf->var_id,crd_sng); if(cf->crd_sng){ cf->crd=True; }else{ /* !rcd && att_typ */ (void)fprintf(stderr,"%s: WARNING %s reports coordinates variable %s does not have character-valued \"coordinates\" attribute. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,rgr_var); goto skp_cf; } /* !rcd && att_typ */ /* Valid coordinates attribute requires two coordinate names separated by space character */ char *crd_nm[NCO_MAX_CRD_PER_VAR]; /* [sng] Coordinate name start position */ char *crd_dpl; /* [sng] Modifiable duplicate of coordinates string */ char *spc_ptr; /* [sng] Pointer to space character (' ') */ int crd_nbr=0; /* [nbr] Number of names in coordinates attribute */ int crd_spt=0; /* [nbr] Number of "spatial-like" (that include "degree" in units) coordinates */ int crd_idx=0; /* [idx] Counter for coordinate names */ for(crd_idx=0;crd_idx<NCO_MAX_CRD_PER_VAR;crd_idx++) crd_nm[crd_idx]=NULL; crd_dpl=(char *)strdup(cf->crd_sng); /* Search for spaces starting from end of string */ while((spc_ptr=strrchr(crd_dpl,' '))){ crd_nm[crd_nbr]=spc_ptr+1L; crd_nbr++; /* NUL-terminate so next search ends here */ *spc_ptr='\0'; } /* !sbs_ptr */ /* Final coordinate name begins where coordinate string starts */ crd_nm[crd_nbr]=crd_dpl; /* Change crd_nbr from 0-based index to actual coordinate number */ crd_nbr++; if(crd_nbr < 2){ (void)fprintf(stderr,"%s: WARNING %s found only %d coordinate(s) in \"coordinates\" attribute \"%s\", at least two are required. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,crd_nbr,cf->crd_sng); goto skp_cf; } /* !crd_nbr */ /* If more than two coordinate names are present, choose first two (searching backwards from end) with "degree" in units attributes, otherwise just choose first two */ crd_idx=crd_spt=0; while(crd_spt < 2 && crd_idx < crd_nbr){ cf->crd_nm[crd_spt]=crd_nm[crd_idx]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[crd_spt],&cf->crd_id[crd_spt])) == NC_NOERR){ cf->unt_sng[crd_spt]=nco_char_att_get(in_id,cf->crd_id[crd_spt],unt_sng); if(cf->unt_sng[crd_spt]){ if(strcasestr(cf->unt_sng[crd_spt],"degree")){ /* Increment count of spatial-like coordinates... */ crd_spt++; }else{ /* ...or free() memory allocated during search */ cf->unt_sng[crd_spt]=(char *)nco_free(cf->unt_sng[crd_spt]); } /* !strcasestr() */ crd_idx++; } /* !rcd && att_typ */ } /* !rcd */ } /* !crd_spt */ /* If while()-loop above was successful, our search is over Otherwise, use first two coordinate names regardless of units, and print more diagnostics */ if(crd_spt < 2){ cf->crd_nm[0]=crd_nm[0]; cf->crd_nm[1]=crd_nm[1]; if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[0],&cf->crd_id[0])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s not found. Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0]); goto skp_cf; } /* !rcd */ if((rcd=nco_inq_varid_flg(in_id,cf->crd_nm[1],&cf->crd_id[1])) != NC_NOERR){ (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s not found. 
Turning-off CF coordinates search.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1]); goto skp_cf; } /* !rcd */ cf->unt_sng[0]=nco_char_att_get(in_id,cf->crd_id[0],unt_sng); if(cf->unt_sng[0]){ if(!strcasestr(cf->unt_sng[0],"degree")) (void)fprintf(stderr,"%s: WARNING %s reports first coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->unt_sng[0]); } /* !rcd && att_typ */ cf->unt_sng[1]=nco_char_att_get(in_id,cf->crd_id[1],unt_sng); if(cf->unt_sng[1]){ if(!strcasestr(cf->unt_sng[1],"degree")) (void)fprintf(stderr,"%s: WARNING %s reports second coordinates variable %s has weird units attribute = %s. May not detect correct ordering of latitude and longitude coordinates\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[1],cf->unt_sng[1]); } /* !rcd && att_typ */ } /* !crd_spt */ int crd_rnk; /* [nbr] Coordinate rank */ rcd=nco_inq_varndims(in_id,cf->crd_id[0],&crd_rnk); if(crd_rnk != 2){ (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s has %i dimension(s). Skipping CF coordinates method.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],crd_rnk); goto skp_cf; } /* !crd_rnk */ rcd=nco_inq_vardimid(in_id,cf->crd_id[0],cf->dmn_id); cf->dmn_nm[0]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); cf->dmn_nm[1]=(char *)nco_malloc(NC_MAX_NAME*sizeof(NC_CHAR)); rcd=nco_inq_dimname(in_id,cf->dmn_id[0],cf->dmn_nm[0]); rcd=nco_inq_dimname(in_id,cf->dmn_id[1],cf->dmn_nm[1]); /* "coordinates" convention does not guarantee lat, lon are specified in that order Use "units" values, if any, to determine order In absence of "units", assume order is lat, lon */ nco_bool crd0_is_lat=False; /* [flg] First coordinate is latitude */ nco_bool crd0_is_lon=False; /* [flg] First coordinate is longitude */ nco_bool crd1_is_lat=False; /* [flg] Second coordinate is latitude */ nco_bool crd1_is_lon=False; /* [flg] Second coordinate is longitude */ if(cf->unt_sng[0]){ if(!strcasecmp(cf->unt_sng[0],"degrees_north") || !strcasecmp(cf->unt_sng[0],"degree_north") || !strcasecmp(cf->unt_sng[0],"degree_N") || !strcasecmp(cf->unt_sng[0],"degrees_N") || !strcasecmp(cf->unt_sng[0],"degreeN") || !strcasecmp(cf->unt_sng[0],"degreesN")) crd0_is_lat=True; if(!strcasecmp(cf->unt_sng[0],"degrees_east") || !strcasecmp(cf->unt_sng[0],"degree_east") || !strcasecmp(cf->unt_sng[0],"degree_E") || !strcasecmp(cf->unt_sng[0],"degrees_E") || !strcasecmp(cf->unt_sng[0],"degreeE") || !strcasecmp(cf->unt_sng[0],"degreesE")) crd0_is_lon=True; } /* endif */ if(cf->unt_sng[1]){ if(!strcasecmp(cf->unt_sng[1],"degrees_north") || !strcasecmp(cf->unt_sng[1],"degree_north") || !strcasecmp(cf->unt_sng[1],"degree_N") || !strcasecmp(cf->unt_sng[1],"degrees_N") || !strcasecmp(cf->unt_sng[1],"degreeN") || !strcasecmp(cf->unt_sng[1],"degreesN")) crd1_is_lat=True; if(!strcasecmp(cf->unt_sng[1],"degrees_east") || !strcasecmp(cf->unt_sng[1],"degree_east") || !strcasecmp(cf->unt_sng[1],"degree_E") || !strcasecmp(cf->unt_sng[1],"degrees_E") || !strcasecmp(cf->unt_sng[1],"degreeE") || !strcasecmp(cf->unt_sng[1],"degreesE")) crd1_is_lon=True; } /* endif */ assert((crd0_is_lat && crd1_is_lon) || (crd0_is_lon && crd1_is_lat)); int idx_lat; int idx_lon; if(crd0_is_lat && crd1_is_lon){ idx_lat=0; idx_lon=1; }else{ idx_lat=1; idx_lon=0; } /* endif */ /* Dimensions and coordinates have been vetted. Store as primary lookup names. 
Dimensions are always returned in order [LRV,MRV]=[0,1] LRV is along-track direction, and MRV is across-track (at least in NASA data) Internally we label LRV as "lat" and MRV as "lon" so that code looks similar for curvilinear and rectangular grids */ dmn_id_lat=cf->dmn_id[0]; dmn_id_lon=cf->dmn_id[1]; /* Subtlety: lat_nm_in is coordinate (variable+dimension) name when specified from command-line (as in nco_grd_nfr()), dimension name when found through CF-method (as in nco_rgr_wgt()). This confusing distinction could be avoided by passing command-line dimension names through-to nco_rgr_wgt(). However, that route would require complex priorities for what to do when passing command-line coordinate names not dimension names and visa-versa. */ //lat_nm_in=strdup(cf->dmn_nm[0]); //lon_nm_in=strdup(cf->dmn_nm[1]); lat_nm_in=strdup(cf->crd_nm[idx_lat]); lon_nm_in=strdup(cf->crd_nm[idx_lon]); /* Next four lines unnecessary in nco_rgr_wgt() which only needs dimension names (it reads input coordinates from map- not data-file) */ lat_ctr_id=cf->crd_id[idx_lat]; lon_ctr_id=cf->crd_id[idx_lon]; lat_dmn_nm=strdup(cf->dmn_nm[0]); lon_dmn_nm=strdup(cf->dmn_nm[1]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports coordinates variable %s \"coordinates\" attribute \"%s\" points to coordinates %s and %s. Latitude coordinate \"%s\" has LRV (along-track) and MRV (across-track) dimensions \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,rgr_var,cf->crd_sng,cf->crd_nm[0],cf->crd_nm[1],cf->crd_nm[idx_lat],cf->dmn_nm[0],cf->dmn_nm[1]); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s Coordinates %s and %s \"units\" values are \"%s\" and \"%s\", respectively.\n",nco_prg_nm_get(),fnc_nm,cf->crd_nm[0],cf->crd_nm[1],cf->unt_sng[0] ? cf->unt_sng[0] : "(non-existent)",cf->unt_sng[1] ? 
cf->unt_sng[1] : "(non-existent)"); /* Clean-up CF coordinates memory */ if(crd_dpl) crd_dpl=(char *)nco_free(crd_dpl); if(cf->crd_sng) cf->crd_sng=(char *)nco_free(cf->crd_sng); if(cf->dmn_nm[0]) cf->dmn_nm[0]=(char *)nco_free(cf->dmn_nm[0]); if(cf->dmn_nm[1]) cf->dmn_nm[1]=(char *)nco_free(cf->dmn_nm[1]); if(cf->unt_sng[0]) cf->unt_sng[0]=(char *)nco_free(cf->unt_sng[0]); if(cf->unt_sng[1]) cf->unt_sng[1]=(char *)nco_free(cf->unt_sng[1]); } /* !rgr_var */ /* goto skp_cf */ skp_cf: /* free() any abandoned cf structure now */ if(!flg_cf) if(cf) cf=(cf_crd_sct *)nco_free(cf); rcd=NC_NOERR; /* End CF-coordinates block */ /* Locate fields that must be present in input file Required variables are usually latitude and longitude Currently these variables must be in root group This fails for, e.g., OMI L2 which has coordinates /GEOLOCATION_DATA/[Latitude,Longitude] fxm: Generalize with traversal table so usual suspect coordinates may be in any group */ if(lat_ctr_id == NC_MIN_INT){ if(rgr->lat_nm_in && (rcd=nco_inq_varid_flg(in_id,rgr->lat_nm_in,&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup(rgr->lat_nm_in); else if((rcd=nco_inq_varid_flg(in_id,"latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latitude"); else if((rcd=nco_inq_varid_flg(in_id,"Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("Latitude"); /* AMSR, HIRDLS, TRMM */ else if((rcd=nco_inq_varid_flg(in_id,"lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("lat"); /* CAM */ else if((rcd=nco_inq_varid_flg(in_id,"lat_d",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("lat_d"); /* EAM dynamics grid */ else if((rcd=nco_inq_varid_flg(in_id,"Lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("Lat"); else if((rcd=nco_inq_varid_flg(in_id,"XLAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("XLAT"); /* WRF */ else if((rcd=nco_inq_varid_flg(in_id,"XLAT_M",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("XLAT_M"); /* Unknown */ else if((rcd=nco_inq_varid_flg(in_id,"LAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("LAT"); /* MAR/RACMO */ else if((rcd=nco_inq_varid_flg(in_id,"LATIXY",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("LATIXY"); /* CISM/CLM/ELM */ else if((rcd=nco_inq_varid_flg(in_id,"TLAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("TLAT"); /* CICE, POP */ else if((rcd=nco_inq_varid_flg(in_id,"ULAT",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("ULAT"); /* CICE, POP */ else if((rcd=nco_inq_varid_flg(in_id,"latCell",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latCell"); /* MPAS-O/I */ else if((rcd=nco_inq_varid_flg(in_id,"nav_lat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("nav_lat"); /* NEMO */ else if((rcd=nco_inq_varid_flg(in_id,"rlat",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("rlat"); /* RACMO */ else if((rcd=nco_inq_varid_flg(in_id,"global_latitude0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("global_latitude0"); /* Oxford */ else if((rcd=nco_inq_varid_flg(in_id,"latitude0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("latitude0"); /* Oxford NB: Must search for global_* first */ else if((rcd=nco_inq_varid_flg(in_id,"CO_Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("CO_Latitude"); /* MLS */ else if((rcd=nco_inq_varid_flg(in_id,"S1_Latitude",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("S1_Latitude"); /* GPM */ else if((rcd=nco_inq_varid_flg(in_id,"yc",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("yc"); /* RTM */ else if((rcd=nco_inq_varid_flg(in_id,"south_north",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("south_north"); /* StackOverflow question https://stackoverflow.com/questions/68896581 */ else 
if((rcd=nco_inq_varid_flg(in_id,"gridlat_0",&lat_ctr_id)) == NC_NOERR) lat_nm_in=strdup("gridlat_0"); /* NWS HRRR */ } /* !lat_ctr_id */ if(lon_ctr_id == NC_MIN_INT){ if(rgr->lon_nm_in && (rcd=nco_inq_varid_flg(in_id,rgr->lon_nm_in,&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup(rgr->lon_nm_in); else if((rcd=nco_inq_varid_flg(in_id,"longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("longitude"); else if((rcd=nco_inq_varid_flg(in_id,"Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("Longitude"); /* AMSR, TRMM */ else if((rcd=nco_inq_varid_flg(in_id,"lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lon"); /* CAM */ else if((rcd=nco_inq_varid_flg(in_id,"lon_d",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lon"); /* EAM dynamics grid */ else if((rcd=nco_inq_varid_flg(in_id,"Lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("Lon"); else if((rcd=nco_inq_varid_flg(in_id,"XLONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("XLONG"); /* WRF */ else if((rcd=nco_inq_varid_flg(in_id,"XLONG_M",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("XLONG_M"); /* Unknown */ else if((rcd=nco_inq_varid_flg(in_id,"LON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("LON"); /* MAR/RACMO */ else if((rcd=nco_inq_varid_flg(in_id,"LONGXY",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("LONGXY"); /* CISM/CLM/ELM */ else if((rcd=nco_inq_varid_flg(in_id,"TLON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("TLON"); /* CICE */ else if((rcd=nco_inq_varid_flg(in_id,"TLONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("TLONG"); /* POP */ else if((rcd=nco_inq_varid_flg(in_id,"ULON",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("ULON"); /* CICE */ else if((rcd=nco_inq_varid_flg(in_id,"ULONG",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("ULONG"); /* POP */ else if((rcd=nco_inq_varid_flg(in_id,"lonCell",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("lonCell"); /* MPAS-O/I */ else if((rcd=nco_inq_varid_flg(in_id,"nav_lon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("nav_lon"); /* NEMO */ else if((rcd=nco_inq_varid_flg(in_id,"rlon",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("rlon"); /* RACMO */ else if((rcd=nco_inq_varid_flg(in_id,"global_longitude0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("global_longitude0"); /* Oxford NB: Must search for global_* first */ else if((rcd=nco_inq_varid_flg(in_id,"longitude0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("longitude0"); /* Oxford */ else if((rcd=nco_inq_varid_flg(in_id,"CO_Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("CO_Longitude"); /* MLS */ else if((rcd=nco_inq_varid_flg(in_id,"S1_Longitude",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("S1_Longitude"); /* GPM */ else if((rcd=nco_inq_varid_flg(in_id,"xc",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("xc"); /* RTM */ else if((rcd=nco_inq_varid_flg(in_id,"west_east",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("west_east"); /* StackOverflow question https://stackoverflow.com/questions/68896581 */ else if((rcd=nco_inq_varid_flg(in_id,"gridlon_0",&lon_ctr_id)) == NC_NOERR) lon_nm_in=strdup("gridlon_0"); /* NWS HRRR */ } /* !lon_ctr_id */ if(!lat_nm_in || !lon_nm_in){ (void)fprintf(stdout,"%s: ERROR %s unable to identify latitude and/or longitude variable.\nHINT: Potential causes and workarounds for this include: 1. Coordinate variables must be in the root directory (not in a group). If this might be the problem, try to \"flatten\" the input file before regridding it (see http://nco.sf.net/nco.html#flatten). 2. Horizontal dimensions with \"unusual\" names are hard to identify unless the user designates them somehow. 
ncremap will search for horizontal dimensions named in the \"coordinates\" attribute in a template variable specified with the \"-V rgr_var\" option. 3. NCO will also search its own internal database for likely names of horizontal coordinate variables (lat, latitude, LAT, XLAT, etc.). Contact the NCO project to have your idiosyncratic coordinate names added to the internal database.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !lat_nm_in */ /* Rank of coordinates determines whether grid is curvilinear */ rcd+=nco_inq_varndims(in_id,lat_ctr_id,&lat_rnk); rcd+=nco_inq_varndims(in_id,lon_ctr_id,&lon_rnk); /* If lat_ctr and lon_ctr share same and only dimension then grid is unstructured */ if(lat_rnk*lon_rnk == 1){ rcd+=nco_inq_vardimid(in_id,lat_ctr_id,&dmn_id_lat); rcd+=nco_inq_vardimid(in_id,lon_ctr_id,&dmn_id_lon); if(dmn_id_lat == dmn_id_lon){ dmn_id_col=dmn_id_lat; dmn_id_lat=NC_MIN_INT; dmn_id_lon=NC_MIN_INT; rcd+=nco_inq_dimname(in_id,dmn_id_col,dmn_nm); col_dmn_nm=(char *)strdup(dmn_nm); flg_grd_1D=True; } /* !unstructured */ } /* lat_rnk == lon_rnk == 1 */ if(lat_rnk*lon_rnk == 1 && dmn_id_lat != NC_MIN_INT && dmn_id_lon != NC_MIN_INT){ flg_grd_crv=False; flg_grd_2D=True; } /* !lat_rnk */ if(lat_rnk == dmn_nbr_2D || lon_rnk == dmn_nbr_2D){ flg_grd_crv=True; flg_grd_2D=False; } /* !lat_rnk */ if(lat_rnk > dmn_nbr_2D || lon_rnk > dmn_nbr_2D){ (void)fprintf(stdout,"%s: ERROR %s reports an identified grid variable (%s with rank %d and/or %s with rank %d) has rank greater than two---grid variables currently must have rank 1 or 2.\nHINT: If grid variables do not vary in time, then temporally average them (with, e.g., ncwa -a time in.nc out.nc) prior to inferring grid\n",nco_prg_nm_get(),fnc_nm,lat_nm_in,lat_rnk,lon_nm_in,lon_rnk); nco_exit(EXIT_FAILURE); } /* !3D */ if(lat_rnk*lon_rnk != 1 && lat_rnk*lon_rnk != 4) assert(False); /* Scrutinize coordinates for their dimensions NB: Unstructured already known */ if(flg_grd_2D){ rcd+=nco_inq_dimname(in_id,dmn_id_lat,dmn_nm); lat_dmn_nm=(char *)strdup(dmn_nm); rcd+=nco_inq_dimname(in_id,dmn_id_lon,dmn_nm); lon_dmn_nm=(char *)strdup(dmn_nm); } /* !flg_grd_2D */ if(flg_grd_crv){ rcd+=nco_inq_vardimid(in_id,lat_ctr_id,dmn_ids); /* fxm: use cf struct and match with units name, if any? 
normally curvilinear grid dimensions are just pixel dimensions that are not aligned north-south or east-west */ dmn_id_lat=dmn_ids[0]; dmn_id_lon=dmn_ids[1]; rcd+=nco_inq_dimname(in_id,dmn_id_lat,dmn_nm); lat_dmn_nm=(char *)strdup(dmn_nm); rcd+=nco_inq_dimname(in_id,dmn_id_lon,dmn_nm); lon_dmn_nm=(char *)strdup(dmn_nm); } /* !flg_grd_crv */ if(!(lat_dmn_nm && lon_dmn_nm) && !col_dmn_nm){ (void)fprintf(stdout,"%s: ERROR %s unable to identify latitude and/or longitude dimension and/or column dimension.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !col_dmn_nm !lat_dmn_nm !lon_dmn_nm */ /* Locate spatial dimensions that may be present NB: bounds dimensions may present a special problem CAM-FV and CAM-SE use nbnd for temporal bounds and have no spatial bounds dimension CAM3 uses tbnd for temporal bounds and has no spatial bounds dimension CICE and POP use d2 for temporal bounds, and CICE uses nvertices for spatial bounds while POP uses nothing Hence search for nvertices before nbnd to ensure spatial bound is found first */ if((rcd=nco_inq_dimid_flg(in_id,"nv",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("nv"); /* fxm */ else if((rcd=nco_inq_dimid_flg(in_id,"nvertices",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("nvertices"); /* CICE */ else if((rcd=nco_inq_dimid_flg(in_id,"maxEdges",&dmn_id_bnd)) == NC_NOERR) bnd_dmn_nm=strdup("maxEdges"); /* MPAS */ if((rcd=nco_inq_dimid_flg(in_id,"nVertices",&dmn_id_vrt)) == NC_NOERR) vrt_dmn_nm=strdup("nVertices"); /* MPAS */ /* Use dimension IDs to get dimension sizes and grid size */ if(flg_grd_1D){ rcd+=nco_inq_dimlen(in_id,dmn_id_col,&col_nbr); lat_nbr=lon_nbr=col_nbr; }else{ rcd+=nco_inq_dimlen(in_id,dmn_id_lat,&lat_nbr); rcd+=nco_inq_dimlen(in_id,dmn_id_lon,&lon_nbr); col_nbr=NC_MIN_INT; } /* !flg_grd_1D */ if(dmn_id_bnd != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_bnd,&grd_crn_nbr); if(dmn_id_bnd != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_bnd,&bnd_nbr); if(dmn_id_vrt != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_vrt,&vrt_nbr); if(flg_grd_1D){ /* Unstructured grid (e.g., CAM-SE) */ grd_rnk_nbr=dmn_nbr_1D; grd_typ=nco_grd_2D_unk; lat_typ=nco_grd_lat_unk; lon_typ=nco_grd_lon_unk; /* 1D grids without their own boundaries are at the mercy of the weight generator */ if(dmn_id_bnd == NC_MIN_INT){ (void)fprintf(stdout,"%s: WARNING %s reports an unstructured grid without spatial boundary information. NCO can copy but not infer spatial boundaries from unstructured grids. Thus NCO will not write spatial bounds to the gridfile inferred from this input file. Instead, the weight generator that ingests this gridfile must generate weights for gridcells with unknown spatial extent. This is feasible for grids and mappings where weights masquerade as areas and are determined by underlying grid and interpolation type (e.g., bilinear remapping of spectral element grid). Unfortunately, the ESMF_RegridWeightGen (ERWG) program requires cell interfaces in both grid files, so ERWG will break on this gridfile. Other weight generators such as TempestRemap may be more successful with this SCRIP file.\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT Re-run the regridder, this time adding the \"-s src_grd\" option to specify the source grid file in SCRIP format. 
That SCRIP file will have the spatial bounds information required by the ESMF_RegridWeightGen (ERWG) program, so that the regridder will circumvent inferring the underlying grid through its black but fragile magic.\n",nco_prg_nm_get());
flg_wrt_crn=False;
/* Input could actually be from grid with no polygonal definition, e.g., CAM-SE
   Corner number is non-deterministic since, e.g., CAM-SE dual grid can be fit to quadrilaterals, pentagons, chevrons, etc.
   Bounds will not be diagnosed so safe to set grd_crn_nbr to harmless (though weird) value like 4
   However, ERWG requires presence of valid corner dimension "grid_corners" and arrays in input SCRIP file
   So ERWG will break when reading this SCRIP file regardless of whether it contains arrays (with bogus values)
   By default do not write grid corner values */
grd_crn_nbr=4;
} /* !dmn_id_bnd */
if(bnd_nbr == 2){
/* Unstructured grids with bounds information (e.g., OCO2) may use a pseudo-rectangular convention of archiving latitude and longitude bounds as 2xN (rather than 4xN) arrays even though cells have four corners.
   The "convention" is that two latitudes and two longitudes can specify a rectangular boundary cell
   In this case, bnd_nbr=grd_crn_nbr=2=sizeof(nv)=sizeof(nvertices) currently
   Set number of corners to rectangular and leave bnd_nbr as is */
grd_crn_nbr=4;
flg_1D_psd_rct_bnd=True;
} /* !bnd_nbr */
if(!strcmp(bnd_dmn_nm,"maxEdges")){
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Unstructured grid has dimension \"%s\" which indicates an MPAS grid. Will attempt to locate other MPAS information (dimension nVertices and variables verticesOnCell, lonVertex, and latVertex) to construct SCRIP-compliant bounds variables...\n",nco_prg_nm_get(),bnd_dmn_nm);
if((rcd=nco_inq_varid_flg(in_id,"verticesOnCell",&vrt_cll_id)) == NC_NOERR) vrt_cll_nm=strdup("verticesOnCell");
if((rcd=nco_inq_varid_flg(in_id,"lonVertex",&vrt_lon_id)) == NC_NOERR) vrt_lon_nm=strdup("lonVertex");
if((rcd=nco_inq_varid_flg(in_id,"latVertex",&vrt_lat_id)) == NC_NOERR) vrt_lat_nm=strdup("latVertex");
if(dmn_id_vrt != NC_MIN_INT) rcd+=nco_inq_dimlen(in_id,dmn_id_vrt,&vrt_nbr);
if(vrt_dmn_nm && vrt_cll_nm && vrt_lon_nm && vrt_lat_nm){
flg_1D_mpas_bnd=True;
if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO Found all MPAS information needed to construct SCRIP-compliant bounds variables.\n",nco_prg_nm_get());
}else{
(void)fprintf(stdout,"%s: INFO Unable to find all MPAS information needed to construct SCRIP-compliant bounds variables. Will not write bounds coordinates. This will degrade usefulness of SCRIP file for regridding schemes (e.g., conservative) that require cell boundaries.\n",nco_prg_nm_get());
(void)fprintf(stdout,"%s: HINT Often MPAS restart files contain the required bounds variables (verticesOnCell, lonVertex, latVertex) that normal MPAS data files lack.
Try inferring the SCRIP grid from a restart file not a normal time-varying output dataset.\n",nco_prg_nm_get()); flg_wrt_crn=False; } /* !vrt_cll_nm */ } /* !bnd_dmn_nm */ }else if(flg_grd_2D){ /* !flg_grd_1D */ /* Assume 2D grid of uninitialized type */ grd_rnk_nbr=dmn_nbr_2D; grd_typ=nco_grd_2D_nil; lat_typ=nco_grd_lat_nil; lon_typ=nco_grd_lon_nil; /* Assume rectangular grids that do not specify otherwise use quadrilaterals */ if(dmn_id_bnd == NC_MIN_INT) grd_crn_nbr=4; /* Sometimes we infer from a 2D grid, like those produced by nco_grd_mk(), that has bounds with nv=2 This signals rectangular gridcell bounds are interfaces not vertices (to save half the space) These rectangles really have four corners so we change grd_crn_nbr (not bnd_nbr) accordingly */ if(grd_crn_nbr == 2) grd_crn_nbr=4; /* Convention is to archive only two bounds for rectangular grids (since sides are identical) Non-quadrilateral rectangular grids are untested */ if(grd_crn_nbr == 4) bnd_nbr=2; }else if(flg_grd_crv){ /* !flg_grd_2D */ /* Assume curvilinear grid (e.g., WRF) */ flg_grd_2D=False; grd_rnk_nbr=dmn_nbr_2D; grd_typ=nco_grd_2D_unk; lat_typ=nco_grd_lat_unk; lon_typ=nco_grd_lon_unk; /* Assume curvilinear grids that do not specify otherwise use quadrilaterals */ if(dmn_id_bnd == NC_MIN_INT) grd_crn_nbr=4; /* Assume quadrilaterals are, well, quadrilaterals (e.g., rhomboids) not necessarily rectangles Non-quadrilateral curvilinear grids are untested */ if(grd_crn_nbr == 4) bnd_nbr=4; else assert(False); } /* !flg_grd_crv */ /* Allocate space for output data */ if(flg_grd_1D) grd_sz_nbr=col_nbr; else grd_sz_nbr=lat_nbr*lon_nbr; dmn_sz_int=(int *)nco_malloc(grd_rnk_nbr*nco_typ_lng((nc_type)NC_INT)); area=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); msk=(int *)nco_malloc(grd_sz_nbr*nco_typ_lng((nc_type)NC_INT)); if(flg_grd_1D){ if(bnd_nbr != NC_MIN_INT) lat_bnd=(double *)nco_malloc(grd_sz_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lat_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); if(bnd_nbr != NC_MIN_INT) lon_bnd=(double *)nco_malloc(grd_sz_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); }else if(flg_grd_2D){ /* !flg_grd_1D */ lat_bnd=(double *)nco_malloc(lat_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lat_crn=(double *)nco_malloc(lat_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lon_bnd=(double *)nco_malloc(lon_nbr*bnd_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(lon_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(lon_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); }else if(flg_grd_crv){ /* !flg_grd_2D */ lat_bnd=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lat_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lat_ntf=(double *)nco_malloc((lat_nbr+1L)*nco_typ_lng(crd_typ)); lat_wgt=(double *)nco_malloc(lat_nbr*nco_typ_lng(crd_typ)); lon_bnd=(double 
*)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_crn=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ)); lon_ctr=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); lon_ntf=(double *)nco_malloc((lon_nbr+1L)*nco_typ_lng(crd_typ)); } /* !flg_grd_crv */ grd_ctr_lat=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_ctr_lon=(double *)nco_malloc(grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lat=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); grd_crn_lon=(double *)nco_malloc(grd_crn_nbr*grd_sz_nbr*nco_typ_lng(crd_typ)); /* Locate fields that may be present in input file */ if((rcd=nco_inq_varid_flg(in_id,"lat_bnds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_bnds"); else if((rcd=nco_inq_varid_flg(in_id,"latt_bounds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latt_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"latu_bounds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latu_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"lat_ntf",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_ntf"); else if((rcd=nco_inq_varid_flg(in_id,"lat_vertices",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("lat_vertices"); else if((rcd=nco_inq_varid_flg(in_id,"latitude_bnds",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("latitude_bnds"); /* OCO2 */ else if((rcd=nco_inq_varid_flg(in_id,"LatitudeCornerpoints",&lat_bnd_id)) == NC_NOERR) lat_bnd_nm=strdup("LatitudeCornerpoints"); /* OMI */ if((rcd=nco_inq_varid_flg(in_id,"lon_bnds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_bnds"); else if((rcd=nco_inq_varid_flg(in_id,"lont_bounds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lont_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"lonu_bounds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lonu_bounds"); else if((rcd=nco_inq_varid_flg(in_id,"lon_ntf",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_ntf"); else if((rcd=nco_inq_varid_flg(in_id,"lon_vertices",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("lon_vertices"); else if((rcd=nco_inq_varid_flg(in_id,"longitude_bnds",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("longitude_bnds"); /* OCO2 */ else if((rcd=nco_inq_varid_flg(in_id,"LongitudeCornerpoints",&lon_bnd_id)) == NC_NOERR) lon_bnd_nm=strdup("LongitudeCornerpoints"); /* OMI */ if((rcd=nco_inq_varid_flg(in_id,"area",&area_id)) == NC_NOERR) area_nm_in=strdup("area"); else if((rcd=nco_inq_varid_flg(in_id,"Area",&area_id)) == NC_NOERR) area_nm_in=strdup("Area"); else if((rcd=nco_inq_varid_flg(in_id,"areaCell",&area_id)) == NC_NOERR) area_nm_in=strdup("areaCell"); /* MPAS-O/I */ else if((rcd=nco_inq_varid_flg(in_id,"grid_area",&area_id)) == NC_NOERR) area_nm_in=strdup("grid_area"); else if((rcd=nco_inq_varid_flg(in_id,"area_d",&area_id)) == NC_NOERR) area_nm_in=strdup("area_d"); /* EAM dynamics grid */ else if((rcd=nco_inq_varid_flg(in_id,"area_p",&area_id)) == NC_NOERR) area_nm_in=strdup("area_p"); /* EAM physics grid */ // else if((rcd=nco_inq_varid_flg(in_id,"aice",&area_id)) == NC_NOERR) area_nm_in=strdup("aice"); /* CICE time-dependent ice area (3D), not total gridcell area */ else if((rcd=nco_inq_varid_flg(in_id,"tarea",&area_id)) == NC_NOERR) area_nm_in=strdup("tarea"); /* CICE time-invariant state-variable gridcell area (2D) */ else if((rcd=nco_inq_varid_flg(in_id,"uarea",&area_id)) == NC_NOERR) area_nm_in=strdup("uarea"); /* CICE time-invariant dynamics variables (2D) */ msk_nm_in=rgr->msk_var; if(msk_nm_in){ if(!strcasecmp(msk_nm_in,"none")){ /* 20170814: Some variables named "*mask*" are, e.g., quality control masks not regridding masks per se */ 
msk_nm_in=(char *)nco_free(msk_nm_in);
}else{ /* User-supplied name overrides database */
rcd=nco_inq_varid(in_id,msk_nm_in,&msk_id);
} /* !msk_nm_in */
}else{ /* Otherwise search database */
if((rcd=nco_inq_varid_flg(in_id,"mask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("mask");
else if((rcd=nco_inq_varid_flg(in_id,"Mask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("Mask");
else if((rcd=nco_inq_varid_flg(in_id,"mask_b",&msk_id)) == NC_NOERR) msk_nm_in=strdup("mask_b");
else if((rcd=nco_inq_varid_flg(in_id,"grid_imask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("grid_imask");
else if((rcd=nco_inq_varid_flg(in_id,"landmask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("landmask"); /* ALM/CLM */
else if((rcd=nco_inq_varid_flg(in_id,"tmask",&msk_id)) == NC_NOERR) msk_nm_in=strdup("tmask"); /* CICE */
} /* !msk_nm_in */
/* Mask field requires special handling for non-conformant models */
if(msk_id != NC_MIN_INT){
/* 20151201: All models tested define mask as NC_INT except CICE which uses NC_FLOAT
   20160111: Few observations tested define mask. Exceptions include AMSR and GHRSST. AMSR uses NC_SHORT to store bitmasks. Bitmask is 1 for missing data, and up to 128 for various quality levels of valid data. Hence, almost better to ignore AMSR mask variable. GHRSST uses NC_BYTE for its 3D "mask" bit-mask of surface-type values 1,2,4,8,16. */
rcd=nco_inq_varndims(in_id,msk_id,&msk_rnk_nbr);
if(msk_rnk_nbr != grd_rnk_nbr && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports input mask variable \"%s\" is rank %d while grid is rank %ld so will use first timestep/layer to determine output mask\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,msk_rnk_nbr,grd_rnk_nbr);
rcd=nco_inq_vartype(in_id,msk_id,&msk_typ);
msk_unn.vp=(void *)nco_malloc(grd_sz_nbr*nco_typ_lng(msk_typ));
} /* !msk */
/* All grids: Some real-world datasets violate convention that coordinates ought never have missing values
   CICE lists missing value for lat/lon_ctr arrays (TLAT, TLONG) and re-uses that for bounds arrays (latt_bounds, lont_bounds) that do not bother to have their own missing value attributes
   Without counter-example, assume has_mss_val_bnd=has_mss_val_ctr and mss_val_bnd_dbl=mss_val_ctr_dbl */
has_mss_val_bnd=has_mss_val_ctr=nco_mss_val_get_dbl(in_id,lat_ctr_id,&mss_val_ctr_dbl);
char *att_val;
char *area_unt=NULL; /* [sng] Dimensional units used in area */
char *ngl_unt=NULL; /* [sng] Angular units used in coordinates */
long att_sz;
nc_type att_typ;
nco_bool flg_area_sr=True; /* [flg] Input area is in steradians not something weird like km2 */
nco_bool flg_crd_rdn=False; /* [flg] Input coordinates are in radians not degrees */
if(flg_grd_1D){
/* Obtain fields that must be present in unstructured input file */
dmn_srt[0]=0L;
dmn_cnt[0]=col_nbr;
rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ);
rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ);
/* Obtain fields that may be present in unstructured input file */
if(area_id != NC_MIN_INT) rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
if(msk_id != NC_MIN_INT){
if(msk_rnk_nbr > grd_rnk_nbr){
/* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer...
*/
for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){
  dmn_srt[dmn_idx]=0L;
  dmn_cnt[dmn_idx]=1L;
} /* !dmn_idx */
dmn_srt[dmn_idx]=0L;
dmn_cnt[dmn_idx]=col_nbr;
} /* !msk_rnk_nbr */
rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ);
} /* !msk_id */
dmn_srt[0]=dmn_srt[1]=0L;
if(flg_1D_psd_rct_bnd){
  dmn_cnt[0]=col_nbr;
  dmn_cnt[1]=bnd_nbr;
  if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ);
  if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ);
}else if(flg_1D_mpas_bnd){
  const long grd_crn_nbrm1=grd_crn_nbr-1L; /* [nbr] Number of corners in gridcell minus one */
  vrt_cll=(int *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng((nc_type)NC_INT));
  vrt_lat=(double *)nco_malloc(vrt_nbr*nco_typ_lng(crd_typ));
  vrt_lon=(double *)nco_malloc(vrt_nbr*nco_typ_lng(crd_typ));
  dmn_cnt[0]=col_nbr;
  dmn_cnt[1]=grd_crn_nbr;
  if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s reports dimension sizes bnd_nbr=%ld, col_nbr=%ld, grd_crn_nbr=%ld, vrt_nbr=%ld\n",nco_prg_nm_get(),fnc_nm,bnd_nbr,col_nbr,grd_crn_nbr,vrt_nbr);
  if(vrt_cll_id != NC_MIN_INT) rcd=nco_get_vara(in_id,vrt_cll_id,dmn_srt,dmn_cnt,vrt_cll,(nc_type)NC_INT);
  dmn_cnt[0]=vrt_nbr;
  if(vrt_lat_id != NC_MIN_INT) rcd=nco_get_vara(in_id,vrt_lat_id,dmn_srt,dmn_cnt,vrt_lat,crd_typ);
  if(vrt_lon_id != NC_MIN_INT) rcd=nco_get_vara(in_id,vrt_lon_id,dmn_srt,dmn_cnt,vrt_lon,crd_typ);
  rcd=nco_inq_att_flg(in_id,vrt_lat_id,unt_sng,&att_typ,&att_sz);
  if(rcd == NC_NOERR && att_typ == NC_CHAR){
    att_val=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ));
    rcd+=nco_get_att(in_id,vrt_lat_id,unt_sng,att_val,att_typ);
    /* NUL-terminate attribute before using strstr() */
    att_val[att_sz]='\0';
    /* Match "radian" and "radians" */
    if(strstr(att_val,"radian")) flg_crd_rdn=True;
    if(att_val) ngl_unt=(char *)strdup(att_val);
    if(att_val) att_val=(char *)nco_free(att_val);
  } /* end rcd && att_typ */
  for(col_idx=0;col_idx<col_nbr;col_idx++){
    idx=col_idx*grd_crn_nbr;
    for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){
      ttl_idx=idx+crn_idx;
      vrt_idx=vrt_cll[ttl_idx];
      assert(vrt_idx >= 0);
      //if(vrt_idx >= vrt_nbr) (void)fprintf(stdout,"%s: WARNING %s input gridcell %ld corner %ld has extreme MPAS input verticesOnCell value %ld (maximum valid vertex = vrt_nbr-1 = %ld-1 = %ld)\n",nco_prg_nm_get(),fnc_nm,col_idx,crn_idx,vrt_idx,vrt_nbr,vrt_nbr-1);
      if(vrt_idx == 0){
        /* 20201220: Convert values of zero to neighboring valid vertex index */
        for(idx_fst=1;idx_fst<grd_crn_nbr;idx_fst++){
          idx_tmp=crn_idx+idx_fst;
          /* Wrap to initial corner of this cell when candidate corner would be in next cell */
          if(idx_tmp > grd_crn_nbrm1) idx_tmp-=grd_crn_nbr;
          ttl_idx=idx+idx_tmp;
          vrt_idx=vrt_cll[ttl_idx];
          if(vrt_idx != 0) break;
        } /* !idx_fst */
        assert(idx_fst < grd_crn_nbr);
      } /* !vrt_idx */
      /* 20201220: Stored vertex indices use Fortran-based convention---subtract one for C */
      vrt_idx--;
      lat_crn[ttl_idx]=vrt_lat[vrt_idx];
      lon_crn[ttl_idx]=vrt_lon[vrt_idx];
      //(void)fprintf(stdout,"%s: DEBUG %s reports col_idx = %ld, crn_idx = %ld, ttl_idx = %ld, vrt_idx = %ld, vrt_lat = %g, vrt_lon = %g\n",nco_prg_nm_get(),fnc_nm,col_idx,crn_idx,ttl_idx,vrt_idx,vrt_lat[vrt_idx],vrt_lon[vrt_idx]);
    } /* !crn_idx */
  } /* !col_idx */
}else{ /* !flg_1D_mpas_bnd */
  dmn_cnt[0]=col_nbr;
  dmn_cnt[1]=grd_crn_nbr;
  if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_crn,crd_typ);
  if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_crn,crd_typ);
} /* 
!flg_1D_psd_rct_bnd */
} /* !flg_grd_1D */
if(flg_grd_crv){
  /* Obtain fields that must be present in curvilinear input file */
  dmn_srt[0]=dmn_srt[1]=0L;
  dmn_cnt[0]=lat_nbr;
  dmn_cnt[1]=lon_nbr;
  rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ);
  rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ);
  /* 20150923: Also input, if present in curvilinear file, corners, area, and mask
     area and mask are same size as lat and lon */
  if(area_id != NC_MIN_INT) rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ);
  if(msk_id != NC_MIN_INT){
    if(msk_rnk_nbr > grd_rnk_nbr){
      /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... */
      for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){
        dmn_srt[dmn_idx]=0L;
        dmn_cnt[dmn_idx]=1L;
      } /* !dmn_idx */
      dmn_srt[dmn_idx]=dmn_srt[dmn_idx+1]=0L;
      dmn_cnt[dmn_idx]=lat_nbr;
      dmn_cnt[dmn_idx+1]=lon_nbr;
    } /* !msk_rnk_nbr */
    rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ);
  } /* !msk_id */
  /* Corners are on curvilinear corner grid
     Rectangular boundaries (i.e., lat_bnd=[lat_nbr,2]) DNE for curvilinear grids
     Read-in *_crn arrays in curvilinear grids, and *_bnd arrays for rectilinear grids
     Rank-ordering of corner arrays is usually lat_nbr,lon_nbr,grd_crn_nbr as produced/expected by SCRIP
     However some datasets, e.g., OMI DOMINO use grd_crn_nbr,lat_nbr,lon_nbr
     Sigh... */
  dmn_srt[0]=dmn_srt[1]=dmn_srt[2]=0L;
  if(lat_bnd_id != NC_MIN_INT && lon_bnd_id != NC_MIN_INT){
    rcd=nco_inq_vardimid(in_id,lat_bnd_id,dmn_ids);
    if((dmn_ids[0] == dmn_id_lat && dmn_ids[1] == dmn_id_lon) || (dmn_ids[0] == dmn_id_lon && dmn_ids[1] == dmn_id_lat)){
      dmn_id_bnd=dmn_ids[2];
      dmn_cnt[0]=lat_nbr;
      dmn_cnt[1]=lon_nbr;
      dmn_cnt[2]=grd_crn_nbr;
    }else if((dmn_ids[1] == dmn_id_lat && dmn_ids[2] == dmn_id_lon) || (dmn_ids[1] == dmn_id_lon && dmn_ids[2] == dmn_id_lat)){
      dmn_id_bnd=dmn_ids[0];
      dmn_cnt[0]=grd_crn_nbr;
      dmn_cnt[1]=lat_nbr;
      dmn_cnt[2]=lon_nbr;
      flg_crn_grd_lat_lon=True;
    }else{
      (void)fprintf(stdout,"%s: WARNING %s confused by dimension-ordering of latitude bounds variable \"%s\". Will ignore this bounds variable and attempt to extrapolate vertices from centers internally...\n",nco_prg_nm_get(),fnc_nm,lat_nm_in);
      lat_bnd_id=NC_MIN_INT;
      lon_bnd_id=NC_MIN_INT;
    } /* !dmn_ids */
    if(lat_bnd_id != NC_MIN_INT && lon_bnd_id != NC_MIN_INT){
      /* Skip reads when bounds variable was ignored above */
      rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_crn,crd_typ);
      rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_crn,crd_typ);
    } /* !ignored */
    if(flg_crn_grd_lat_lon){
      /* Permute corner arrays from non-canonical (grd_nbr,lat_nbr,lon_nbr) to canonical (lat_nbr,lon_nbr,grd_nbr) order */
      double *lat_crn_tmp=NULL;
      double *lon_crn_tmp=NULL;
      lat_crn_tmp=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
      lon_crn_tmp=(double *)nco_malloc(grd_sz_nbr*grd_crn_nbr*nco_typ_lng(crd_typ));
      memcpy(lat_crn_tmp,lat_crn,grd_sz_nbr*grd_crn_nbr*sizeof(double));
      memcpy(lon_crn_tmp,lon_crn,grd_sz_nbr*grd_crn_nbr*sizeof(double));
      for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){
        for(idx=0;idx<grd_sz_nbr;idx++){
          lat_idx=idx/lon_nbr;
          lon_idx=idx%lon_nbr;
          /* NB: Variables differ (lat vs. 
lon) but indexes are identical in next two lines */ lat_crn[lat_idx*lon_nbr*grd_crn_nbr+lon_idx*grd_crn_nbr+crn_idx]=lat_crn_tmp[crn_idx*grd_sz_nbr+idx]; lon_crn[lat_idx*lon_nbr*grd_crn_nbr+lon_idx*grd_crn_nbr+crn_idx]=lon_crn_tmp[crn_idx*grd_sz_nbr+idx]; } /* !idx */ } /* !crn_idx */ if(lat_crn_tmp) lat_crn_tmp=(double *)nco_free(lat_crn_tmp); if(lon_crn_tmp) lon_crn_tmp=(double *)nco_free(lon_crn_tmp); /* In this code branch, thought to be executed only for OMI DOMINO grids, re-compute grid center arrays (known to contain missing values) as centroids of supplied grid corners */ for(idx=0;idx<grd_sz_nbr;idx++){ lat_idx=idx/lon_nbr; lon_idx=idx%lon_nbr; lat_ctr[idx]=0.25*(lat_crn[idx*grd_crn_nbr+0L]+lat_crn[idx*grd_crn_nbr+1L]+lat_crn[idx*grd_crn_nbr+2L]+lat_crn[idx*grd_crn_nbr+3L]); lon_ctr[idx]=nco_lon_crn_avg_brnch(lon_crn[idx*grd_crn_nbr+0L],lon_crn[idx*grd_crn_nbr+1L],lon_crn[idx*grd_crn_nbr+2L],lon_crn[idx*grd_crn_nbr+3L]); } /* !idx */ } /* !flg_crd_grd_lat_lon */ } /* !lat_bnd_id */ } /* !flg_grd_crv */ if(flg_grd_2D){ int lon_psn_in=1L; /* [idx] Ordinal position of longitude dimension in rectangular grid variables like area */ int lat_psn_in=0L; /* [idx] Ordinal position of latitude dimension in rectangular grid variables like area */ int tpl_id=NC_MIN_INT; /* [id] ID of template field */ /* Obtain fields that must be present in input file */ dmn_srt[0L]=0L; dmn_cnt[0L]=lat_nbr; rcd=nco_get_vara(in_id,lat_ctr_id,dmn_srt,dmn_cnt,lat_ctr,crd_typ); dmn_srt[0L]=0L; dmn_cnt[0L]=lon_nbr; rcd=nco_get_vara(in_id,lon_ctr_id,dmn_srt,dmn_cnt,lon_ctr,crd_typ); if(lat_ctr[1L] < lat_ctr[0L]) flg_s2n=False; /* Use fields that may be present in input file to override, if necessary, default lon/lat order area and mask are both suitable templates for determining input lat/lon ordering NB: Algorithm assumes area is same rank as grid, and falls-back to mask if that has same rank as grid */ if(area_id != NC_MIN_INT) tpl_id=area_id; else if(msk_id != NC_MIN_INT && msk_rnk_nbr == grd_rnk_nbr) tpl_id=msk_id; if(tpl_id != NC_MIN_INT){ int tpl_rnk_nbr; var_id=tpl_id; /* NB: Template variable rank may exceed two with --msk_[src/dst] (e.g., SST(time,lat,lon)) */ rcd=nco_inq_varndims(in_id,var_id,&tpl_rnk_nbr); rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); /* fxm: Optimize discovery of lat/lon ordering */ for(dmn_idx=0;dmn_idx<grd_rnk_nbr;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_ids[dmn_idx],dmn_nm); rcd+=nco_inq_dimlen(in_id,dmn_ids[dmn_idx],&dmn_sz); if(!strcmp(dmn_nm,lat_dmn_nm)){ assert(dmn_sz == lat_nbr); assert(dmn_idx == 0); lat_psn_in=dmn_idx; } /* !lat */ if(!strcmp(dmn_nm,lon_dmn_nm)){ assert(dmn_sz == lon_nbr); assert(dmn_idx == 1); lon_psn_in=dmn_idx; } /* !lon */ } /* !dmn_idx */ } /* !tpl */ /* Obtain fields that may be present in input file */ if(area_id != NC_MIN_INT){ var_id=area_id; rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); dmn_srt[lat_psn_in]=0L; dmn_cnt[lat_psn_in]=lat_nbr; dmn_srt[lon_psn_in]=0L; dmn_cnt[lon_psn_in]=lon_nbr; rcd=nco_get_vara(in_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); } /* !area */ if(msk_id != NC_MIN_INT){ var_id=msk_id; rcd=nco_inq_vardimid(in_id,var_id,dmn_ids); dmn_srt[lat_psn_in]=0L; dmn_cnt[lat_psn_in]=lat_nbr; dmn_srt[lon_psn_in]=0L; dmn_cnt[lon_psn_in]=lon_nbr; if(msk_rnk_nbr != grd_rnk_nbr){ /* Retrieve mask elements only from first horizontal grid, e.g., first timestep, first layer... 
*/ for(dmn_idx=0;dmn_idx<msk_rnk_nbr-grd_rnk_nbr;dmn_idx++){ dmn_srt[dmn_idx]=0L; dmn_cnt[dmn_idx]=1L; } /* !dmn_idx */ dmn_srt[dmn_idx]=dmn_srt[dmn_idx+1]=0L; dmn_cnt[dmn_idx+lat_psn_in]=lat_nbr; dmn_cnt[dmn_idx+lon_psn_in]=lon_nbr; } /* !msk_rnk_nbr */ rcd=nco_get_vara(in_id,msk_id,dmn_srt,dmn_cnt,msk_unn.vp,msk_typ); } /* !msk */ /* Rectangular boundaries are often on "abbreviated" bounds grid (two bounds per center) Read-in *_crn arrays for 1D and curvilinear grids, and *_bnd arrays for rectilinear grids */ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lat_nbr; dmn_cnt[1]=bnd_nbr; if(lat_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lat_bnd_id,dmn_srt,dmn_cnt,lat_bnd,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=lon_nbr; dmn_cnt[1]=bnd_nbr; if(lon_bnd_id != NC_MIN_INT) rcd=nco_get_vara(in_id,lon_bnd_id,dmn_srt,dmn_cnt,lon_bnd,crd_typ); } /* !flg_grd_2D */ /* Obtain units, if any, of input area */ if(area_id != NC_MIN_INT){ rcd=nco_inq_att_flg(in_id,area_id,unt_sng,&att_typ,&att_sz); if(rcd == NC_NOERR && att_typ == NC_CHAR){ att_val=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ)); rcd+=nco_get_att(in_id,area_id,unt_sng,att_val,att_typ); /* NUL-terminate attribute before using strstr() */ att_val[att_sz]='\0'; if(!strcasestr(att_val,"radian")) flg_area_sr=False; if(att_val) area_unt=(char *)strdup(att_val); if(att_val) att_val=(char *)nco_free(att_val); } /* end rcd && att_typ */ } /* !area_id */ /* Additional information that may be required for any input grid */ if(area_id != NC_MIN_INT) has_mss_val_area=nco_mss_val_get_dbl(in_id,area_id,&mss_val_area_dbl); if(msk_id != NC_MIN_INT) has_mss_val_msk=nco_mss_val_get_dbl(in_id,msk_id,&mss_val_msk_dbl); /* 20160115: AMSR coordinates are packed as NC_SHORT with scale_value=0.01f. What to do? Is it worth unpacking everything? */ int flg_pck; /* [flg] Variable is packed on disk */ rcd=nco_inq_var_packing(in_id,lat_ctr_id,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports lat_ctr variable \"%s\" is packed so results unpredictable. HINT: If grid-generation causes problems, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,lat_nm_in); rcd=nco_inq_var_packing(in_id,lon_ctr_id,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports lon_ctr variable \"%s\" is packed so results unpredictable. HINT: If grid-generation causes problems, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,lon_nm_in); /* Close input netCDF file */ nco_close(in_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); /* Above this line, fl_in and in_id refer to input file to be regridded Below this line, fl_out and out_id refer to grid-file to be output */ dfl_lvl=rgr->dfl_lvl; fl_out=rgr->fl_grd; fl_out_fmt=rgr->fl_out_fmt; if(!fl_out){ (void)fprintf(stdout,"%s: ERROR %s filename for inferred SCRIP grid-file is uninitialized, supply it with \"ncks --rgr grid=filename.nc\" or \"ncremap -R '--rgr grid=filename.nc'\"\n",nco_prg_nm_get(),fnc_nm); (void)fprintf(stdout,"%s: HINT ncremap supplies an automatically generated default name for any output SCRIP grid-file. 
Users of the standalone regridder (ncks) must explicitly specify a name for the inferred SCRIP grid-file.\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* !fl_out */ /* Define output variable values */ int lon_psn; /* [idx] Ordinal position of longitude dimension in rectangular grid dimension-size array */ int lat_psn; /* [idx] Ordinal position of latitude dimension in rectangular grid dimension-size array */ if(grd_rnk_nbr == dmn_nbr_1D){ dmn_sz_int[0]=col_nbr; }else if(grd_rnk_nbr == dmn_nbr_2D){ /* !dmn_nbr_1D */ /* SCRIP introduced [lon,lat] convention because more natural for Fortran NB: This [lon,lat] convention applies ONLY to grid_dims variable Write all other SCRIP variables as [lat,lon] Nonsensical? Yes, but backwards compatibility is priceless */ lon_psn=0; lat_psn=1; dmn_sz_int[lon_psn]=lon_nbr; dmn_sz_int[lat_psn]=lat_nbr; } /* !dmn_nbr_2D */ if(flg_grd_crv){ /* For curvilinear grids first, if necessary, infer corner boundaries Then perform sanity check using same code on inferred and copied grids */ if(False && has_mss_val_bnd && grd_crn_nbr == 4 && !strcmp(lat_bnd_nm,"latt_bounds") && !strcmp(lon_bnd_nm,"lont_bounds") && lat_bnd_id != NC_MIN_INT && lon_bnd_id != NC_MIN_INT){ /* Only CESM CICE is known to fit these constraints Cell center locations are (misleadingly) reported in a regular, rectangular, regional grid Cell corners/boundaries are regular only in SH, curvilinear in NH, i.e., displaced or tripole grid Grid is from southernmost Antarctic Ocean latitude and longitude near 79S,320E to North Pole Nominal centers do not agree with true centers computed from corners CICE may run in decomposed/unstructured mode, each column writes separately to output buffer? This could explain missing coordinates in non-ocean gridcells However, land points are completely masked (grid centers and corners are missing) Oversight? Why not write coordinates for land-masked cells? Regridder needs corners so we fill-in missing boundaries with derived grid Gave up on inferring 20170521 once tri-pole grid complexity became apparent */ const long idx_dbg=rgr->idx_dbg; double lat_ctr_drv; /* [dgr] Latitude center, derived */ double lon_ctr_drv; /* [dgr] Longitude center, derived */ double lat_crn_drv; /* [dgr] Latitude corner, derived */ double lon_crn_drv; /* [dgr] Longitude corner, derived */ long idx_ctr_sth; /* [idx] Index of southern neighbor */ long idx_ctr_nrt; /* [idx] Index of northern neighbor */ long idx_crn_sth; /* [idx] Index of southern neighbor */ long idx_crn_nrt; /* [idx] Index of northern neighbor */ long lon_idx_crr; /* [idx] Current longitude index */ long lon_vld_frs; /* [idx] First valid longitude in latitude row */ long *lon_vld_prv=NULL; /* [idx] Previous valid longitude in latitude row */ long *lon_vld_nxt=NULL; /* [idx] Next valid longitude in latitude row */ lon_vld_prv=(long *)nco_malloc(lon_nbr*sizeof(long)); lon_vld_nxt=(long *)nco_malloc(lon_nbr*sizeof(long)); /* First valid gridcell sets west and south bounds of entire grid */ for(idx_ctr=0;idx_ctr<grd_sz_nbr;idx_ctr++){ if(lat_ctr[idx_ctr] != mss_val_ctr_dbl) break; } /* !grd_sz_nbr */ assert(idx_ctr != grd_sz_nbr); idx_crn=idx_ctr*grd_crn_nbr; lat_sth=lat_crn[idx_crn]; lat_ncr=lat_crn[idx_crn+3]-lat_crn[idx_crn]; /* ul-ll */ lon_wst=lon_crn[idx_crn]; lon_ncr=lon_crn[idx_crn+1]-lon_crn[idx_crn]; /* lr-ll */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO %s will assume grid is regional CICE in curvilinear format with masked land. 
Will diagnose missing cell boundaries and centers from present boundaries and centers in grid of size lat_nbr=%ld, lon_nbr=%ld.\n",nco_prg_nm_get(),fnc_nm,lat_nbr,lon_nbr);
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){
  idx_ctr=lat_idx*lon_nbr;
  /* Find first valid longitude at this latitude */
  for(lon_idx=0;lon_idx<lon_nbr;lon_idx++)
    if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break;
  lon_vld_frs=lon_idx;
  /* 20170519: Verified all tri-pole grid latitudes have at least one valid point
     NB: search loop above terminates with lon_idx == lon_nbr, not -1, when entire row is missing */
  if(lon_vld_frs == lon_nbr) abort();
  for(lon_idx_crr=0;lon_idx_crr<lon_nbr;lon_idx_crr++){
    /* Find previous and next valid longitude for all longitudes at this latitude
       Cells can be their own previous/next valid longitude */
    lon_vld_prv[lon_idx_crr]=-1L;
    lon_vld_nxt[lon_idx_crr]=-1L;
    /* Start from current longitude and move left (west)... */
    for(lon_idx=lon_idx_crr;lon_idx>=0;lon_idx--)
      if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break;
    if(lon_idx >= 0) lon_vld_prv[lon_idx_crr]=lon_idx;
    /* Start from current longitude and move right (east)... */
    for(lon_idx=lon_idx_crr;lon_idx<lon_nbr;lon_idx++)
      if(lat_ctr[idx_ctr+lon_idx] != mss_val_ctr_dbl) break;
    if(lon_idx < lon_nbr) lon_vld_nxt[lon_idx_crr]=lon_idx;
    /* Wrap west if previous valid cell not found */
    if(lon_vld_prv[lon_idx_crr] == -1L) lon_vld_prv[lon_idx_crr]=lon_vld_prv[lon_nbr-1L];
    /* Wrap east if next valid cell not found */
    if(lon_vld_nxt[lon_idx_crr] == -1L) lon_vld_nxt[lon_idx_crr]=lon_vld_nxt[0];
  } /* !lon_idx_crr */
  /* Derive centers and corners for each missing point */
  for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){
    idx_ctr=lat_idx*lon_nbr+lon_idx;
    idx_crn=idx_ctr*grd_crn_nbr;
    if(lat_ctr[idx_ctr] != mss_val_ctr_dbl){
      lat_sth=lat_crn[idx_crn];
      lat_ncr=lat_crn[idx_crn+3]-lat_crn[idx_crn]; /* ul-ll */
      lat_ctr_drv=lat_sth+0.5*lat_ncr;
      lat_crn_drv=lat_sth;
      lon_wst=lon_crn[idx_crn];
      lon_ncr=lon_crn[idx_crn+1]-lon_crn[idx_crn]; /* lr-ll */
      lon_ctr_drv=lon_wst+lon_ncr*(lon_idx+0.5);
      if(nco_dbg_lvl_get() >= nco_dbg_std && idx_ctr == idx_dbg) (void)fprintf(stdout,"%s: DEBUG %s idx=%ld lat_idx=%ld, lon_idx=%ld, lat_sth=%g, lat_ncr=%g, lon_wst=%g, lon_ncr=%g\n",nco_prg_nm_get(),fnc_nm,idx_ctr,lat_idx,lon_idx,lat_sth,lat_ncr,lon_wst,lon_ncr);
    } /* !idx_ctr */
    if(lat_ctr[idx_ctr] == mss_val_ctr_dbl){
      if(lat_idx != 0L){ /* Not bottom row */
        idx_ctr_sth=idx_ctr-lon_nbr;
        if(lat_ctr[idx_ctr_sth] != mss_val_ctr_dbl){
          /* Copy southern corners from northern corners of southern neighbor */
          idx_crn_sth=idx_ctr_sth*grd_crn_nbr;
          lat_crn[idx_crn+0L]=lat_crn[idx_crn_sth+3L];
          lat_crn[idx_crn+1L]=lat_crn[idx_crn_sth+2L];
          lon_crn[idx_crn+0L]=lon_crn[idx_crn_sth+3L];
          lon_crn[idx_crn+1L]=lon_crn[idx_crn_sth+2L];
        } /* !mss_val */
      } /* !lat_idx */
      if(lat_idx != lat_nbr-1L){ /* Not top row */
        idx_ctr_nrt=idx_ctr+lon_nbr;
        if(lat_ctr[idx_ctr_nrt] != mss_val_ctr_dbl){
          /* Copy northern corners from southern corners of northern neighbor */
          idx_crn_nrt=idx_ctr_nrt*grd_crn_nbr;
          lat_crn[idx_crn+2L]=lat_crn[idx_crn_nrt+1L];
          lat_crn[idx_crn+3L]=lat_crn[idx_crn_nrt+0L];
          lon_crn[idx_crn+2L]=lon_crn[idx_crn_nrt+1L];
          lon_crn[idx_crn+3L]=lon_crn[idx_crn_nrt+0L];
        } /* !mss_val */
      } /* !lat_idx */
      /* Got to here before giving up
         Idea was to interpolate missing cell corners between previous and next valid cell */
      /* Algorithm assumes lon_wst never changes (too simple for displaced/tri_pole) */
      lon_ctr_drv=lon_wst+lon_ncr*(lon_idx+0.5);
      lon_crn_drv=lon_wst+lon_ncr*lon_idx;
      if(lon_ctr_drv >= 360.0) lon_ctr_drv-=360.0;
      lat_ctr[idx_ctr]=lat_ctr_drv;
      lon_ctr[idx_ctr]=lon_ctr_drv;
      lat_crn[idx_crn+0L]=lat_crn[idx_crn+1L]=lat_crn_drv;
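      /* Corner storage convention here and below is CCW from lower-left: 0=LL, 1=LR, 2=UR, 3=UL, so corners 0,1 bound the southern edge and 2,3 the northern edge */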
lat_crn[idx_crn+2L]=lat_crn[idx_crn+3L]=lat_crn_drv+lat_ncr; lon_crn[idx_crn+0L]=lon_crn[idx_crn+3L]=lon_crn_drv; lon_crn[idx_crn+1L]=lon_crn[idx_crn+2L]=lon_crn_drv+lon_ncr; /* Branch-cut rule */ if(lon_crn_drv+lon_ncr >= 360.0){ lon_crn[idx_crn+0L]=lon_crn[idx_crn+3L]=lon_crn_drv-360.0; lon_crn[idx_crn+1L]=lon_crn[idx_crn+2L]=lon_crn_drv+lon_ncr-360.0; } /* !brnch */ } /* !mss_val */ } /* !lon_idx */ } /* !lat_idx */ if(lon_vld_nxt) lon_vld_nxt=(long *)nco_free(lon_vld_nxt); if(lon_vld_prv) lon_vld_prv=(long *)nco_free(lon_vld_prv); } /* !False || !CICE */ if(lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT){ /* Interfaces (ntf) and boundaries (bnd) for curvilinear grids are ill-defined since sides need not follow latitudes nor meridians Simplest representation that contains equivalent information to interfaces/boundaries is grid corners array Diagnose grid corners from midpoints Most curvilinear data (e.g., WRF) is dimensioned lat x lon unlike SCRIP which uses lon x lat Hence we keep lat_ctr, lon_ctr, lat_crn, lon_crn with same order (likely lat x lon) as data file from which we infer grid Always use input order to write skeleton file Change that order, if necessary, to write SCRIP grid file In the interior of a curvilinear grid, nine points contribute to the four corners of a quadrilateral surrounding each center point These are the three points above the point, the three points at the same latitude, and the three points beneath the point In other words, a nine-point stencil is required to define the four corners inferred around each gridcell center It is cleanest to use this stencil only once for all cells in the "real"-grid, including those on the edges, not the interior For this to work cleanly we define an enlarged "fake"-grid where we pre-copy the values that lead to the desired extrapolation on "real"-grid edges Inspired by array-based solutions to integration of PDEs on meshes in Juri Toomre's class NB: implementation is not robust to missing value points in interior of grid. 
Hopefully grids have no missing values in coordinate variables, although they may have missing values in non-grid fields (e.g., mask, temperature) */ double *lat_ctr_fk; /* [dgr] Latitude grid with extrapolated boundaries necessary for 9-point template to find four grid corners for each real center */ double *lon_ctr_fk; /* [dgr] Longitude grid with extrapolated boundaries necessary for 9-point template to find four grid corners for each real center */ lat_ctr_fk=(double *)nco_malloc((lat_nbr+2)*(lon_nbr+2)*sizeof(double)); lon_ctr_fk=(double *)nco_malloc((lat_nbr+2)*(lon_nbr+2)*sizeof(double)); long int idx_rl; /* [idx] Index into real unrolled array */ long int idx_fk; /* [idx] Index into fake unrolled array */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ /* lat idx on real grid */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ /* lon idx on real grid */ idx_rl=lat_idx*lon_nbr+lon_idx; idx_fk=(lat_idx+1)*(lon_nbr+2)+lon_idx+1; /* Copy real grid to interior of fake grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]; lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]; } /* !lon */ } /* !lat */ /* Formulae to extrapolate sides and corners of fake grid are written as a starting lat/lon plus or minus adjustment Adjustment is positive-definite if grid monotonically increases in latitude and longitude from LL to UR 20160111: Use macros/functions to determine longitude adjustments that are always less than 180 This ensures all longitudes contributing to extrapolated longitude are from same branch cut */ /* Bottom row */ lat_idx=0; /* lat idx of extrapolated point on fake grid */ for(lon_idx=1;lon_idx<lon_nbr+1;lon_idx++){ /* lon idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on bottom row of fake grid */ idx_rl=lat_idx*lon_nbr+lon_idx-1; /* 1D-offset of neighboring point on bottom row of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]-(lat_ctr[idx_rl+lon_nbr]-lat_ctr[idx_rl]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]-nco_lon_dff_brnch_dgr(lon_ctr[idx_rl+lon_nbr],lon_ctr[idx_rl]); } /* !lon */ /* Top row */ lat_idx=lat_nbr+1; /* lat idx of extrapolated point on fake grid */ for(lon_idx=1;lon_idx<lon_nbr+1;lon_idx++){ /* lon idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on top row of fake grid */ idx_rl=(lat_nbr-1)*lon_nbr+lon_idx-1; /* 1D-offset of neighboring point on top row of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]+(lat_ctr[idx_rl]-lat_ctr[idx_rl-lon_nbr]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]+nco_lon_dff_brnch_dgr(lon_ctr[idx_rl],lon_ctr[idx_rl-lon_nbr]); } /* !lon */ /* Left side */ lon_idx=0; /* lon idx of extrapolated point on fake grid */ for(lat_idx=1;lat_idx<lat_nbr+1;lat_idx++){ /* lat idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on left side of fake grid */ idx_rl=(lat_idx-1)*lon_nbr+lon_idx; /* 1D-offset of neighboring point on left side of real grid */ lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]-(lat_ctr[idx_rl+1]-lat_ctr[idx_rl]); lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]-nco_lon_dff_brnch_dgr(lon_ctr[idx_rl+1],lon_ctr[idx_rl]); } /* !lat */ /* Right side */ lon_idx=lon_nbr+1; /* lon idx of extrapolated point on fake grid */ for(lat_idx=1;lat_idx<lat_nbr+1;lat_idx++){ /* lat idx of extrapolated point on fake grid */ idx_fk=lat_idx*(lon_nbr+2)+lon_idx; /* 1D-offset of extrapolated point on right side of fake grid */ idx_rl=(lat_idx-1)*lon_nbr+lon_idx-2; /* 1D-offset of neighboring point on right side of real grid */ 
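/* Right side, below, mirrors the interior gradient outward exactly as the bottom, top, and left sides above: fake = edge + (edge - inward neighbor), with longitude differences formed by nco_lon_dff_brnch_dgr() so both operands lie on the same branch cut */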
lat_ctr_fk[idx_fk]=lat_ctr[idx_rl]+(lat_ctr[idx_rl]-lat_ctr[idx_rl-1]);
lon_ctr_fk[idx_fk]=lon_ctr[idx_rl]+nco_lon_dff_brnch_dgr(lon_ctr[idx_rl],lon_ctr[idx_rl-1]);
} /* !lat */
/* LL */
lat_ctr_fk[0]=lat_ctr_fk[lon_nbr+2]-(lat_ctr_fk[2*(lon_nbr+2)]-lat_ctr_fk[lon_nbr+2]);
lon_ctr_fk[0]=lon_ctr_fk[1]-nco_lon_dff_brnch_dgr(lon_ctr_fk[2],lon_ctr_fk[1]);
/* LR */
lat_ctr_fk[lon_nbr+1]=lat_ctr_fk[2*(lon_nbr+2)-1]-(lat_ctr_fk[3*(lon_nbr+2)-1]-lat_ctr_fk[2*(lon_nbr+2)-1]);
lon_ctr_fk[lon_nbr+1]=lon_ctr_fk[lon_nbr]+nco_lon_dff_brnch_dgr(lon_ctr_fk[lon_nbr],lon_ctr_fk[lon_nbr-1]);
/* UR */
lat_ctr_fk[(lat_nbr+2)*(lon_nbr+2)-1]=lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-1]+(lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-1]-lat_ctr_fk[lat_nbr*(lon_nbr+2)-1]);
lon_ctr_fk[(lat_nbr+2)*(lon_nbr+2)-1]=lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-2]+nco_lon_dff_brnch_dgr(lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-2],lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)-3]);
/* UL */
lat_ctr_fk[(lat_nbr+1)*(lon_nbr+2)]=lat_ctr_fk[lat_nbr*(lon_nbr+2)]+(lat_ctr_fk[lat_nbr*(lon_nbr+2)]-lat_ctr_fk[(lat_nbr-1)*(lon_nbr+2)]);
lon_ctr_fk[(lat_nbr+1)*(lon_nbr+2)]=lon_ctr_fk[lat_nbr*(lon_nbr+2)+1]-nco_lon_dff_brnch_dgr(lon_ctr_fk[lat_nbr*(lon_nbr+2)+2],lon_ctr_fk[lat_nbr*(lon_nbr+2)+1]);
if(nco_dbg_lvl_get() >= nco_dbg_std){
  long idx_dbg;
  idx_dbg=rgr->idx_dbg;
  (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Fake Center [lat,lon]=[%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,lat_ctr_fk[idx_dbg],lon_ctr_fk[idx_dbg]);
} /* !dbg */
long int lat_idx_fk; /* [idx] Index into fake (extrapolated) latitude array */
long int lon_idx_fk; /* [idx] Index into fake (extrapolated) longitude array */
long int idx_fk_crn_ll_ctr_ll;
long int idx_fk_crn_ll_ctr_lr;
long int idx_fk_crn_ll_ctr_ur;
long int idx_fk_crn_ll_ctr_ul;
long int idx_fk_crn_lr_ctr_ll;
long int idx_fk_crn_lr_ctr_lr;
long int idx_fk_crn_lr_ctr_ur;
long int idx_fk_crn_lr_ctr_ul;
long int idx_fk_crn_ur_ctr_ll;
long int idx_fk_crn_ur_ctr_lr;
long int idx_fk_crn_ur_ctr_ur;
long int idx_fk_crn_ur_ctr_ul;
long int idx_fk_crn_ul_ctr_ll;
long int idx_fk_crn_ul_ctr_lr;
long int idx_fk_crn_ul_ctr_ur;
long int idx_fk_crn_ul_ctr_ul;
double *crn_lat;
double *crn_lon;
crn_lat=(double *)nco_malloc(grd_crn_nbr*sizeof(double));
crn_lon=(double *)nco_malloc(grd_crn_nbr*sizeof(double));
size_t wrn_nbr_max=20;
size_t wrn_nbr=0;
for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){
  for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){
    /* 9-point template valid at all interior (non-edge) points in real grid, and at all points (including edges) in fake grid
       Read variables idx_crn_ll_ctr_ul as "index of upper left gridcell center that contributes to lower-left gridcell corner"
       Algorithms execute in counter-clockwise (CCW) direction: lower-left, lower-right, upper-right, upper-left
       lat_idx and lon_idx are true indices and are used to write into grd_crn_lat/lon arrays
       lat_idx_fk and lon_idx_fk are indices into fake arrays with extrapolated boundaries and are used to read data from fake arrays */
    lon_idx_fk=lon_idx+1;
    lat_idx_fk=lat_idx+1;
    idx_rl=lat_idx*lon_nbr+lon_idx;
    idx_fk=lat_idx_fk*(lon_nbr+2)+lon_idx_fk;
    /* Determine index into fake array (valid everywhere it is applied)
       Comments after each equation are formula for real index (valid only at interior gridcells) */
    idx_fk_crn_ll_ctr_ll=idx_fk-(lon_nbr+2)-1; // (lat_idx-1)*lon_nbr+lon_idx-1
    idx_fk_crn_ll_ctr_lr=idx_fk-(lon_nbr+2); // (lat_idx-1)*lon_nbr+lon_idx
    idx_fk_crn_ll_ctr_ur=idx_fk; // lat_idx*lon_nbr+lon_idx
    idx_fk_crn_ll_ctr_ul=idx_fk-1; // lat_idx*lon_nbr+lon_idx-1;
    idx_fk_crn_lr_ctr_ll=idx_fk-(lon_nbr+2); // (lat_idx-1)*lon_nbr+lon_idx
    idx_fk_crn_lr_ctr_lr=idx_fk-(lon_nbr+2)+1; // (lat_idx-1)*lon_nbr+lon_idx+1
    idx_fk_crn_lr_ctr_ur=idx_fk+1; // lat_idx*lon_nbr+lon_idx+1
    idx_fk_crn_lr_ctr_ul=idx_fk; // lat_idx*lon_nbr+lon_idx;
    idx_fk_crn_ur_ctr_ll=idx_fk; // lat_idx*lon_nbr+lon_idx
    idx_fk_crn_ur_ctr_lr=idx_fk+1; // lat_idx*lon_nbr+lon_idx+1
    idx_fk_crn_ur_ctr_ur=idx_fk+(lon_nbr+2)+1; // (lat_idx+1)*lon_nbr+lon_idx+1
    idx_fk_crn_ur_ctr_ul=idx_fk+(lon_nbr+2); // (lat_idx+1)*lon_nbr+lon_idx;
    idx_fk_crn_ul_ctr_ll=idx_fk-1; // lat_idx*lon_nbr+lon_idx-1
    idx_fk_crn_ul_ctr_lr=idx_fk; // lat_idx*lon_nbr+lon_idx
    idx_fk_crn_ul_ctr_ur=idx_fk+(lon_nbr+2); // (lat_idx+1)*lon_nbr+lon_idx
    idx_fk_crn_ul_ctr_ul=idx_fk+(lon_nbr+2)-1; // (lat_idx+1)*lon_nbr+lon_idx-1;
    /* 20160111: Algorithm requires that all longitudes in template be on same "branch cut"
       If, say, LL longitude is 179.0 and LR longitude is -179.0 then their sum and average are zero, not 180.0 or -180.0 as desired
       Routines labeled "*_brnch" in the following ensure that branch-cut rules are followed */
    idx_crn_ll=grd_crn_nbr*idx_rl+0;
    lat_crn[idx_crn_ll]=0.25*(lat_ctr_fk[idx_fk_crn_ll_ctr_ll]+lat_ctr_fk[idx_fk_crn_ll_ctr_lr]+lat_ctr_fk[idx_fk_crn_ll_ctr_ur]+lat_ctr_fk[idx_fk_crn_ll_ctr_ul]);
    lon_crn[idx_crn_ll]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ll_ctr_ll],lon_ctr_fk[idx_fk_crn_ll_ctr_lr],lon_ctr_fk[idx_fk_crn_ll_ctr_ur],lon_ctr_fk[idx_fk_crn_ll_ctr_ul]);
    idx_crn_lr=grd_crn_nbr*idx_rl+1;
    lat_crn[idx_crn_lr]=0.25*(lat_ctr_fk[idx_fk_crn_lr_ctr_ll]+lat_ctr_fk[idx_fk_crn_lr_ctr_lr]+lat_ctr_fk[idx_fk_crn_lr_ctr_ur]+lat_ctr_fk[idx_fk_crn_lr_ctr_ul]);
    lon_crn[idx_crn_lr]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_lr_ctr_ll],lon_ctr_fk[idx_fk_crn_lr_ctr_lr],lon_ctr_fk[idx_fk_crn_lr_ctr_ur],lon_ctr_fk[idx_fk_crn_lr_ctr_ul]);
    idx_crn_ur=grd_crn_nbr*idx_rl+2;
    lat_crn[idx_crn_ur]=0.25*(lat_ctr_fk[idx_fk_crn_ur_ctr_ll]+lat_ctr_fk[idx_fk_crn_ur_ctr_lr]+lat_ctr_fk[idx_fk_crn_ur_ctr_ur]+lat_ctr_fk[idx_fk_crn_ur_ctr_ul]);
    lon_crn[idx_crn_ur]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ur_ctr_ll],lon_ctr_fk[idx_fk_crn_ur_ctr_lr],lon_ctr_fk[idx_fk_crn_ur_ctr_ur],lon_ctr_fk[idx_fk_crn_ur_ctr_ul]);
    idx_crn_ul=grd_crn_nbr*idx_rl+3;
    lat_crn[idx_crn_ul]=0.25*(lat_ctr_fk[idx_fk_crn_ul_ctr_ll]+lat_ctr_fk[idx_fk_crn_ul_ctr_lr]+lat_ctr_fk[idx_fk_crn_ul_ctr_ur]+lat_ctr_fk[idx_fk_crn_ul_ctr_ul]);
    lon_crn[idx_crn_ul]=nco_lon_crn_avg_brnch(lon_ctr_fk[idx_fk_crn_ul_ctr_ll],lon_ctr_fk[idx_fk_crn_ul_ctr_lr],lon_ctr_fk[idx_fk_crn_ul_ctr_ur],lon_ctr_fk[idx_fk_crn_ul_ctr_ul]);
    crn_lat[0]=lat_crn[idx_crn_ll];
    crn_lat[1]=lat_crn[idx_crn_lr];
    crn_lat[2]=lat_crn[idx_crn_ur];
    crn_lat[3]=lat_crn[idx_crn_ul];
    crn_lon[0]=lon_crn[idx_crn_ll];
    crn_lon[1]=lon_crn[idx_crn_lr];
    crn_lon[2]=lon_crn[idx_crn_ur];
    crn_lon[3]=lon_crn[idx_crn_ul];
    /* 20210411: From 2016 until today, nco_ccw_chk() overwrote fourth (UL) with first (LL) corner */
    flg_ccw=nco_ccw_chk(crn_lat,crn_lon,grd_crn_nbr,idx_ccw,rcr_lvl);
    if(!flg_ccw && wrn_nbr < wrn_nbr_max){
      (void)fprintf(stdout,"%s: %s WARNING reports non-CCW gridcell at idx=%li, (lat,lon)_idx=(%li,%li), (lat,lon) = (%g, %g)\n",nco_prg_nm_get(),fnc_nm,idx_rl,lat_idx,lon_idx,lat_ctr[idx_rl],lon_ctr[idx_rl]);
      wrn_nbr++;
      if(wrn_nbr == wrn_nbr_max) (void)fprintf(stdout,"%s: %s INFO Number of non-CCW errors reached maximum = %li, not printing anymore\n",nco_prg_nm_get(),fnc_nm,wrn_nbr_max);
    } /* endif */
    lat_crn[idx_crn_ll]=crn_lat[0];
    lat_crn[idx_crn_lr]=crn_lat[1];
    lat_crn[idx_crn_ur]=crn_lat[2];
    lat_crn[idx_crn_ul]=crn_lat[3];
    lon_crn[idx_crn_ll]=crn_lon[0];
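    /* NB: nco_ccw_chk() may have reordered crn_lat/crn_lon in place to enforce CCW, so all four corners are copied back, not only those known to change */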
lon_crn[idx_crn_lr]=crn_lon[1]; lon_crn[idx_crn_ur]=crn_lon[2]; lon_crn[idx_crn_ul]=crn_lon[3]; } /* !lon */ } /* !lat */ if(lat_ctr_fk) lat_ctr_fk=(double *)nco_free(lat_ctr_fk); if(lon_ctr_fk) lon_ctr_fk=(double *)nco_free(lon_ctr_fk); if(crn_lon) crn_lon=(double *)nco_free(crn_lon); if(crn_lat) crn_lat=(double *)nco_free(crn_lat); } /* !(lat_bnd_id && lon_bnd_id) */ } /* !flg_grd_crv */ if(flg_1D_psd_rct_bnd){ double lon_brnch_min; double lon_brnch_max; double lon_dff; assert(grd_crn_nbr == 4); /* Make boundaries that were provided as pseudo-rectangular branch-cut-compliant */ for(col_idx=0;col_idx<col_nbr;col_idx++){ lon_brnch_min=(lon_bnd[2*col_idx] <= lon_bnd[2*col_idx+1]) ? lon_bnd[2*col_idx] : lon_bnd[2*col_idx+1]; lon_brnch_max=(lon_bnd[2*col_idx] >= lon_bnd[2*col_idx+1]) ? lon_bnd[2*col_idx] : lon_bnd[2*col_idx+1]; lon_dff=lon_brnch_max-lon_brnch_min; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports 1D pseudo-rectangular bounds branch-cut straddle at col_idx=%ld lon_brnch_max, lon_brnch_min, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,col_idx,lon_brnch_max,lon_brnch_min,lon_dff); lon_brnch_max-=360.0; }else if(lon_dff <= -180.0){ lon_brnch_max+=360.0; } /* !lon_dff */ /* Extra condition to convert CW bounds to CCW bounds (necessary for OCO2) */ if(lon_brnch_min <= lon_brnch_max){ lon_bnd[2*col_idx]=lon_brnch_min; lon_bnd[2*col_idx+1]=lon_brnch_max; }else{ lon_bnd[2*col_idx]=lon_brnch_max; lon_bnd[2*col_idx+1]=lon_brnch_min; } /* end else */ } /* !col_idx */ /* Convert boundaries that were provided as pseudo-rectangular to corners */ for(col_idx=0;col_idx<col_nbr;col_idx++){ idx=grd_crn_nbr*col_idx; /* fxm: OCO2 provides boundaries in CW not CCW orientation */ lon_crn[idx]=lon_bnd[2*col_idx]; /* LL */ lon_crn[idx+1]=lon_bnd[2*col_idx+1]; /* LR */ lon_crn[idx+2]=lon_bnd[2*col_idx+1]; /* UR */ lon_crn[idx+3]=lon_bnd[2*col_idx]; /* UL */ lat_crn[idx]=lat_bnd[2*col_idx]; /* LL */ lat_crn[idx+1]=lat_bnd[2*col_idx]; /* LR */ lat_crn[idx+2]=lat_bnd[2*col_idx+1]; /* UR */ lat_crn[idx+3]=lat_bnd[2*col_idx+1]; /* UL */ /* fxm: OCO2 provides boundaries in CW not CCW orientation */ } /* !col_idx */ } /* flg_1D_psd_rct_bnd */ if(flg_grd_crv || flg_1D_psd_rct_bnd){ /* As of 20160308, use same sanity check for 1D pseudo-rectangular grids as for curvilinear grids Pseudo-rectangular grids rely on user-produced boundaries that may be psychotic (CW, non-branch-cut) Starting 20151205, use same sanity check for both inferred and copied curvilinear grids 20151129: Curvilinear extrapolation technique above yields corners outside [-90.0,90.0], [-180.0,360.0] Also, it may assume input is ascending swath and fail for descending swaths Complications not fully addressed: Swaths may (verify this) turn from ascending to descending, or visa-versa, when satellite crosses latitude extrema Swaths may cross the date-line (and back!) 
*/
/* Determine numeric bounds of input coordinate system */
double lon_min_min;
double lon_max_max;
nco_bool NCO_LON_0_TO_360=True;
if(has_mss_val_ctr){
  for(idx=0;idx<grd_sz_nbr;idx++)
    if(lon_ctr[idx] != mss_val_ctr_dbl && lon_ctr[idx] < 0.0) break;
}else{
  for(idx=0;idx<grd_sz_nbr;idx++)
    if(lon_ctr[idx] < 0.0) break;
} /* !has_mss_val_ctr */
if(idx != grd_sz_nbr) NCO_LON_0_TO_360=False;
if(NCO_LON_0_TO_360){
  lon_min_min=0.0;
  lon_max_max=360.0;
}else{
  lon_min_min=-180.0;
  lon_max_max=180.0;
} /* !NCO_LON_0_TO_360 */
/* Correct for extrapolation outside boundaries */
for(idx=0;idx<grd_sz_nbr*grd_crn_nbr;idx++){
  idx_ctr=idx/grd_crn_nbr;
  if(has_mss_val_ctr)
    if(lat_ctr[idx_ctr] == mss_val_ctr_dbl) continue;
  if(lat_crn[idx] < -90.0 || lat_crn[idx] > 90.0 || lon_crn[idx] < lon_min_min || lon_crn[idx] > lon_max_max){
    idx_crn_ll=grd_crn_nbr*idx_ctr+0;
    idx_crn_lr=grd_crn_nbr*idx_ctr+1;
    idx_crn_ur=grd_crn_nbr*idx_ctr+2;
    idx_crn_ul=grd_crn_nbr*idx_ctr+3;
    if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stderr,"%s: INFO %s reports %s corner outside canonical bounds at idx = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,(lat_bnd_id == NC_MIN_INT) ? "inferred" : "copied",idx_ctr,lat_ctr[idx_ctr],lon_ctr[idx_ctr],lat_crn[idx_crn_ll],lon_crn[idx_crn_ll],lat_crn[idx_crn_lr],lon_crn[idx_crn_lr],lat_crn[idx_crn_ur],lon_crn[idx_crn_ur],lat_crn[idx_crn_ul],lon_crn[idx_crn_ul]);
    /* Restrict grid to real latitudes and to the 360-degree range detected from input cell-centers */
    if(lat_crn[idx] < -90.0) lat_crn[idx]=-90.0;
    if(lat_crn[idx] > 90.0) lat_crn[idx]=90.0;
    if(lon_crn[idx] < lon_min_min) lon_crn[idx]+=360.0;
    if(lon_crn[idx] > lon_max_max) lon_crn[idx]-=360.0;
  } /* !sanity */
} /* !idx */
/* Vertices (for valid points) are now within 360 degrees (either [0,360] or [-180,180]) implied by input coordinate system
   Curvilinear inferred grids are, by construction, branch-cut compliant
   fxm: Curvilinear and 1D pseudo-rectangular grids prescribed by (i.e., read-in from) input may not be branch-cut compliant */
if(nco_dbg_lvl_get() >= nco_dbg_std){
  long idx_dbg;
  idx_dbg=rgr->idx_dbg;
  idx_crn_ll=grd_crn_nbr*idx_dbg+0;
  idx_crn_lr=grd_crn_nbr*idx_dbg+1;
  idx_crn_ur=grd_crn_nbr*idx_dbg+2;
  idx_crn_ul=grd_crn_nbr*idx_dbg+3;
  (void)fprintf(stderr,"%s: INFO %s idx_dbg = %li, Center [lat,lon]=[%g,%g]; Corners LL [%g,%g] LR [%g,%g] UR [%g,%g] UL [%g,%g]\n",nco_prg_nm_get(),fnc_nm,idx_dbg,lat_ctr[idx_dbg],lon_ctr[idx_dbg],lat_crn[idx_crn_ll],lon_crn[idx_crn_ll],lat_crn[idx_crn_lr],lon_crn[idx_crn_lr],lat_crn[idx_crn_ur],lon_crn[idx_crn_ur],lat_crn[idx_crn_ul],lon_crn[idx_crn_ul]);
} /* !dbg */
} /* !flg_grd_crv || flg_1D_psd_rct_bnd */
if(flg_grd_crv){
  /* Copy centers into empty output array */
  for(idx=0;idx<grd_sz_nbr;idx++){
    grd_ctr_lat[idx]=lat_ctr[idx];
    grd_ctr_lon[idx]=lon_ctr[idx];
  } /* !idx */
  /* Copy inferred or copied (from input) sanity-checked corners into empty output array */
  for(idx=0;idx<grd_sz_nbr*grd_crn_nbr;idx++){
    grd_crn_lat[idx]=lat_crn[idx];
    grd_crn_lon[idx]=lon_crn[idx];
  } /* !idx */
} /* !flg_grd_crv */
/* 20150512 Many 2D datasets have bad bounds
   Primary example is Gaussian grids archived by CESM models that use midpoint rule rather than iterate to compute interfaces from quadrature points
   Such files have correct gw arrays and incorrect cell bounds
   flg_dgn_bnd allows nco_grd_nfr() to override faulty boundaries in file with correct bounds */
const nco_bool flg_dgn_bnd=rgr->flg_dgn_bnd; /* [flg] Diagnose rather than copy inferred bounds */
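/* A worked example of the midpoint rule applied in the flg_grd_2D block below, for a hypothetical global 2-degree equi-angular s2n grid:
   centers lat_ctr[] = {-89, -87, ..., 89}
   lat_ntf[0] = lat_ctr[0] - 0.5*(lat_ctr[1]-lat_ctr[0]) = -89 - 1 = -90
   lat_ntf[i] = 0.5*(lat_ctr[i-1]+lat_ctr[i]) for interior i, e.g., lat_ntf[1] = -88
   lat_ntf[lat_nbr] = lat_ctr[lat_nbr-1] + 0.5*(lat_ctr[lat_nbr-1]-lat_ctr[lat_nbr-2]) = 89 + 1 = 90 */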
const long lat_nbr_hlf=lat_nbr/2L; // [nbr] Half number of latitudes (e.g., lat_nbr_hlf=32 for lat_nbr=64 and 65)
if(flg_grd_2D){
  if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){
    if(nco_dbg_lvl_get() >= nco_dbg_std && flg_dgn_bnd) (void)fprintf(stdout,"%s: INFO %s will diagnose cell boundaries from cell centers...\n",nco_prg_nm_get(),fnc_nm);
    /* Derive interfaces (ntf) and bounds (bnd) from midpoints approximation applied to center data
       NB: Simplistically derived interfaces (ntf) only valid on some rectangular grids (not on Gaussian grids)
       These inferred-from-midpoint interfaces/bounds are overwritten in next block once lat grid is known */
    if(flg_s2n) lat_ntf[0L]=lat_ctr[0L]-0.5*(lat_ctr[1L]-lat_ctr[0L]); else lat_ntf[0L]=lat_ctr[0L]+0.5*(lat_ctr[0L]-lat_ctr[1L]);
    if(lat_ntf[0L] < -90.0) lat_ntf[0L]=-90.0; /* NB: lat_ntf[0] can be same as lat_ctr[0] for cap grid */
    if(lat_ntf[0L] > 90.0) lat_ntf[0L]=90.0;
    for(lat_idx=0L;lat_idx<lat_nbr-1L;lat_idx++)
      lat_ntf[lat_idx+1L]=0.5*(lat_ctr[lat_idx]+lat_ctr[lat_idx+1L]);
    if(flg_s2n) lat_ntf[lat_nbr]=lat_ctr[lat_nbr-1L]+0.5*(lat_ctr[lat_nbr-1L]-lat_ctr[lat_nbr-2L]); else lat_ntf[lat_nbr]=lat_ctr[lat_nbr-1L]-0.5*(lat_ctr[lat_nbr-2L]-lat_ctr[lat_nbr-1L]);
    if(lat_ntf[lat_nbr] > 90.0) lat_ntf[lat_nbr]=90.0; /* NB: lat_ntf[lat_nbr] can be same as lat_ctr[lat_nbr-1] for cap grid */
    if(lat_ntf[lat_nbr] < -90.0) lat_ntf[lat_nbr]=-90.0; /* NB: lat_ntf[lat_nbr] can be same as lat_ctr[lat_nbr-1] for cap grid */
    if(flg_s2n) lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); /* fabs() ensures positive-definite span for N->S grids */
    lon_ntf[0L]=lon_ctr[0L]-0.5*(lon_ctr[1L]-lon_ctr[0L]);
    for(lon_idx=0;lon_idx<lon_nbr-1L;lon_idx++)
      lon_ntf[lon_idx+1L]=0.5*(lon_ctr[lon_idx]+lon_ctr[lon_idx+1L]);
    lon_ntf[lon_nbr]=lon_ctr[lon_nbr-1L]+0.5*(lon_ctr[lon_nbr-1L]-lon_ctr[lon_nbr-2L]);
    lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L];
    for(idx=0;idx<lon_nbr;idx++){
      lon_bnd[2L*idx]=lon_ntf[idx];
      lon_bnd[2L*idx+1L]=lon_ntf[idx+1L];
    } /* !idx */
    for(idx=0;idx<lat_nbr;idx++){
      lat_bnd[2L*idx]=lat_ntf[idx];
      lat_bnd[2L*idx+1L]=lat_ntf[idx+1L];
    } /* !idx */
  }else{ /* !(lat_bnd_id && lon_bnd_id) */
    /* Derive interfaces (ntf) from bounds (bnd) data on disk */
    for(idx=0;idx<lon_nbr;idx++) lon_ntf[idx]=lon_bnd[2L*idx];
    lon_ntf[lon_nbr]=lon_bnd[2L*lon_nbr-1L];
    for(idx=0;idx<lat_nbr;idx++) lat_ntf[idx]=lat_bnd[2L*idx];
    lat_ntf[lat_nbr]=lat_bnd[2L*lat_nbr-1L];
    lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); /* fabs() ensures positive-definite span for N->S grids */
    lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L];
  } /* !(lat_bnd_id && lon_bnd_id) */
} /* !flg_grd_2D */
if(flg_grd_2D){
  /* Diagnose type of two-dimensional input grid by testing second latitude center against formulae */
  double lat_ctr_tst_eqa;
  double lat_ctr_tst_fv;
  if(flg_s2n) lat_ctr_tst_eqa=lat_ntf[0L]+lat_spn*1.5/lat_nbr; else lat_ctr_tst_eqa=lat_ntf[0L]-lat_spn*1.5/lat_nbr;
  if(flg_s2n) lat_ctr_tst_fv=lat_ntf[0L]+lat_spn/(lat_nbr-1L); else lat_ctr_tst_fv=lat_ntf[0L]-lat_spn/(lat_nbr-1L);
  double lat_ctr_tst_gss;
  /* In diagnosing grids, agreement with input to single-precision is "good enough for government work"
     Hence some comparisons cast from double to float before comparison
     20150526: T42 grid from SCRIP and related maps are only accurate to ~eight digits
     20150611: map_ne120np4_to_fv801x1600_bilin.150418.nc has yc_b[1600]=-89.775000006 not expected exact value lat_ctr[1]=-89.775000000000006
     20170521: T62 grid from NCEP-NCAR Reanalysis 1 worse than single precision, has yc_[192]=-86.6531 not expected exact value lat_ctr[1]=-86.6531671712612, relative difference is 7.86021e-07
     20191008: T62 grid from NCEP-NCAR Reanalysis 2 worse than single precision, has yc_[92]=-86.6531 not expected exact value lat_ctr[1]=-86.6531671712612, relative difference is 7.86021e-07 */
  if(nco_dbg_lvl_get() >= nco_dbg_scl && !flg_s2n) (void)fprintf(stderr,"%s: INFO %s reports that grid inferral has detected a 2D grid that runs from north-to-south, not south-to-north. Support for creating/inferring 2D N-to-S grids was added in NCO 4.7.7 (September, 2018) and should work fine.\nHINT: If present command fails, report problem to developers and then re-try inferring grid after reversing input dataset's latitude coordinate (with, e.g., ncpdq -a time,-lat,lon in.nc out.nc)\n",nco_prg_nm_get(),fnc_nm);
  if((float)lat_ctr[1L] == (float)lat_ctr_tst_eqa) lat_typ=nco_grd_lat_eqa;
  if((float)lat_ctr[1L] == (float)lat_ctr_tst_fv) lat_typ=nco_grd_lat_fv;
  double *lat_sin=NULL_CEWI; // [frc] Sine of Gaussian latitudes double precision
  double *wgt_Gss=NULL; // [frc] Gaussian weights double precision
  if(lat_typ == nco_grd_lat_nil){
    /* Check for Gaussian grid */
    lat_sin=(double *)nco_malloc(lat_nbr*sizeof(double));
    wgt_Gss=(double *)nco_malloc(lat_nbr*sizeof(double));
    (void)nco_lat_wgt_gss(lat_nbr,flg_s2n,lat_sin,wgt_Gss);
    lat_ctr_tst_gss=rdn2dgr*asin(lat_sin[1L]);
    /* Gaussian weights on output grid will be double-precision accurate
       Grid itself is kept as user-specified so area diagnosed by ESMF_RegridWeightGen may be slightly inconsistent with weights */
    const double eps_rlt_cnv_gss=1.0e-6; // Convergence criterion (1.0e-7 fails for NCEP NCAR Reanalysis 1!)
    if(nco_dbg_lvl_get() >= nco_dbg_scl) (void)fprintf(stdout,"%s: DEBUG %s reports lat_ctr[1]=%g, lat_ctr_tst_gss=%g, fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss))=%g\n",nco_prg_nm_get(),fnc_nm,lat_ctr[1],lat_ctr_tst_gss,fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss)));
    if(fabs(1.0-fabs(lat_ctr[1]/lat_ctr_tst_gss)) < eps_rlt_cnv_gss) lat_typ=nco_grd_lat_gss;
  } /* !Gaussian */
  if(lat_typ == nco_grd_lat_nil){
    /* If still of unknown type, this 2D grid may be weird
       This occurs, e.g., with POP3 destination grid
       Change gridtype from nil (which means not-yet-set) to unknown (which means none of the others matched) */
    lat_typ=nco_grd_lat_unk;
  } /* !nil */
  /* Currently grd_lat_typ and grd_2D_typ are equivalent, though that may be relaxed in future */
  if(lat_typ == nco_grd_lat_unk) grd_typ=nco_grd_2D_unk;
  else if(lat_typ == nco_grd_lat_gss) grd_typ=nco_grd_2D_gss;
  else if(lat_typ == nco_grd_lat_fv) grd_typ=nco_grd_2D_fv;
  else if(lat_typ == nco_grd_lat_eqa) grd_typ=nco_grd_2D_eqa;
  else assert(False);
  /* Diagnose latitude interfaces from gridcell centers (if boundaries not provided) */
  if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){
    //if(flg_s2n) lat_nrt=lat_ntf[lat_nbr]; else lat_nrt=lat_ntf[0L];
    lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]);
    switch(lat_typ){
    case nco_grd_lat_fv:
      lat_ncr=lat_spn/(lat_nbr-1L);
      if(flg_s2n) lat_ntf[1L]=lat_ntf[0L]+0.5*lat_ncr; else lat_ntf[1L]=lat_ntf[0L]-0.5*lat_ncr;
      for(lat_idx=2;lat_idx<lat_nbr;lat_idx++)
        if(flg_s2n) lat_ntf[lat_idx]=lat_ntf[1L]+(lat_idx-1L)*lat_ncr; else lat_ntf[lat_idx]=lat_ntf[1L]-(lat_idx-1L)*lat_ncr;
      break;
    case nco_grd_lat_eqa:
      lat_ncr=lat_spn/lat_nbr;
      for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++)
        if(flg_s2n) lat_ntf[lat_idx]=lat_ntf[0L]+lat_idx*lat_ncr; else lat_ntf[lat_idx]=lat_ntf[0L]-lat_idx*lat_ncr;
      break;
    case nco_grd_lat_gss:
      for(lat_idx=0L;lat_idx<lat_nbr;lat_idx++)
        lat_ctr[lat_idx]=rdn2dgr*asin(lat_sin[lat_idx]);
      /* First guess for lat_ntf is midway between Gaussian abscissae */
      for(lat_idx=1L;lat_idx<lat_nbr;lat_idx++)
        lat_ntf[lat_idx]=0.5*(lat_ctr[lat_idx-1L]+lat_ctr[lat_idx]);
      /* Iterate guess until area between interfaces matches Gaussian weight */
      for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++){
        double fofx_at_x0; /* [frc] Function to iterate evaluated at current guess */
        double dfdx_at_x0; /* [frc] Derivative of equation evaluated at current guess */
        // 20190531: Wuyin Lin reports this convergence criterion fails on ECMWF F640 grid
        // Probably because latitude iterations assume s2n grid and ECMWF is n2s
        // Possibly also because latitude coordinates are stored in single precision
        // Implement precision-dependent convergence criterion, e.g., 1.0e-15 and 1.0e-7 for double- and single-precision, respectively?
        const double eps_rlt_cnv=1.0e-15; // Convergence criterion (1.0e-16 pushes double precision to the brink)
        itr_cnt=0;
        lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L]));
        fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss;
        while(fabs(fofx_at_x0) > eps_rlt_cnv){
          /* Newton-Raphson iteration:
             Let x=lat_ntf[lat_idx], y0=lat_ntf[lat_idx-1], gw = Gaussian weight (exact solution)
             f(x)=sin(dgr2rdn*x)-sin(dgr2rdn*y0)-gw=0 # s2n grid
             f(x)=sin(dgr2rdn*y0)-sin(dgr2rdn*x)-gw=0 # n2s grid
             dfdx(x)= dgr2rdn*cos(dgr2rdn*x) # s2n grid
             dfdx(x)=-dgr2rdn*cos(dgr2rdn*x) # n2s grid
             x_better=x0-f(x0)/f'(x0) */
          dfdx_at_x0=dgr2rdn*cos(dgr2rdn*lat_ntf[lat_idx]);
          if(!flg_s2n) dfdx_at_x0=-dfdx_at_x0;
          lat_ntf[lat_idx]+=fofx_at_x0/dfdx_at_x0; /* NB: fofx_at_x0 stores gw minus the current weight, i.e., -f(x0), so the Newton-Raphson update x0-f(x0)/f'(x0) appears here with a plus sign */
          lat_wgt_gss=fabs(sin(dgr2rdn*lat_ntf[lat_idx])-sin(dgr2rdn*lat_ntf[lat_idx-1L]));
          fofx_at_x0=wgt_Gss[lat_idx-1L]-lat_wgt_gss;
          if(++itr_cnt > itr_nbr_max){
            (void)fprintf(stdout,"%s: ERROR %s reports Gaussian interface iteration still has residual %g after %d iterations for lat_idx = %ld\n",nco_prg_nm_get(),fnc_nm,fabs(fofx_at_x0),itr_nbr_max,lat_idx);
            nco_exit(EXIT_FAILURE);
          } /* endif */
        } /* !while */
      } /* !lat_idx */
      /* Use Gaussian grid symmetry to obtain same interfaces in both hemispheres (avoids cumulative rounding errors) */
      if(lat_nbr%2){
        /* lat_nbr is odd */
        for(lat_idx=1L;lat_idx<=lat_nbr_hlf+1L;lat_idx++)
          lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx+1L];
      }else{
        /* lat_nbr is even */
        for(lat_idx=1L;lat_idx<lat_nbr_hlf;lat_idx++)
          lat_ntf[lat_nbr_hlf+lat_idx]=-lat_ntf[lat_nbr_hlf-lat_idx];
      } /* !flg_lat_evn */
      if(lat_sin) lat_sin=(double *)nco_free(lat_sin);
      break;
    case nco_grd_lat_unk:
      /* No generic formula exists so use interfaces already read or diagnosed as midpoints between centers */
      break;
    default:
      nco_dfl_case_generic_err();
      break;
    } /* !lat_typ */
    if(lat_typ == nco_grd_lat_gss){
      /* 20170510: First approximation above to exterior interfaces for Gaussian grid are ~ +/-89 degrees
         Loops below recompute interior interfaces only
         Southern- and northern-most interfaces must be explicitly assigned
         Inferral test for Gaussian grid _assumes_ global grid
         Hence WLOG can assign [-90.0, 90.0] to Gaussian grid exterior boundaries */
      if(flg_s2n) lat_ntf[0L]=-90.0; else lat_ntf[0L]=90.0;
      if(flg_s2n) lat_ntf[lat_nbr]=90.0; else lat_ntf[lat_nbr]=-90.0;
    } /* !nco_grd_lat_gss */
    /* Now that final latitude interfaces are known for all grid-types, assign to boundaries, overwriting provisional values stored there earlier */
    for(idx=0;idx<lat_nbr;idx++){
      lat_bnd[2L*idx]=lat_ntf[idx];
      lat_bnd[2L*idx+1L]=lat_ntf[idx+1L];
    } /* !idx */
  } /* !(lat_bnd_id && lon_bnd_id) */
  /* Use centers and boundaries to diagnose latitude weights */
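  /* Band weight w[j]=|sin(lat_ntf[j+1])-sin(lat_ntf[j])| computed in the switch below is the area fraction of latitude band j (modulo the longitude factor); on a global grid the weights sum to sin(90 degrees)-sin(-90 degrees)=2, the expectation the fuzzy normalization test further below encodes in lat_wgt_ttl_xpc */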
switch(lat_typ){ case nco_grd_lat_eqa: case nco_grd_lat_fv: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); break; case nco_grd_lat_gss: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=wgt_Gss[lat_idx]; break; case nco_grd_lat_unk: for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt[lat_idx]=fabs(sin(dgr2rdn*lat_ntf[lat_idx+1L])-sin(dgr2rdn*lat_ntf[lat_idx])); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING %s reports unknown input latitude grid-type. Guessing that weights for grid of rectangles is OK.\n",nco_prg_nm_get(),fnc_nm); break; default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ /* Diagnose type of longitude grid by testing second longitude center against formulae */ lon_spn=lon_ntf[lon_nbr]-lon_ntf[0L]; lat_spn=fabs(lat_ntf[lat_nbr]-lat_ntf[0L]); if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; if(lon_typ == nco_grd_lon_nil){ if( (float)lon_ctr[0L] == 0.0f && (float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_Grn_ctr; else if((float)lon_ctr[0L] == -180.0f && (float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_180_ctr; else if((float)lon_ntf[0L] == 0.0f && (float)lon_ntf[1L] == (float)(lon_ntf[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_Grn_wst; else if((float)lon_ntf[0L] == -180.0f && (float)lon_ntf[1L] == (float)(lon_ntf[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_180_wst; else if((float)lon_ctr[1L] == (float)(lon_ctr[0L]+lon_spn/lon_nbr)) lon_typ=nco_grd_lon_bb; else lon_typ=nco_grd_lon_unk; } /* !lon_typ */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input 2D grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_2D_sng(grd_typ)); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input latitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lat_sng(lat_typ)); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input longitude grid-type: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_lon_sng(lon_typ)); } /* !flg_grd_2D */ if(flg_grd_2D){ if(nco_dbg_lvl_get() >= nco_dbg_crr){ for(idx=0;idx<lat_nbr;idx++){ (void)fprintf(stdout,"lat[%li] = %g, vertices = ",idx,lat_ctr[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lat_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? "]\n" : ", "); } /* end loop over lat */ for(idx=0;idx<lon_nbr;idx++){ (void)fprintf(stdout,"lon[%li] = %g, vertices = ",idx,lon_ctr[idx]); for(bnd_idx=0;bnd_idx<bnd_nbr;bnd_idx++) (void)fprintf(stdout,"%s%g%s",bnd_idx == 0 ? "[" : "",lon_bnd[bnd_nbr*idx+bnd_idx],bnd_idx == bnd_nbr-1 ? 
"]\n" : ", "); } /* end loop over lon */ } /* endif dbg */ /* Fuzzy test of latitude weight normalization */ //const double eps_rlt_max=1.0e-14; /* [frc] Round-off error tolerance: Used 1.0e-14 until 20180904 */ const double eps_rlt_max=1.0e-12; /* [frc] Round-off error tolerance: Used 1.0e-12 since 20180904 */ lat_wgt_ttl=0.0; for(idx=0;idx<lat_nbr;idx++) lat_wgt_ttl+=lat_wgt[idx]; if(grd_typ == nco_grd_2D_fv || grd_typ == nco_grd_2D_eqa){ double lat_wgt_ttl_xpc; /* [frc] Expected sum of latitude weights */ lat_wgt_ttl_xpc=fabs(sin(dgr2rdn*lat_bnd[2*(lat_nbr-1)+1L])-sin(dgr2rdn*lat_bnd[0L])); if(grd_typ != nco_grd_2D_unk && fabs(1.0-lat_wgt_ttl/lat_wgt_ttl_xpc) > eps_rlt_max){ (void)fprintf(stdout,"%s: ERROR %s reports grid normalization does not meet precision tolerance eps_rlt_max = %20.15f\nlat_wgt_ttl = %20.15f, lat_wgt_ttl_xpc = %20.15f, lat_wgt_frc = %20.15f, eps_rlt = %20.15f\n",nco_prg_nm_get(),fnc_nm,eps_rlt_max,lat_wgt_ttl,lat_wgt_ttl_xpc,lat_wgt_ttl/lat_wgt_ttl_xpc,1.0-lat_wgt_ttl/lat_wgt_ttl_xpc); nco_exit(EXIT_FAILURE); } /* !imprecise */ } /* !nco_grd_lat_eqa, !nco_grd_lat_fv */ } /* !flg_grd_2D */ if(flg_grd_2D){ assert(grd_crn_nbr == 4); if(flg_dgn_bnd || (lat_bnd_id == NC_MIN_INT && lon_bnd_id == NC_MIN_INT)){ /* If interfaces were diagnosed from centers, copy corners from interfaces */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_ntf[lon_idx]; /* LL */ lon_crn[idx+1L]=lon_ntf[lon_idx+1L]; /* LR */ lon_crn[idx+2L]=lon_ntf[lon_idx+1L]; /* UR */ lon_crn[idx+3L]=lon_ntf[lon_idx]; /* UL */ } /* !lon_idx */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_ntf[lat_idx]; /* LL */ lat_crn[idx+1L]=lat_ntf[lat_idx]; /* LR */ lat_crn[idx+2L]=lat_ntf[lat_idx+1L]; /* UR */ lat_crn[idx+3L]=lat_ntf[lat_idx+1L]; /* UL */ } /* !lat_idx */ }else{ /* !lat_bnd_id */ /* If boundaries were provided in input dataset, copy corners from boundaries */ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=grd_crn_nbr*lon_idx; lon_crn[idx]=lon_bnd[2*lon_idx]; /* LL */ lon_crn[idx+1L]=lon_bnd[2*lon_idx+1L]; /* LR */ lon_crn[idx+2L]=lon_bnd[2*lon_idx+1L]; /* UR */ lon_crn[idx+3L]=lon_bnd[2*lon_idx]; /* UL */ } /* !lon_idx */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ idx=grd_crn_nbr*lat_idx; lat_crn[idx]=lat_bnd[2*lat_idx]; /* LL */ lat_crn[idx+1L]=lat_bnd[2*lat_idx]; /* LR */ lat_crn[idx+2L]=lat_bnd[2*lat_idx+1L]; /* UR */ lat_crn[idx+3L]=lat_bnd[2*lat_idx+1L]; /* UL */ } /* !lat_idx */ } /* !lat_bnd_id */ } /* !flg_grd_2D */ /* lat/lon_crn will not change anymore so stuff rectangular arrays into unrolled arrays */ if(flg_grd_1D){ for(idx=0;idx<grd_sz_nbr;idx++){ grd_ctr_lat[idx]=lat_ctr[idx]; grd_ctr_lon[idx]=lon_ctr[idx]; if(flg_wrt_crn){ for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; grd_crn_lat[idx2]=lat_crn[idx2]; grd_crn_lon[idx2]=lon_crn[idx2]; } /* !crn */ }else{ /* !flg_wrt_crn */ /* Defaults for ERWG when corners are unknown */ for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; grd_crn_lat[idx2]=0.0; grd_crn_lon[idx2]=0.0; } /* !crn */ } /* !flg_wrt_crn */ } /* !col */ } /* !flg_grd_1D */ if(flg_grd_2D){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++){ for(lon_idx=0;lon_idx<lon_nbr;lon_idx++){ idx=lat_idx*lon_nbr+lon_idx; grd_ctr_lat[idx]=lat_ctr[lat_idx]; grd_ctr_lon[idx]=lon_ctr[lon_idx]; for(crn_idx=0;crn_idx<grd_crn_nbr;crn_idx++){ idx2=grd_crn_nbr*idx+crn_idx; lat_idx2=lat_idx*grd_crn_nbr+crn_idx; lon_idx2=lon_idx*grd_crn_nbr+crn_idx; grd_crn_lat[idx2]=lat_crn[lat_idx2]; 
grd_crn_lon[idx2]=lon_crn[lon_idx2]; } /* !crn */ } /* !lon */ } /* !lat */ /* 20190613: Convert CW quadrilaterals to CCW quadrilaterals so TempestRemap accepts grids Default construction/inferral method orders corners CCW and CW for s2n and n2s grids, respectively */ if(!flg_s2n){ for(idx=0L;idx<grd_sz_nbr;idx++){ idx2=grd_crn_nbr*idx; flg_ccw=nco_ccw_chk(grd_crn_lat+idx2,grd_crn_lon+idx2,grd_crn_nbr,idx_ccw,rcr_lvl); } /* !idx */ } /* !flg_s2n */ } /* !flg_grd_2D */ /* Find span of all grids */ double lat_max; /* [dgr] Maximum latitude */ double lat_min; /* [dgr] Minimum latitude */ double lon_max; /* [dgr] Maximum longitude */ double lon_min; /* [dgr] Minimum longitude */ idx_ctr=0; if(has_mss_val_ctr){ /* Find first non-missing value center and thus corners */ for(idx_ctr=0;idx_ctr<grd_sz_nbr;idx_ctr++){ if(grd_ctr_lat[idx_ctr] != mss_val_ctr_dbl) break; } /* !grd_sz_nbr */ assert(idx_ctr != grd_sz_nbr); } /* !has_mss_val_ctr */ if(flg_wrt_crn){ /* Grids with corner boundaries supplied or inferred */ lon_max=grd_crn_lon[idx_ctr*grd_crn_nbr]; lat_max=grd_crn_lat[idx_ctr*grd_crn_nbr]; lon_min=grd_crn_lon[idx_ctr*grd_crn_nbr]; lat_min=grd_crn_lat[idx_ctr*grd_crn_nbr]; for(idx=1;idx<grd_sz_nbr*grd_crn_nbr;idx++){ idx_ctr=idx/grd_crn_nbr; if(has_mss_val_ctr) if(grd_ctr_lat[idx_ctr] == mss_val_ctr_dbl) continue; lat_max=(grd_crn_lat[idx] > lat_max) ? grd_crn_lat[idx] : lat_max; lon_max=(grd_crn_lon[idx] > lon_max) ? grd_crn_lon[idx] : lon_max; lat_min=(grd_crn_lat[idx] < lat_min) ? grd_crn_lat[idx] : lat_min; lon_min=(grd_crn_lon[idx] < lon_min) ? grd_crn_lon[idx] : lon_min; } /* !idx */ }else{ /* !flg_wrt_crn */ /* 20170424: Diagnose grid-extent when corners were not provided or inferred This is usually (always?) for 1d unstructured grids with only centers provided */ lon_max=grd_ctr_lon[idx_ctr]; lat_max=grd_ctr_lat[idx_ctr]; lon_min=grd_ctr_lon[idx_ctr]; lat_min=grd_ctr_lat[idx_ctr]; for(idx_ctr=1;idx_ctr<grd_sz_nbr;idx_ctr++){ if(has_mss_val_ctr) if(grd_ctr_lat[idx_ctr] == mss_val_ctr_dbl) continue; lat_max=(grd_ctr_lat[idx_ctr] > lat_max) ? grd_ctr_lat[idx_ctr] : lat_max; lon_max=(grd_ctr_lon[idx_ctr] > lon_max) ? grd_ctr_lon[idx_ctr] : lon_max; lat_min=(grd_ctr_lat[idx_ctr] < lat_min) ? grd_ctr_lat[idx_ctr] : lat_min; lon_min=(grd_ctr_lon[idx_ctr] < lon_min) ? 
grd_ctr_lon[idx_ctr] : lon_min; } /* !idx_ctr */ } /* flg_wrt_crn */ lat_spn=lat_max-lat_min; lon_spn=lon_max-lon_min; /* Use strict rules for rectangular grids, looser for spans that are inferred, or center-to-center not corner-to-corner */ if(flg_grd_2D){ if((float)lon_spn == 360.0f && (float)lat_spn == 180.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; }else{ /* !flg_grd_2D */ if((float)lon_spn >= 340.0f && (float)lat_spn >= 170.0f) nco_grd_xtn=nco_grd_xtn_glb; else nco_grd_xtn=nco_grd_xtn_rgn; } /* flg_wrt_crn */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s reports grid resolution %li x %li, spans %g x %g degrees: [%g <= lat <= %g], [%g <= lon <= %g]\n",nco_prg_nm_get(),fnc_nm,lat_nbr,lon_nbr,lat_spn,lon_spn,lat_min,lat_max,lon_min,lon_max); if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s diagnosed input grid-extent: %s\n",nco_prg_nm_get(),fnc_nm,nco_grd_xtn_sng(nco_grd_xtn)); /* Write ERWG hints if filenames provided and grid is regional */ char *fl_hnt=NULL; char *fl_hnt_dst=NULL; char *fl_hnt_src=NULL; if(rgr->fl_hnt_dst) fl_hnt=fl_hnt_dst=rgr->fl_hnt_dst; if(rgr->fl_hnt_src) fl_hnt=fl_hnt_src=rgr->fl_hnt_src; if(nco_grd_xtn == nco_grd_xtn_rgn && fl_hnt){ const char *fl_mode="w"; FILE *fp_hnt; /* [fl] Hint file (for ERWG switches) file handle */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s writing ERWG weight-generation regional hint to file %s\n",nco_prg_nm_get(),fnc_nm,fl_hnt); /* Open output file */ if((fp_hnt=fopen(fl_hnt,fl_mode)) == NULL){ (void)fprintf(stderr,"%s: ERROR unable to open hint output file %s\n",nco_prg_nm_get(),fl_hnt); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: Opened hint file %s\n",nco_prg_nm_get(),fl_hnt); if(fl_hnt_src) (void)fprintf(fp_hnt,"--src_regional"); if(fl_hnt_dst) (void)fprintf(fp_hnt,"--dst_regional"); rcd=fclose(fp_hnt); if(rcd != 0){ (void)fprintf(stderr,"%s: ERROR unable to close hint output file %s\n",nco_prg_nm_get(),fl_hnt); nco_exit(EXIT_FAILURE); } /* end if */ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: Closed hint file %s\n",nco_prg_nm_get(),fl_hnt); } /* !nco_grd_xtn */ /* Diagnose area if necessary 20170510: ALM/CLM "area" is _FillValue=1.0e36f over ocean and total gridcell area in km2 (not multiplied by landfrac) elsewhere Writing this ALM/CLM "area" variable to gridfile, then using with ERWG --user_areas could be disastrous (depending on mask array and interpolation type) On the other hand CAM "area" variable is exactly what we want for gridfile Input areas are considered "untrustworthy" iff they have _and use_ missing value attribute Re-diagnose areas considered untrustworthy so output area array does not contain missing values */ if(flg_wrt_crn && has_mss_val_area){ const double mss_val_dbl=mss_val_area_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(area[idx] == mss_val_dbl) break; if(idx < grd_sz_nbr) use_mss_val_area=True; if(nco_dbg_lvl_get() >= nco_dbg_fl && use_mss_val_area) (void)fprintf(stdout,"%s: INFO %s reports input area field %s is considered untrustworthy because it uses missing values, will diagnose area from cell boundaries instead...\n",nco_prg_nm_get(),fnc_nm,area_nm_in); } /* !has_mss_val_area */ /* 20170511: There remain a handful of cases when input area should be diagnosed not copied These include using ncremap in SGS mode when inferred grids must use sensible area units Otherwise an inferred grid with area [km2] from ALM/CLM might be combined with 
area [sr] from NCO This would bias ERWG --user_areas produced values by ~10^10 Setting flg_dgn_area ensures inferred area uses [sr] */ const nco_bool flg_dgn_area=rgr->flg_dgn_area; /* [flg] Diagnose rather than copy inferred area */ if(flg_wrt_crn && /* If bounds are available to compute area and ... */ (area_id == NC_MIN_INT || /* Area is not in input file ... */ use_mss_val_area || /* Area is untrustworthy */ flg_dgn_area)){ /* User/application explicitly requests diagnostic area */ /* Not absolutely necessary to diagnose area because ERWG will diagnose and output area itself _unless_ --user_areas option is given */ if(nco_dbg_lvl_get() >= nco_dbg_std && flg_dgn_area) (void)fprintf(stdout,"%s: INFO %s reports diagnosing area from cell boundaries...\n",nco_prg_nm_get(),fnc_nm); if(flg_grd_crv || flg_grd_1D){ /* Area of arbitrary unstructured or curvilinear grids requires spherical trigonometry */ nco_sph_plg_area(rgr,grd_crn_lat,grd_crn_lon,grd_sz_nbr,grd_crn_nbr,area); }else if(flg_grd_2D){ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) area[lat_idx*lon_nbr+lon_idx]=fabs(dgr2rdn*(lon_bnd[2*lon_idx+1L]-lon_bnd[2*lon_idx])*(sin(dgr2rdn*lat_bnd[2*lat_idx+1L])-sin(dgr2rdn*lat_bnd[2*lat_idx]))); /* fabs() ensures positive area in n2s grids */ } /* !flg_grd_2D */ } /* !area_id */ /* ERWG will fail unless grid file has mask variable Use nul-mask (all points included) whenever input mask variable not supplied/detected Define nul-mask true everywhere and overwrite with false below Input mask can be any type and output mask will always be NC_INT */ for(idx=0;idx<grd_sz_nbr;idx++) msk[idx]=1; if(msk_id != NC_MIN_INT){ /* Change missing-value-masked points to 0 integer mask for SCRIP grids (SCRIP has no missing value convention) Input mask can be any type and output mask will always be NC_INT Applications: ALM/CLM mask (landmask) is NC_FLOAT and defines though does not use NC_FLOAT missing value CICE mask (tmask/umask) is NC_FLOAT and defines and uses NC_FLOAT missing value RACMO mask is NC_FLOAT and defines though does not use NC_FLOAT missing value AMSR mask is NC_SHORT and has no missing value GHRSST mask is NC_BYTE and is a multi-valued surface-type flag with missing value == -1b */ if(msk_typ != NC_INT){ if(nco_dbg_lvl_get() == nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s mask variable \"%s\" has odd type = %s. Re-run with higher debugging level for more information.\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,nco_typ_sng(msk_typ)); if(nco_dbg_lvl_get() > nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s mask variable \"%s\" has odd type = %s. Regridding weight generators require a mask variable of type NC_INT to specify points to include/exclude as sources/destinations. Points where the mask variable is zero or the missing value will be excluded (ignored) in regridding, all other points will be included. When inferring gridfiles, NCO assumes the first variable with a \"mask\"-like name (\"mask\", \"Mask\", \"grid_imask\", \"landmask\", or \"tmask\"), or the variable designated by the \"--msk_[src/dst]=msk_nm\" option, is this mask. However the variable \"%s\" in this file is not type NC_INT and so may not be intended as a regridding mask, hence this oh so pleasant informational WARNING. To prevent NCO from interpreting \"%s\" as a regridding mask, specify \"--msk_src=none\" and/or \"--msk_dst=none\", as appropriate. To utilize some other variable as the mask variable, specify \"--msk_src=msk_nm\" and/or \"--msk_dst=msk_nm\", as appropriate. 
Mask treatment is subtle, and NCO tries to \"do the right thing\". Whether it does is often easiest to discern by visual inspection of the regridded results in a turn-key viewer like Panoply or ncview.\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,nco_typ_sng(msk_typ),msk_nm_in,msk_nm_in); } /* msk_typ */ switch(msk_typ){ case NC_FLOAT: if(has_mss_val_msk){ const float mss_val_flt=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.fp[idx] == mss_val_flt || msk_unn.fp[idx] == 0.0f) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.fp[idx] == 0.0f) msk[idx]=0; } /* !mss_val */ break; case NC_DOUBLE: if(has_mss_val_msk){ const double mss_val_dbl=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.dp[idx] == mss_val_dbl || msk_unn.dp[idx] == 0.0) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.dp[idx] == 0.0) msk[idx]=0; } /* !mss_val */ break; case NC_INT: if(has_mss_val_msk){ const int mss_val_int=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.ip[idx] == mss_val_int || msk_unn.ip[idx] == 0) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.ip[idx] == 0) msk[idx]=0; } /* !mss_val */ break; case NC_SHORT: /* http://stackoverflow.com/questions/208433/how-do-i-write-a-short-literal-in-c */ if(has_mss_val_msk){ const short mss_val_sht=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.sp[idx] == mss_val_sht || msk_unn.sp[idx] == ((short)0)) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.sp[idx] == ((short)0)) msk[idx]=0; /* 20160111: AMSR kludge fxm */ // for(idx=0;idx<grd_sz_nbr;idx++) if(msk[idx] == 1) msk[idx]=0; } /* !mss_val */ break; case NC_BYTE: if(has_mss_val_msk){ const nco_byte mss_val_byt=mss_val_msk_dbl; for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.bp[idx] == mss_val_byt || msk_unn.bp[idx] == ((nco_byte)0)) msk[idx]=0; }else{ for(idx=0;idx<grd_sz_nbr;idx++) if(msk_unn.bp[idx] == ((nco_byte)0)) msk[idx]=0; /* 20170811: GHRSST kludge? 
*/ } /* !mss_val */ break; default: (void)fprintf(stderr,"%s: ERROR %s mask variable \"%s\" has unsupported type = %s\n",nco_prg_nm_get(),fnc_nm,msk_nm_in,nco_typ_sng(msk_typ)); nco_dfl_case_generic_err(); return NCO_ERR; break; } /* !msk_typ */ if(msk_unn.vp) msk_unn.vp=(void *)nco_free(msk_unn.vp); } /* !msk_id */ if(nco_dbg_lvl_get() >= nco_dbg_scl){ lat_wgt_ttl=0.0; area_ttl=0.0; if(flg_grd_2D){ (void)fprintf(stderr,"%s: INFO %s reports inferred rectangular latitude grid area diagnostics (lat_wgt_ttl and frc_lat_wgt should be valid):\n",nco_prg_nm_get(),fnc_nm); for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) lat_wgt_ttl+=lat_wgt[lat_idx]; }else{ (void)fprintf(stderr,"%s: INFO %s reports inferred unstructured or curvilinear latitude grid area diagnostics (ignore lat_wgt_ttl and frc_lat_wgt):\n",nco_prg_nm_get(),fnc_nm); } /* !flg_grd_2D */ for(lat_idx=0;lat_idx<lat_nbr;lat_idx++) for(lon_idx=0;lon_idx<lon_nbr;lon_idx++) area_ttl+=area[lat_idx*lon_nbr+lon_idx]; (void)fprintf(stdout,"lat_wgt_ttl = %20.15f, frc_lat_wgt = %20.15f, area_ttl = %20.15f, frc_area = %20.15f\n",lat_wgt_ttl,lat_wgt_ttl/2.0,area_ttl,area_ttl/(4.0*M_PI)); assert(area_ttl > 0.0); /* Protect following assertion since area might be in, e.g., km2 (ELM, RACMO) */ if(flg_area_sr) assert(area_ttl <= 4.0*M_PI); const double eps_rlt_area=1.0e-12; /* [frc] Error tolerance for global area */ if(nco_grd_xtn == nco_grd_xtn_glb){ if(fabs(1.0-area_ttl/(4.0*M_PI)) > eps_rlt_area) (void)fprintf(stdout,"%s: WARNING %s reports area for inferred global grid differs from true global area (4*pi sr) by greater than allowed fraction %g\n",nco_prg_nm_get(),fnc_nm,eps_rlt_area); } /* !nco_grd_xtn_glb */ } /* !dbg */ /* Open grid file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Define dimensions */ /* 20151230 ERWG appears to require presence of corner arrays in grid file even when they are not used (e.g., bilinear) But ERWG will break when corner values are bad. Default is do not write bad corner values. Uncomment next line to write bad corner values. 
*/ /* flg_wrt_crn=True; */ if(flg_wrt_crn) rcd=nco_def_dim(out_id,grd_crn_nm,grd_crn_nbr,&dmn_id_grd_crn); rcd=nco_def_dim(out_id,grd_sz_nm,grd_sz_nbr,&dmn_id_grd_sz); rcd=nco_def_dim(out_id,grd_rnk_nm,grd_rnk_nbr,&dmn_id_grd_rnk); int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; /* Define variables */ (void)nco_def_var(out_id,dmn_sz_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_rnk,&dmn_sz_int_id); /* NB: Too small to deflate */ (void)nco_def_var(out_id,area_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&area_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_grd_sz,&msk_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msk_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lat_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_ctr_lon_nm,crd_typ,dmn_nbr_1D,&dmn_id_grd_sz,&grd_ctr_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_ctr_lon_id,shuffle,deflate,dfl_lvl); if(flg_wrt_crn){ dmn_ids[0]=dmn_id_grd_sz; dmn_ids[1]=dmn_id_grd_crn; (void)nco_def_var(out_id,grd_crn_lat_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lat_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lat_id,shuffle,deflate,dfl_lvl); (void)nco_def_var(out_id,grd_crn_lon_nm,crd_typ,dmn_nbr_2D,dmn_ids,&grd_crn_lon_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,grd_crn_lon_id,shuffle,deflate,dfl_lvl); } /* !flg_wrt_crn */ /* Define attributes */ aed_sct aed_mtd; char *att_nm; if(strstr(rgr->grd_ttl,"None given")){ const char att_fmt[]="NCO inferred this grid from input file %s"; att_val=(char *)nco_malloc((strlen(att_fmt)+strlen(rgr->fl_in)+1L)*sizeof(char)); sprintf(att_val,att_fmt,rgr->fl_in); }else{ att_val=strdup(rgr->grd_ttl); } /* !grd_ttl */ rcd=nco_char_att_put(out_id,NULL,"title",att_val); rcd=nco_char_att_put(out_id,NULL,"Conventions","SCRIP"); const char usr_cpp[]=TKN2SNG(USER); /* [sng] Hostname from C pre-processor */ rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO"); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,NULL,"latitude_grid_type",nco_grd_lat_sng(lat_typ)); rcd=nco_char_att_put(out_id,NULL,"longitude_grid_type",nco_grd_lon_sng(lon_typ)); rcd=nco_char_att_put(out_id,dmn_sz_nm,"long_name","Size(s) of horizontal dimensions (in Fortran storage order for historical reasons)"); if(flg_area_sr){ rcd=nco_char_att_put(out_id,area_nm,"long_name","Solid Angle Subtended on Source Grid"); rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm,"units","steradian"); }else{ /* !flg_area_sr */ rcd=nco_char_att_put(out_id,area_nm,"long_name","Area on Source Grid"); // rcd=nco_char_att_put(out_id,area_nm,"standard_name","solid_angle"); rcd=nco_char_att_put(out_id,area_nm,"units",area_unt); } /* !flg_area_sr */ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"long_name","Latitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"standard_name","latitude"); if(ngl_unt){ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,unt_sng,ngl_unt); }else{ /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees_north"); else 
rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"units","degrees"); } /* !ngl_unt */ rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"long_name","Longitude of Grid Cell Centers"); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"standard_name","longitude"); if(ngl_unt){ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,unt_sng,ngl_unt); }else{ /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees_east"); else rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"units","degrees"); } /* !ngl_unt */ if(flg_wrt_crn){ rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"long_name","Latitude of Grid Cell Vertices"); if(ngl_unt){ rcd=nco_char_att_put(out_id,grd_crn_lat_nm,unt_sng,ngl_unt); }else{ /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lat_nm,"units","degrees"); } /* !ngl_unt */ rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"long_name","Longitude of Grid Cell Vertices"); if(ngl_unt){ rcd=nco_char_att_put(out_id,grd_crn_lon_nm,unt_sng,ngl_unt); }else{ /* 20191009: ERWG 7.1.0r- breaks on CF-compliant units strings */ if(rgr->flg_cf_units) rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees_north"); else rcd=nco_char_att_put(out_id,grd_crn_lon_nm,"units","degrees"); } /* !ngl_unt */ rcd=nco_char_att_put(out_id,grd_ctr_lat_nm,"bounds",grd_crn_lat_nm); rcd=nco_char_att_put(out_id,grd_ctr_lon_nm,"bounds",grd_crn_lon_nm); } /* !flg_wrt_crn */ rcd=nco_char_att_put(out_id,msk_nm,"long_name","Binary Integer Mask for Grid"); rcd=nco_char_att_put(out_id,msk_nm,"units","none"); /* Begin data mode */ (void)nco_enddef(out_id); /* Write variables */ dmn_srt[0]=0L; dmn_cnt[0]=grd_rnk_nbr; rcd=nco_put_vara(out_id,dmn_sz_int_id,dmn_srt,dmn_cnt,dmn_sz_int,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,area_id,dmn_srt,dmn_cnt,area,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,msk_id,dmn_srt,dmn_cnt,msk,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lat_id,dmn_srt,dmn_cnt,grd_ctr_lat,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=grd_sz_nbr; rcd=nco_put_vara(out_id,grd_ctr_lon_id,dmn_srt,dmn_cnt,grd_ctr_lon,crd_typ); if(flg_wrt_crn){ dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lat_id,dmn_srt,dmn_cnt,grd_crn_lat,crd_typ); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=grd_sz_nbr; dmn_cnt[1]=grd_crn_nbr; rcd=nco_put_vara(out_id,grd_crn_lon_id,dmn_srt,dmn_cnt,grd_crn_lon,crd_typ); } /* !flg_wrt_crn */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); fl_out=rgr->fl_ugrid; if(fl_out){ /* Test UGRID: Documentation: https://github.com/ugrid-conventions/ugrid-conventions Procedure: Create 1x1 skeleton file, infer UGRID and SCRIP grids from it ncks -O -D 1 --rgr ttl='Equiangular grid 180x360' --rgr skl=${HOME}/skl_180x360.nc --rgr scrip=${HOME}/grd_180x360_SCRIP.nc --rgr latlon=180,360#lat_typ=eqa#lon_typ=Grn_ctr ~/nco/data/in.nc ~/foo.nc ncks -O -D 1 --rgr infer --rgr ugrid=${HOME}/grd_ugrid.nc --rgr scrip=${HOME}/grd_scrip.nc ~/skl_180x360.nc ~/foo.nc ncks --cdl -v mesh_node_y ~/grd_ugrid.nc ncks --cdl -v mesh_face_nodes,mesh_face_x,mesh_face_y -d nFaces,0 ~/grd_ugrid.nc ncks --cdl -v mesh_edge_nodes,mesh_edge_x,mesh_edge_y -d nEdges,0 ~/grd_ugrid.nc ncks --cdl -v grid_center_lat,grid_corner_lat -d 
grid_size,0,,360 -d grid_corners,0,3 ~/grd_scrip.nc ncks --cdl -m -M ~/grd_ugrid.nc */ char *dgx_nm=NULL_CEWI; /* [sng] Name of edge_coordinates x variable */ char *dgy_nm=NULL_CEWI; /* [sng] Name of edge_coordinates y variable */ char *dg_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as edges */ char *dg_nd_nm=NULL_CEWI; /* [sng] Name of edge_node_connectivity variable */ char *fcx_nm=NULL_CEWI; /* [sng] Name of face_coordinates x variable */ char *fcy_nm=NULL_CEWI; /* [sng] Name of face_coordinates y variable */ char *fc_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as faces */ char *fc_nd_nm=NULL_CEWI; /* [sng] Name of face_node_connectivity variable */ char *msh_nm=NULL_CEWI; /* [sng] Name of mesh topology variable */ char *nd_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes */ char *ndx_nm=NULL_CEWI; /* [sng] Name of node_coordinates x variable */ char *ndy_nm=NULL_CEWI; /* [sng] Name of node_coordinates y variable */ char *npe_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes-per-edge */ char *npf_dmn_nm=NULL_CEWI; /* [sng] Name of dimension to recognize as nodes-per-face */ double *dgx=NULL_CEWI; /* [dgr] Characteristic longitude of edges */ double *dgy=NULL_CEWI; /* [dgr] Characteristic latitude of edges */ double *fcx=NULL_CEWI; /* [dgr] Characteristic longitude of faces */ double *fcy=NULL_CEWI; /* [dgr] Characteristic latitude of faces */ double *ndx=NULL_CEWI; /* [dgr] Longitude of nodes */ double *ndy=NULL_CEWI; /* [dgr] Latitude of nodes */ int *dg_nd; /* [idx] edge_node_connectivity variable */ int *fc_nd; /* [idx] face_node_connectivity variable */ int dg_nd_id=NC_MIN_INT; /* [id] edge_node_connectivity variable ID */ int dgx_id=NC_MIN_INT; /* [id] Characteristic longitude of edges variable ID */ int dgy_id=NC_MIN_INT; /* [id] Characteristic latitude of edges variable ID */ int dmn_id_dg=NC_MIN_INT; /* [id] Dimension ID for edges */ int dmn_id_fc=NC_MIN_INT; /* [id] Dimension ID for faces */ int dmn_id_nd=NC_MIN_INT; /* [id] Dimension ID for nodes */ int dmn_id_npe=NC_MIN_INT; /* [id] Dimension ID for nodes-per-edge */ int dmn_id_npf=NC_MIN_INT; /* [id] Dimension ID for nodes-per-face */ int fc_nd_id=NC_MIN_INT; /* [id] face_node_connectivity variable ID */ int fcx_id=NC_MIN_INT; /* [id] Characteristic longitude of faces variable ID */ int fcy_id=NC_MIN_INT; /* [id] Characteristic latitude of faces variable ID */ int msh_id=NC_MIN_INT; /* [id] Mesh topology variable ID */ int msh_val=42; /* [id] Mesh topology variable value from Monty Python */ int ndx_id=NC_MIN_INT; /* [id] Longitude of mesh nodes variable ID */ int ndy_id=NC_MIN_INT; /* [id] Latitude of mesh nodes variable ID */ const long fc_nbr=grd_sz_nbr; /* [nbr] Number of faces in mesh */ const long npe_nbr=2; /* [nbr] Number of nodes per edge */ const long npf_nbr=grd_crn_nbr; /* [nbr] Number of nodes per face */ long dg_idx; /* [idx] Counting index for edges */ long dg_nbr=(long)NC_MIN_INT64; /* [nbr] Number of edges in mesh */ long fc_idx; /* [idx] Counting index for faces */ long nd_idx; /* [idx] Counting index for nodes */ long nd_nbr=(long)NC_MIN_INT64; /* [nbr] Number of nodes in mesh */ long srt_idx=0; /* [idx] start_index (C/Fortran) for edge_nodes, face_nodes */ if(!dgx_nm) dgx_nm=(char *)strdup("mesh_edge_x"); if(!dgy_nm) dgy_nm=(char *)strdup("mesh_edge_y"); if(!dg_dmn_nm) dg_dmn_nm=(char *)strdup("nEdges"); if(!fcx_nm) fcx_nm=(char *)strdup("mesh_face_x"); if(!fcy_nm) fcy_nm=(char *)strdup("mesh_face_y"); if(!fc_dmn_nm) fc_dmn_nm=(char 
*)strdup("nFaces"); if(!dg_nd_nm) dg_nd_nm=(char *)strdup("mesh_edge_nodes"); if(!fc_nd_nm) fc_nd_nm=(char *)strdup("mesh_face_nodes"); if(!msh_nm) msh_nm=(char *)strdup("mesh"); if(!nd_dmn_nm) nd_dmn_nm=(char *)strdup("nNodes"); if(!ndx_nm) ndx_nm=(char *)strdup("mesh_node_x"); if(!ndy_nm) ndy_nm=(char *)strdup("mesh_node_y"); if(!npe_dmn_nm) npe_dmn_nm=(char *)strdup("two"); if(!npf_dmn_nm) npf_dmn_nm=(char *)strdup("maxNodesPerFace"); if(flg_grd_1D){ (void)fprintf(stdout,"%s: ERROR %s UGRID output does not yet support 1D grids\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); }else if(flg_grd_2D){ /* Assume 2D grids are global and comprised of quadrilaterals */ switch(lat_typ){ case nco_grd_lat_fv: /* Currently all 2D grids are converted to the same UGRID representation fxm: Cap grids (e.g., FV) should eventually be written with a real cap, rather than as the "polar teeth" representation currently used. Polar teeth convention allows cap grid to be represented as rectangular on disk However, cap grids are better suited to non-rectangular UGRID meshes */ case nco_grd_lat_eqa: case nco_grd_lat_gss: /* Numbers of unique edges and nodes counted from South Pole (SP) to North Pole (NP) */ dg_nbr=lon_nbr*2+ /* SP: cells_per_lat*unique_edges_per_cell */ (lat_nbr-2)*lon_nbr*2+ /* Mid: lats*cells_per_lat*unique_edges_per_cell */ lon_nbr*1; /* NP: cells_per_lat*unique_edges_per_cell */ nd_nbr=1+lon_nbr*1+ /* SP: SP+cells_per_lat*unique_nodes_per_cell */ (lat_nbr-2)*lon_nbr*1+ /* Mid: lats*cells_per_lat*unique_nodes_per_cell */ 1; /* NP: NP */ break; case nco_grd_lat_unk: case nco_grd_lat_nil: default: nco_dfl_case_generic_err(); break; } /* !lat_typ */ }else if(flg_grd_crv){ (void)fprintf(stdout,"%s: ERROR %s UGRID output does not yet support curvilinear grids\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !flg_grd */ dg_nd=(int *)nco_malloc(dg_nbr*npe_nbr*nco_typ_lng(NC_INT)); dgx=(double *)nco_malloc(dg_nbr*nco_typ_lng(crd_typ)); dgy=(double *)nco_malloc(dg_nbr*nco_typ_lng(crd_typ)); fc_nd=(int *)nco_malloc(fc_nbr*npf_nbr*nco_typ_lng(NC_INT)); fcx=(double *)nco_malloc(fc_nbr*nco_typ_lng(crd_typ)); fcy=(double *)nco_malloc(fc_nbr*nco_typ_lng(crd_typ)); ndx=(double *)nco_malloc(nd_nbr*nco_typ_lng(crd_typ)); ndy=(double *)nco_malloc(nd_nbr*nco_typ_lng(crd_typ)); const long int idx_fst_crn_ll=0; const long int idx_fst_crn_lr=1; const long int idx_fst_crn_ur=2; const long int idx_fst_crn_ul=3; /* Node Ordering: Each interior face requires one new node Node 0 at SP New latitude row moves next node North Add nodes to run West->East */ /* SP */ ndx[0]=lon_crn[0]; /* Longitude degenerate at SP, NP, keep same longitude as corner array */ ndy[0]=lat_crn[0]; /* Mid */ for(nd_idx=1;nd_idx<nd_nbr-1L;nd_idx++){ fc_idx=nd_idx-1L; lat_idx=fc_idx/lon_nbr; lon_idx=fc_idx%lon_nbr; ndx[nd_idx]=lon_crn[lon_idx*grd_crn_nbr+idx_fst_crn_ul]; ndy[nd_idx]=lat_crn[lat_idx*grd_crn_nbr+idx_fst_crn_ul]; } /* !nd_idx */ /* NP */ ndx[nd_nbr-1L]=lon_crn[(lon_nbr-1)*grd_crn_nbr+idx_fst_crn_ul]; ndy[nd_nbr-1L]=lat_crn[(lat_nbr-1)*grd_crn_nbr+idx_fst_crn_ul]; /* Edge Ordering: epf_nbr is number of distinct edges-per-face (incremental, for interior cells) Each additional interior rectangular gridcell requires two new edges: Edge 0 runs South->North for all cells Edge 1 runs West->East for all cells NP row requires only one new edge per face */ /* SP */ const int epf_nbr=2; /* [nbr] Number of distinct edges-per-face (incremental, for interior cells) */ for(fc_idx=0;fc_idx<lon_nbr;fc_idx++){ dg_idx=fc_idx*epf_nbr; /* Edge 
0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+fc_idx+1L; /* Edge 1 */ dg_nd[(dg_idx+1L)*npe_nbr+0L]=srt_idx+fc_idx+1L; dg_nd[(dg_idx+1L)*npe_nbr+1L]=srt_idx+fc_idx+2L; } /* !fc_idx */ /* Mid */ for(fc_idx=lon_nbr;fc_idx<(lat_nbr-1L)*lon_nbr;fc_idx++){ dg_idx=fc_idx*epf_nbr; /* Edge 0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx+fc_idx-lon_nbr+1L; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+fc_idx+1L; /* Edge 1 */ dg_nd[(dg_idx+1L)*npe_nbr+0L]=srt_idx+fc_idx+1L; dg_nd[(dg_idx+1L)*npe_nbr+1L]=srt_idx+fc_idx+2L; } /* !fc_idx */ /* NP */ for(fc_idx=fc_nbr-lon_nbr;fc_idx<fc_nbr;fc_idx++){ /* Only one new edge per face in last row, easiest to count backwards from last edge */ dg_idx=dg_nbr-(fc_nbr-fc_idx); /* NP faces require only one new edge, Edge 0 */ dg_nd[(dg_idx+0L)*npe_nbr+0L]=srt_idx+fc_idx-lon_nbr+1L; dg_nd[(dg_idx+0L)*npe_nbr+1L]=srt_idx+nd_nbr-1L; } /* !fc_idx */ /* SP */ for(fc_idx=0;fc_idx<lon_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+0L]=srt_idx+0L; fc_nd[fc_idx*npf_nbr+1L]=srt_idx+fc_idx+2L; /* NB: CCW */ fc_nd[fc_idx*npf_nbr+2L]=srt_idx+fc_idx+1L; /* NB: CCW */ fc_nd[fc_idx*npf_nbr+3L]=mss_val_int_out; } /* !fc_idx */ /* Mid */ for(fc_idx=lon_nbr;fc_idx<fc_nbr-lon_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+idx_fst_crn_ll]=srt_idx+fc_idx-lon_nbr+1L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_lr]=srt_idx+fc_idx-lon_nbr+2L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_ur]=srt_idx+fc_idx+2L; fc_nd[fc_idx*npf_nbr+idx_fst_crn_ul]=srt_idx+fc_idx+1L; } /* !fc_idx */ /* NP */ for(fc_idx=fc_nbr-lon_nbr;fc_idx<fc_nbr;fc_idx++){ fc_nd[fc_idx*npf_nbr+0L]=srt_idx+nd_nbr-(fc_nbr-fc_idx)-2L; fc_nd[fc_idx*npf_nbr+1L]=srt_idx+nd_nbr-(fc_nbr-fc_idx)-1L; fc_nd[fc_idx*npf_nbr+2L]=srt_idx+nd_nbr-1L; fc_nd[fc_idx*npf_nbr+3L]=mss_val_int_out; } /* !fc_idx */ /* Characteristic coordinates */ for(dg_idx=0;dg_idx<dg_nbr-1L;dg_idx++){ idx=dg_idx*npe_nbr; dgx[dg_idx]=0.5*(ndx[dg_nd[idx+0L]]+ndx[dg_nd[idx+1L]]); dgy[dg_idx]=0.5*(ndy[dg_nd[idx+0L]]+ndy[dg_nd[idx+1L]]); } /* !dg_idx */ /* Degenerate longitude at SP, NP, causes weird characteristic longitude unless special care taken */ for(fc_idx=0;fc_idx<fc_nbr-1L;fc_idx++){ idx=fc_idx*npf_nbr; if(fc_idx < lon_nbr){ fcx[fc_idx]=0.5*(ndx[fc_nd[idx+1L]]+ndx[fc_nd[idx+2L]]); }else if(fc_idx >= fc_nbr-lon_nbr-1){ fcx[fc_idx]=0.5*(ndx[fc_nd[idx+0L]]+ndx[fc_nd[idx+1L]]); }else if(fc_nd[idx+3L] != mss_val_int_out){ /* fxm for fcx use nco_lon_crn_avg_brnch() and 3-node version too */ fcx[fc_idx]=0.25*(ndx[fc_nd[idx+0L]]+ndx[fc_nd[idx+1L]]+ndx[fc_nd[idx+2L]]+ndx[fc_nd[idx+3L]]); }else{ abort(); } /* !fc_idx */ if(fc_nd[idx+3L] != mss_val_int_out) fcy[fc_idx]=0.25*(ndy[fc_nd[idx+0L]]+ndy[fc_nd[idx+1L]]+ndy[fc_nd[idx+2L]]+ndy[fc_nd[idx+3L]]); else fcy[fc_idx]=0.33*(ndy[fc_nd[idx+0L]]+ndy[fc_nd[idx+1L]]+ndy[fc_nd[idx+2L]]); } /* !fc_idx */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); rcd=nco_def_dim(out_id,dg_dmn_nm,dg_nbr,&dmn_id_dg); rcd=nco_def_dim(out_id,fc_dmn_nm,fc_nbr,&dmn_id_fc); rcd=nco_def_dim(out_id,nd_dmn_nm,nd_nbr,&dmn_id_nd); rcd=nco_def_dim(out_id,npe_dmn_nm,npe_nbr,&dmn_id_npe); rcd=nco_def_dim(out_id,npf_dmn_nm,npf_nbr,&dmn_id_npf); dmn_ids[0]=dmn_id_dg; dmn_ids[1]=dmn_id_npe; rcd=nco_def_var(out_id,dg_nd_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids,&dg_nd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dg_nd_id,shuffle,deflate,dfl_lvl); dmn_ids[0]=dmn_id_fc; dmn_ids[1]=dmn_id_npf;
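/* Sanity note (an illustrative aside, not an NCO-endorsed check): the edge and node counts constructed above satisfy Euler's formula for the sphere, V-E+F=2. For example, an equiangular 180x360 grid yields nd_nbr=64442, dg_nbr=129240, fc_nbr=64800, and 64442-129240+64800=2 as expected */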
rcd=nco_def_var(out_id,fc_nd_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids,&fc_nd_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fc_nd_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,msh_nm,(nc_type)NC_INT,dmn_nbr_0D,(int *)NULL,&msh_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,msh_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,ndx_nm,crd_typ,dmn_nbr_1D,&dmn_id_nd,&ndx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ndx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,ndy_nm,crd_typ,dmn_nbr_1D,&dmn_id_nd,&ndy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,ndy_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,dgx_nm,crd_typ,dmn_nbr_1D,&dmn_id_dg,&dgx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dgx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,dgy_nm,crd_typ,dmn_nbr_1D,&dmn_id_dg,&dgy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,dgy_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,fcx_nm,crd_typ,dmn_nbr_1D,&dmn_id_fc,&fcx_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fcx_id,shuffle,deflate,dfl_lvl); rcd=nco_def_var(out_id,fcy_nm,crd_typ,dmn_nbr_1D,&dmn_id_fc,&fcy_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,fcy_id,shuffle,deflate,dfl_lvl); if(strstr(rgr->grd_ttl,"None given")){ const char att_fmt[]="NCO constructed this UGRID grid from scratch"; att_val=(char *)nco_malloc((strlen(att_fmt)+strlen(rgr->fl_in)+1L)*sizeof(char)); sprintf(att_val,att_fmt); }else{ att_val=strdup(rgr->grd_ttl); } /* !grd_ttl */ rcd=nco_char_att_put(out_id,NULL,"title",att_val); rcd=nco_char_att_put(out_id,NULL,"Conventions","CF-1.6, UGRID-1.0"); rcd=nco_char_att_put(out_id,NULL,"created_by",usr_cpp); rcd=nco_char_att_put(out_id,NULL,"grid_generator","NCO"); (void)nco_hst_att_cat(out_id,rgr->cmd_ln); (void)nco_vrs_att_cat(out_id); rcd=nco_char_att_put(out_id,msh_nm,"cf_role","mesh_topology"); rcd=nco_char_att_put(out_id,msh_nm,"standard_name","mesh_topology"); rcd=nco_char_att_put(out_id,msh_nm,"long_name","Topology data"); att_nm=strdup("topology_dimension"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=msh_nm; aed_mtd.id=msh_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&val_two; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,msh_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); aed_mtd.sz=strlen(ndx_nm)+strlen(ndy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",ndx_nm,ndy_nm); rcd=nco_char_att_put(out_id,msh_nm,"node_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"face_node_connectivity",fc_nd_nm); aed_mtd.sz=strlen(fcx_nm)+strlen(fcy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",fcx_nm,fcy_nm); rcd=nco_char_att_put(out_id,msh_nm,"face_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"face_dimension",fc_dmn_nm); rcd=nco_char_att_put(out_id,msh_nm,"edge_node_connectivity",dg_nd_nm); aed_mtd.sz=strlen(dgx_nm)+strlen(dgy_nm)+1L; att_val=(char *)nco_malloc((aed_mtd.sz+1L)*nco_typ_lng(NC_CHAR)); (void)sprintf(att_val,"%s %s",dgx_nm,dgy_nm); rcd=nco_char_att_put(out_id,msh_nm,"edge_coordinates",att_val); rcd=nco_char_att_put(out_id,msh_nm,"edge_dimension",dg_dmn_nm); rcd=nco_char_att_put(out_id,ndx_nm,"standard_name","longitude"); rcd=nco_char_att_put(out_id,ndx_nm,"long_name","Longitude of mesh nodes"); rcd=nco_char_att_put(out_id,ndx_nm,"units","degrees_east"); rcd=nco_char_att_put(out_id,ndy_nm,"standard_name","latitude"); rcd=nco_char_att_put(out_id,ndy_nm,"long_name","Latitude of mesh nodes"); 
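/* For orientation, the mesh-topology metadata written in this block renders in CDL roughly as follows (a sketch using the default names assigned above, not verbatim ncdump output):
   int mesh ;
   mesh:cf_role = "mesh_topology" ;
   mesh:topology_dimension = 2 ;
   mesh:node_coordinates = "mesh_node_x mesh_node_y" ;
   mesh:face_node_connectivity = "mesh_face_nodes" ;
   mesh:edge_node_connectivity = "mesh_edge_nodes" ; */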
rcd=nco_char_att_put(out_id,ndy_nm,"units","degrees_north"); rcd=nco_char_att_put(out_id,dg_nd_nm,"cf_role","edge_node_connectivity"); rcd=nco_char_att_put(out_id,dg_nd_nm,"long_name","Maps every edge to the two nodes that it connects"); att_nm=strdup("start_index"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=dg_nd_nm; aed_mtd.id=dg_nd_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&val_zero; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,dg_nd_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,fc_nd_nm,"cf_role","face_node_connectivity"); rcd=nco_char_att_put(out_id,fc_nd_nm,"long_name","Maps every face to its corner nodes"); att_nm=strdup("start_index"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=fc_nd_nm; aed_mtd.id=fc_nd_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&val_zero; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,fc_nd_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); att_nm=strdup("_FillValue"); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=fc_nd_nm; aed_mtd.id=fc_nd_id; aed_mtd.sz=1; aed_mtd.type=NC_INT; aed_mtd.val.ip=&mss_val_int_out; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,fc_nd_id,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); rcd=nco_char_att_put(out_id,dgx_nm,"standard_name","longitude"); rcd=nco_char_att_put(out_id,dgx_nm,"long_name","Characteristic longitude of 2D mesh face"); rcd=nco_char_att_put(out_id,dgx_nm,"units","degrees_east"); rcd=nco_char_att_put(out_id,dgy_nm,"standard_name","latitude"); rcd=nco_char_att_put(out_id,dgy_nm,"long_name","Characteristic latitude of 2D mesh face"); rcd=nco_char_att_put(out_id,dgy_nm,"units","degrees_north"); rcd=nco_char_att_put(out_id,fcx_nm,"standard_name","longitude"); rcd=nco_char_att_put(out_id,fcx_nm,"long_name","Characteristic longitude of 2D mesh edge"); rcd=nco_char_att_put(out_id,fcx_nm,"units","degrees_east"); rcd=nco_char_att_put(out_id,fcy_nm,"standard_name","latitude"); rcd=nco_char_att_put(out_id,fcy_nm,"long_name","Characteristic latitude of 2D mesh edge"); rcd=nco_char_att_put(out_id,fcy_nm,"units","degrees_north"); /* Begin data mode */ (void)nco_enddef(out_id); (void)nco_put_vara(out_id,msh_id,dmn_srt,dmn_cnt,&msh_val,(nc_type)NC_INT); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=dg_nbr; dmn_cnt[1]=epf_nbr; (void)nco_put_vara(out_id,dg_nd_id,dmn_srt,dmn_cnt,dg_nd,(nc_type)NC_INT); dmn_srt[0]=dmn_srt[1]=0L; dmn_cnt[0]=fc_nbr; dmn_cnt[1]=npf_nbr; (void)nco_put_vara(out_id,fc_nd_id,dmn_srt,dmn_cnt,fc_nd,(nc_type)NC_INT); dmn_srt[0]=0L; dmn_cnt[0]=nd_nbr; (void)nco_put_vara(out_id,ndx_id,dmn_srt,dmn_cnt,ndx,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=nd_nbr; (void)nco_put_vara(out_id,ndy_id,dmn_srt,dmn_cnt,ndy,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=dg_nbr; (void)nco_put_vara(out_id,dgx_id,dmn_srt,dmn_cnt,dgx,crd_typ); (void)nco_put_vara(out_id,dgy_id,dmn_srt,dmn_cnt,dgy,crd_typ); dmn_srt[0]=0L; dmn_cnt[0]=fc_nbr; (void)nco_put_vara(out_id,fcx_id,dmn_srt,dmn_cnt,fcx,crd_typ); (void)nco_put_vara(out_id,fcy_id,dmn_srt,dmn_cnt,fcy,crd_typ); /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); /* Free memory associated with output file */ if(dgx) dgx=(double *)nco_free(dgx); if(dgy) dgy=(double *)nco_free(dgy); if(dg_nd) dg_nd=(int *)nco_free(dg_nd); if(fcx) fcx=(double *)nco_free(fcx); if(fcy) fcy=(double *)nco_free(fcy); if(fc_nd) fc_nd=(int *)nco_free(fc_nd); if(ndx) ndx=(double *)nco_free(ndx); if(ndy) ndy=(double *)nco_free(ndy); /* Free strings */ if(dgx_nm) dgx_nm=(char *)nco_free(dgx_nm); if(dgy_nm) 
dgy_nm=(char *)nco_free(dgy_nm); if(dg_dmn_nm) dg_dmn_nm=(char *)nco_free(dg_dmn_nm); if(dg_nd_nm) dg_nd_nm=(char *)nco_free(dg_nd_nm); if(fcx_nm) fcx_nm=(char *)nco_free(fcx_nm); if(fcy_nm) fcy_nm=(char *)nco_free(fcy_nm); if(fc_dmn_nm) fc_dmn_nm=(char *)nco_free(fc_dmn_nm); if(fc_nd_nm) fc_nd_nm=(char *)nco_free(fc_nd_nm); if(msh_nm) msh_nm=(char *)nco_free(msh_nm); if(nd_dmn_nm) nd_dmn_nm=(char *)nco_free(nd_dmn_nm); if(ndx_nm) ndx_nm=(char *)nco_free(ndx_nm); if(ndy_nm) ndy_nm=(char *)nco_free(ndy_nm); if(npe_dmn_nm) npe_dmn_nm=(char *)nco_free(npe_dmn_nm); if(npf_dmn_nm) npf_dmn_nm=(char *)nco_free(npf_dmn_nm); } /* !fl_ugrid */ /* Free memory associated with input file */ if(dmn_sz_int) dmn_sz_int=(int *)nco_free(dmn_sz_int); if(msk) msk=(int *)nco_free(msk); if(area) area=(double *)nco_free(area); if(grd_ctr_lat) grd_ctr_lat=(double *)nco_free(grd_ctr_lat); if(grd_ctr_lon) grd_ctr_lon=(double *)nco_free(grd_ctr_lon); if(grd_crn_lat) grd_crn_lat=(double *)nco_free(grd_crn_lat); if(grd_crn_lon) grd_crn_lon=(double *)nco_free(grd_crn_lon); if(lat_bnd) lat_bnd=(double *)nco_free(lat_bnd); if(lat_crn) lat_crn=(double *)nco_free(lat_crn); if(lat_ctr) lat_ctr=(double *)nco_free(lat_ctr); if(lat_ntf) lat_ntf=(double *)nco_free(lat_ntf); if(lat_wgt) lat_wgt=(double *)nco_free(lat_wgt); if(lon_bnd) lon_bnd=(double *)nco_free(lon_bnd); if(lon_crn) lon_crn=(double *)nco_free(lon_crn); if(lon_ctr) lon_ctr=(double *)nco_free(lon_ctr); if(lon_ntf) lon_ntf=(double *)nco_free(lon_ntf); if(vrt_cll) vrt_cll=(int *)nco_free(vrt_cll); if(vrt_lat) vrt_lat=(double *)nco_free(vrt_lat); if(vrt_lon) vrt_lon=(double *)nco_free(vrt_lon); /* Free strings */ if(area_nm_in) area_nm_in=(char *)nco_free(area_nm_in); if(area_unt) area_unt=(char *)nco_free(area_unt); if(bnd_dmn_nm) bnd_dmn_nm=(char *)nco_free(bnd_dmn_nm); if(col_dmn_nm) col_dmn_nm=(char *)nco_free(col_dmn_nm); if(lat_bnd_nm) lat_bnd_nm=(char *)nco_free(lat_bnd_nm); if(lat_dmn_nm) lat_dmn_nm=(char *)nco_free(lat_dmn_nm); if(lat_nm_in) lat_nm_in=(char *)nco_free(lat_nm_in); if(lon_bnd_nm) lon_bnd_nm=(char *)nco_free(lon_bnd_nm); if(lon_dmn_nm) lon_dmn_nm=(char *)nco_free(lon_dmn_nm); if(lon_nm_in) lon_nm_in=(char *)nco_free(lon_nm_in); if(msk_nm_in) msk_nm_in=(char *)nco_free(msk_nm_in); if(ngl_unt) ngl_unt=(char *)nco_free(ngl_unt); if(vrt_cll_nm) vrt_cll_nm=(char *)nco_free(vrt_cll_nm); if(vrt_lat_nm) vrt_lat_nm=(char *)nco_free(vrt_lat_nm); if(vrt_lon_nm) vrt_lon_nm=(char *)nco_free(vrt_lon_nm); return rcd; } /* !nco_grd_nfr() */ double /* O [dgr] Longitude difference (lon_r-lon_l) */ nco_lon_dff_brnch_dgr /* [fnc] Subtract longitudes with branch-cut rules */ (double lon_r, /* I [dgr] Longitude on right of gridcell (subtractor) */ double lon_l) /* I [dgr] Longitude on left of gridcell (subtractee) */ { /* Purpose: Return difference of two longitudes in degrees Assume longitudes are within 180 degrees of each other Default orientation is monotonically increasing longitude from left to right */ const char fnc_nm[]="nco_lon_dff_brnch_dgr()"; const double lon_dff=lon_r-lon_l; /* [dgr] Longitude difference (lon_r-lon_l) */ if(lon_dff >= 180.0){ (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff); return lon_dff-360.0; }else if(lon_dff <= -180.0){ return lon_dff+360.0; } /* !lon_dff */ return lon_dff; } /* !nco_lon_dff_brnch_dgr() */ double /* O [rdn] Longitude difference (lon_r-lon_l) */ nco_lon_dff_brnch_rdn /* [fnc] Subtract longitudes with branch-cut rules */ (double lon_r, /*
I [rdn] Longitude on right of gridcell (subtractor) */ double lon_l) /* I [rdn] Longitude on left of gridcell (subtractee) */ { /* Purpose: Return difference of two longitudes in radians Assume longitudes are within pi radians of each other Default orientation is monotonically increasing longitude from left to right */ const char fnc_nm[]="nco_lon_dff_brnch_rdn()"; const double lon_dff=lon_r-lon_l; /* [rdn] Longitude difference (lon_r-lon_l) */ //nco_bool dbg_prn=False; /* [flg] Print warning when longitude difference is suspicious */ /* Longitudes on different branch cuts are expected when computing polygon area, so warn only if requested with high debugging level */ if(lon_dff >= M_PI){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff); return lon_dff-M_PI-M_PI; }else if(lon_dff <= -M_PI){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports lon_r, lon_l, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_r,lon_l,lon_dff); return lon_dff+M_PI+M_PI; } /* !lon_dff */ return lon_dff; } /* !nco_lon_dff_brnch_rdn() */ double /* O [dgr] Longitude average */ nco_lon_crn_avg_brnch /* [fnc] Average quadrilateral longitude with branch-cut rules */ (double lon_ll, /* I [dgr] Longitude at lower left of gridcell */ double lon_lr, /* I [dgr] Longitude at lower right of gridcell */ double lon_ur, /* I [dgr] Longitude at upper right of gridcell */ double lon_ul) /* I [dgr] Longitude at upper left of gridcell */ { /* Purpose: Return average of four corner longitudes of quadrilateral Assume longitudes are within 180 degrees of each other Default orientation is monotonically increasing longitude from left to right WLOG, adjust all longitudes to be on same branch as lon_ll */ const char fnc_nm[]="nco_lon_crn_avg_brnch()"; double lon_dff; /* [dgr] Longitude difference */ lon_dff=lon_lr-lon_ll; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_lr, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_lr,lon_ll,lon_dff); lon_lr-=360.0; }else if(lon_dff <= -180.0){ lon_lr+=360.0; } /* !lon_dff */ lon_dff=lon_ur-lon_ll; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_ur, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_ur,lon_ll,lon_dff); lon_ur-=360.0; }else if(lon_dff <= -180.0){ lon_ur+=360.0; } /* !lon_dff */ lon_dff=lon_ul-lon_ll; if(lon_dff >= 180.0){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: INFO %s reports lon_ul, lon_ll, lon_dff = %g, %g, %g\n",nco_prg_nm_get(),fnc_nm,lon_ul,lon_ll,lon_dff); lon_ul-=360.0; }else if(lon_dff <= -180.0){ lon_ul+=360.0; } /* !lon_dff */ return 0.25*(lon_ll+lon_lr+lon_ur+lon_ul); } /* !nco_lon_crn_avg_brnch() */ double /* O [dgr] Longitude average */ nco_lon_ply_avg_brnch_dgr /* [fnc] Average polygon longitude with branch-cut rules */ (double *lon_crn, /* I [dgr] Longitude of gridcell corners */ long lon_nbr) /* I [nbr] Number of vertices in polygon */ { /* Purpose: Return average longitude of polygon vertices, i.e., centroid longitude Assume longitudes are within 180 degrees of one another Default orientation is monotonically increasing longitude from left to right WLOG, adjust all longitudes to be on same branch as lon_ll */ // const char fnc_nm[]="nco_lon_ply_avg_brnch()"; double lon_dff; /* [dgr] Longitude difference */ double lon_avg; /* [dgr] Longitude average */ int lon_idx; /*
[idx] Polygon vertex index */ assert(lon_nbr != 0); lon_avg=lon_crn[0]; for(lon_idx=1;lon_idx<lon_nbr;lon_idx++){ lon_avg+=lon_crn[lon_idx]; lon_dff=lon_crn[lon_idx]-lon_crn[0]; if(lon_dff >= 180.0){ lon_avg-=360.0; }else if(lon_dff <= -180.0){ lon_avg+=360.0; } /* !lon_dff */ } /* !lon_idx */ return lon_avg/lon_nbr; } /* !nco_lon_ply_avg_brnch() */ nco_bool /* O [flg] Input corners were CCW */ nco_ccw_chk /* [fnc] Convert quadrilateral gridcell corners to CCW orientation */ (double * const crn_lat, /* [dgr] Latitude corners of gridcell */ double * const crn_lon, /* [dgr] Longitude corners of gridcell */ const int crn_nbr, /* [nbr] Number of corners per gridcell */ int idx_ccw, /* [idx] Index of starting vertex for CCW check (Point A = tail side AB) */ const int rcr_lvl) /* [nbr] Recursion level */ { /* Purpose: Determine whether corner vertices are oriented CCW If not, alter order so they are returned in CCW order Function can call itself, and rcr_lvl indicates recursion level: rcr_lvl=1: Called by host code, i.e., nco_grd_nfr() rcr_lvl=2: Called by itself, i.e., nco_ccw_chk() Assumptions: Quadrilateral vertices are already corrected to obey branch-cut rules, i.e., all vertices are on "same side" of dateline or Greenwich as appropriate Algorithm: Start crn_idx=0, i.e., quadrilateral LL corner Vector A runs from crn_idx=0 to crn_idx=1, i.e., quadrilateral LL->LR Vector B runs from crn_idx=1 to crn_idx=2, i.e., quadrilateral LR->UR Compute cross-product C = A x B C is normal to plane containing A and B Dot-product of C with radial vector to head A = tail B is positive if A and B are CCW if(ABC is CCW){ if(CDA is CCW) Done else Copy D:=A (make CDA degenerate, triangularize quadrilateral) endif }else(ABC is not CCW){ Assume entire quadrilateral is CW Take mirror image of quadrilateral by switching B with D If(new ABC is CCW){ If(CDA is CCW) Done else Copy D:=A (make CDA degenerate, triangularize quadrilateral) endif }else{ Fail (return False, meaning point should be masked) } All cases return True (i.e., CCW) from rcr_lvl=1 except last Last case returns False, and calling code should mask such an aberrant point */ const char fnc_nm[]="nco_ccw_chk()"; /* MSVC compiler chokes unless array size is compile-time constant */ const int CRN_NBR_MSVC=4; double sin_lat[CRN_NBR_MSVC]; double sin_lon[CRN_NBR_MSVC]; double cos_lat[CRN_NBR_MSVC]; double cos_lon[CRN_NBR_MSVC]; double A_tail_x,A_tail_y,A_tail_z; double A_head_x,A_head_y,A_head_z; double A_x,A_y,A_z; double B_tail_x,B_tail_y,B_tail_z; double B_head_x,B_head_y,B_head_z; double B_x,B_y,B_z; double C_x,C_y,C_z; double R_x,R_y,R_z; double lat_rdn; double lon_rdn; double dot_prd; int crn_idx; /* [idx] Corner idx */ int A_tail_idx,A_head_idx; int B_tail_idx,B_head_idx; nco_bool flg_ccw; /* [flg] Input is CCW */ assert(crn_nbr == CRN_NBR_MSVC); for(crn_idx=0;crn_idx<crn_nbr;crn_idx++){ lat_rdn=crn_lat[crn_idx]*M_PI/180.0; lon_rdn=crn_lon[crn_idx]*M_PI/180.0; sin_lat[crn_idx]=sin(lat_rdn); cos_lat[crn_idx]=cos(lat_rdn); sin_lon[crn_idx]=sin(lon_rdn); cos_lon[crn_idx]=cos(lon_rdn); } /* !crn_idx */ /* Calls from host code (i.e., nco_grd_nfr()) start at lower-left of quadrilateral ABCD = Point A = vertex 0 Calls from self can start from quadrilateral Point A or C To check triangle CDA, start at upper-right of quadrilateral ABCD = Point C = vertex 2 */ A_tail_idx=idx_ccw; A_head_idx=B_tail_idx=(A_tail_idx+1)%crn_nbr; B_head_idx=(B_tail_idx+1)%crn_nbr; A_tail_x=cos_lat[A_tail_idx]*cos_lon[A_tail_idx]; A_tail_y=cos_lat[A_tail_idx]*sin_lon[A_tail_idx];
A_tail_z=sin_lat[A_tail_idx]; A_head_x=B_tail_x=R_x=cos_lat[A_head_idx]*cos_lon[A_head_idx]; A_head_y=B_tail_y=R_y=cos_lat[A_head_idx]*sin_lon[A_head_idx]; A_head_z=B_tail_z=R_z=sin_lat[A_head_idx]; B_head_x=cos_lat[B_head_idx]*cos_lon[B_head_idx]; B_head_y=cos_lat[B_head_idx]*sin_lon[B_head_idx]; B_head_z=sin_lat[B_head_idx]; A_x=A_head_x-A_tail_x; A_y=A_head_y-A_tail_y; A_z=A_head_z-A_tail_z; B_x=B_head_x-B_tail_x; B_y=B_head_y-B_tail_y; B_z=B_head_z-B_tail_z; /* Cross-Product C = A x B */ C_x=A_y*B_z-B_y*A_z; C_y=-A_x*B_z+B_x*A_z; C_z=A_x*B_y-B_x*A_y; /* Dot-Product R dot C */ dot_prd=C_x*R_x+C_y*R_y+C_z*R_z; if(dot_prd > 0.0) flg_ccw=True; else flg_ccw=False; if(flg_ccw && crn_nbr == 4 && rcr_lvl == 1){ /* Original ABC is CCW, now check CDA */ idx_ccw=2; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1); if(!flg_ccw){ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports triangle ABC is CCW but triangle CDA is not in quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Setting D:=A to triangularize quadrilateral.\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd); /* Triangularize quadrilateral D:=A */ /* 20210411: From 2016 until today, nco_ccw_chk() overwrote fourth (UL) with first (LL) corner right here even when flg_ccw was True :( */ crn_lat[3]=crn_lat[0]; crn_lon[3]=crn_lon[0]; return True; } /* !flg_ccw */ }else if(!flg_ccw && crn_nbr == 4 && rcr_lvl == 1){ /* Original ABC is not CCW 20160124: Simplistic fix: reverse gridpoint order This only works for quadrilaterals without degenerate points */ double crn_tmp; if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: INFO %s reports triangle ABC is non-CCW in quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Mirror-imaging...\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd); crn_tmp=crn_lat[1]; crn_lat[1]=crn_lat[3]; crn_lat[3]=crn_tmp; crn_tmp=crn_lon[1]; crn_lon[1]=crn_lon[3]; crn_lon[3]=crn_tmp; /* Check new triangle ABC */ idx_ccw=0; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1); if(flg_ccw){ /* Inverted ABC is CCW, now check CDA */ idx_ccw=2; flg_ccw=nco_ccw_chk(crn_lat,crn_lon,crn_nbr,idx_ccw,rcr_lvl+1); if(flg_ccw){ return True; }else{ if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_io) (void)fprintf(stdout,"%s: INFO %s reports triangle ABC is CCW after inversion, but triangle CDA is not at quadrilateral gridcell with LL (lat,lon) = (%g, %g), dot_prd = %g. Setting D:=A to triangularize quadrilateral.\n",nco_prg_nm_get(),fnc_nm,*crn_lat+0,*crn_lon+0,dot_prd); /* Triangularize quadrilateral D:=A */ crn_lat[3]=crn_lat[0]; crn_lon[3]=crn_lon[0]; return True; } /* flg_ccw */ }else{ /* Original and Inverted ABC are not CCW */ if(!flg_ccw && nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(stdout,"%s: WARNING %s reports triangle ABC remains non-CCW after first inversion\n",nco_prg_nm_get(),fnc_nm); return False; } /* !flg_ccw */ } /* flg_ccw */ return flg_ccw; } /* !nco_ccw_chk() */
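/* The following standalone sketch (not part of NCO; the file name ccw_demo.c, the helper ll2xyz(), and the demo corner values are invented for illustration) isolates the cross-product/dot-product orientation test that nco_ccw_chk() applies to gridcell corners. Compile separately, e.g., cc ccw_demo.c -lm */
#include <math.h>
#include <stdio.h>

/* Convert (lat,lon) in degrees to Cartesian coordinates on the unit sphere */
static void ll2xyz(double lat_dgr,double lon_dgr,double *x,double *y,double *z)
{
  const double dgr2rdn=M_PI/180.0;
  *x=cos(lat_dgr*dgr2rdn)*cos(lon_dgr*dgr2rdn);
  *y=cos(lat_dgr*dgr2rdn)*sin(lon_dgr*dgr2rdn);
  *z=sin(lat_dgr*dgr2rdn);
} /* !ll2xyz */

int main(void)
{
  /* Three corners ordered CCW as seen from outside the sphere */
  const double lat[3]={0.0,0.0,1.0}; /* [dgr] */
  const double lon[3]={0.0,1.0,1.0}; /* [dgr] */
  double ax,ay,az,bx,by,bz,cx,cy,cz;
  double A_x,A_y,A_z,B_x,B_y,B_z,C_x,C_y,C_z,dot_prd;
  ll2xyz(lat[0],lon[0],&ax,&ay,&az);
  ll2xyz(lat[1],lon[1],&bx,&by,&bz);
  ll2xyz(lat[2],lon[2],&cx,&cy,&cz);
  /* A = edge from vertex 0 to 1, B = edge from vertex 1 to 2, C = A x B */
  A_x=bx-ax;A_y=by-ay;A_z=bz-az;
  B_x=cx-bx;B_y=cy-by;B_z=cz-bz;
  C_x=A_y*B_z-B_y*A_z;
  C_y=-A_x*B_z+B_x*A_z;
  C_z=A_x*B_y-B_x*A_y;
  /* Project C onto radial vector at the shared vertex: positive => CCW */
  dot_prd=C_x*bx+C_y*by+C_z*bz;
  (void)printf("dot_prd = %g => %s\n",dot_prd,(dot_prd > 0.0) ? "CCW" : "CW or degenerate");
  return 0;
} /* !main */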
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// This is a basic class for representing a single OpenMP executable /// directive. /// class OMPExecutableDirective : public Stmt { friend class ASTStmtReader; /// Kind of the directive. OpenMPDirectiveKind Kind; /// Starting location of the directive (directive keyword). SourceLocation StartLoc; /// Ending location of the directive. SourceLocation EndLoc; /// Number of clauses. const unsigned NumClauses; /// Number of child expressions/stmts. const unsigned NumChildren; /// Offset from this to the start of clauses. /// There are NumClauses pointers to clauses, they are followed by /// NumChildren pointers to child stmts/exprs (if the directive type /// requires an associated stmt, then it has to be the first of them). const unsigned ClausesOffset; /// Get the clauses storage. MutableArrayRef<OMPClause *> getClauses() { OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>( reinterpret_cast<char *>(this) + ClausesOffset); return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses); } protected: /// Build instance of directive of class \a K. /// /// \param SC Statement class. /// \param K Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// template <typename T> OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses, unsigned NumChildren) : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)), EndLoc(std::move(EndLoc)), NumClauses(NumClauses), NumChildren(NumChildren), ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {} /// Sets the list of clauses for this directive. /// /// \param Clauses The list of clauses for the directive. /// void setClauses(ArrayRef<OMPClause *> Clauses); /// Set the associated statement for the directive. /// /// \param S Associated statement. /// void setAssociatedStmt(Stmt *S) { assert(hasAssociatedStmt() && "no associated statement."); *child_begin() = S; } public: /// Iterates over expressions/statements used in the construct.
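/// For each clause attached to the directive, the iterator below visits the
/// statements in that clause's used_children() range, flattening all of them
/// into a single forward iteration.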
class used_clauses_child_iterator : public llvm::iterator_adaptor_base< used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator, std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> { ArrayRef<OMPClause *>::iterator End; OMPClause::child_iterator ChildI, ChildEnd; void MoveToNext() { if (ChildI != ChildEnd) return; while (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); if (ChildI != ChildEnd) return; } } } public: explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses) : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); MoveToNext(); } } Stmt *operator*() const { return *ChildI; } Stmt *operator->() const { return **this; } used_clauses_child_iterator &operator++() { ++ChildI; if (ChildI != ChildEnd) return *this; if (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); } } MoveToNext(); return *this; } }; static llvm::iterator_range<used_clauses_child_iterator> used_clauses_children(ArrayRef<OMPClause *> Clauses) { return {used_clauses_child_iterator(Clauses), used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))}; } /// Iterates over a filtered subrange of clauses applied to a /// directive. /// /// This iterator visits only clauses of type SpecificClause. template <typename SpecificClause> class specific_clause_iterator : public llvm::iterator_adaptor_base< specific_clause_iterator<SpecificClause>, ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag, const SpecificClause *, ptrdiff_t, const SpecificClause *, const SpecificClause *> { ArrayRef<OMPClause *>::const_iterator End; void SkipToNextClause() { while (this->I != End && !isa<SpecificClause>(*this->I)) ++this->I; } public: explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses) : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { SkipToNextClause(); } const SpecificClause *operator*() const { return cast<SpecificClause>(*this->I); } const SpecificClause *operator->() const { return **this; } specific_clause_iterator &operator++() { ++this->I; SkipToNextClause(); return *this; } }; template <typename SpecificClause> static llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind(ArrayRef<OMPClause *> Clauses) { return {specific_clause_iterator<SpecificClause>(Clauses), specific_clause_iterator<SpecificClause>( llvm::makeArrayRef(Clauses.end(), 0))}; } template <typename SpecificClause> llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind() const { return getClausesOfKind<SpecificClause>(clauses()); } /// Gets a single clause of the specified kind associated with the /// current directive iff there is only one clause of this kind (an assertion /// fires if more than one clause of this kind is associated with the /// directive). Returns nullptr if no clause of this kind is associated with /// the directive.
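///
/// A typical use (an illustrative sketch; \c D stands for any
/// OMPExecutableDirective pointer and \c handleIfClause is a hypothetical
/// callback, neither defined in this file):
/// \code
/// if (const auto *IC = D->getSingleClause<OMPIfClause>())
///   handleIfClause(IC);
/// \endcode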
template <typename SpecificClause> const SpecificClause *getSingleClause() const { auto Clauses = getClausesOfKind<SpecificClause>(); if (Clauses.begin() != Clauses.end()) { assert(std::next(Clauses.begin()) == Clauses.end() && "There are at least 2 clauses of the specified kind"); return *Clauses.begin(); } return nullptr; } /// Returns true if the current directive has one or more clauses of a /// specific kind. template <typename SpecificClause> bool hasClausesOfKind() const { auto Clauses = getClausesOfKind<SpecificClause>(); return Clauses.begin() != Clauses.end(); } /// Returns starting location of directive kind. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns ending location of directive. SourceLocation getEndLoc() const { return EndLoc; } /// Set starting location of directive kind. /// /// \param Loc New starting location of directive. /// void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Set ending location of directive. /// /// \param Loc New ending location of directive. /// void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Get number of clauses. unsigned getNumClauses() const { return NumClauses; } /// Returns specified clause. /// /// \param i Number of clause. /// OMPClause *getClause(unsigned i) const { return clauses()[i]; } /// Returns true if directive has associated statement. bool hasAssociatedStmt() const { return NumChildren > 0; } /// Returns statement associated with the directive. const Stmt *getAssociatedStmt() const { assert(hasAssociatedStmt() && "no associated statement."); return *child_begin(); } Stmt *getAssociatedStmt() { assert(hasAssociatedStmt() && "no associated statement."); return *child_begin(); } /// Returns the captured statement associated with the /// component region within the (combined) directive. // // \param RegionKind Component region kind. const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const { SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind()); assert(std::any_of( CaptureRegions.begin(), CaptureRegions.end(), [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) && "RegionKind not found in OpenMP CaptureRegions."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (auto ThisCaptureRegion : CaptureRegions) { if (ThisCaptureRegion == RegionKind) return CS; CS = cast<CapturedStmt>(CS->getCapturedStmt()); } llvm_unreachable("Incorrect RegionKind specified for directive."); } /// Get innermost captured statement for the construct. 
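/// For combined directives the associated statement is a chain of nested
/// CapturedStmt regions, one per capture region returned by
/// getOpenMPCaptureRegions(); the loop below unwraps all but the innermost.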
CapturedStmt *getInnermostCapturedStmt() { assert(hasAssociatedStmt() && getAssociatedStmt() && "Must have associated statement."); SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind()); assert(!CaptureRegions.empty() && "At least one captured statement must be provided."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (unsigned Level = CaptureRegions.size(); Level > 1; --Level) CS = cast<CapturedStmt>(CS->getCapturedStmt()); return CS; } const CapturedStmt *getInnermostCapturedStmt() const { return const_cast<OMPExecutableDirective *>(this) ->getInnermostCapturedStmt(); } OpenMPDirectiveKind getDirectiveKind() const { return Kind; } static bool classof(const Stmt *S) { return S->getStmtClass() >= firstOMPExecutableDirectiveConstant && S->getStmtClass() <= lastOMPExecutableDirectiveConstant; } child_range children() { if (!hasAssociatedStmt()) return child_range(child_iterator(), child_iterator()); Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end()); /// Do not mark all the special expression/statements as children, except /// for the associated statement. return child_range(ChildStorage, ChildStorage + 1); } const_child_range children() const { if (!hasAssociatedStmt()) return const_child_range(const_child_iterator(), const_child_iterator()); Stmt **ChildStorage = reinterpret_cast<Stmt **>( const_cast<OMPExecutableDirective *>(this)->getClauses().end()); return const_child_range(ChildStorage, ChildStorage + 1); } ArrayRef<OMPClause *> clauses() { return getClauses(); } ArrayRef<OMPClause *> clauses() const { return const_cast<OMPExecutableDirective *>(this)->getClauses(); } /// Returns whether or not this is a Standalone directive. /// /// Stand-alone directives are executable directives /// that have no associated user code. bool isStandaloneDirective() const; /// Returns the AST node representing the OpenMP structured block of this /// OpenMP executable directive. /// Prerequisite: the directive must not be a standalone directive. const Stmt *getStructuredBlock() const; Stmt *getStructuredBlock() { return const_cast<Stmt *>( const_cast<const OMPExecutableDirective *>(this)->getStructuredBlock()); } }; /// This represents '#pragma omp parallel' directive. /// /// \code /// #pragma omp parallel private(a,b) reduction(+: c,d) /// \endcode /// In this example directive '#pragma omp parallel' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending Location of the directive. /// \param NumClauses Number of clauses. /// OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, llvm::omp::OMPD_parallel, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, llvm::omp::OMPD_parallel, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses.
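/// /// Illustrative call (a sketch; the argument names are placeholders for /// values the caller, typically Sema, already has): /// \code /// OMPParallelDirective *D = OMPParallelDirective::Create( ///     Ctx, StartLoc, EndLoc, Clauses, AssociatedStmt, /*HasCancel=*/false); /// \endcode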
/// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement associated with the directive. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelDirectiveClass; } }; /// This is a common base class for loop directives ('omp simd', 'omp /// for', 'omp for simd' etc.). It is responsible for the loop code generation. /// class OMPLoopDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Number of collapsed loops as specified by 'collapse' clause. unsigned CollapsedNum; /// Offsets to the stored exprs. /// This enumeration contains offsets to all the pointers to children /// expressions stored in OMPLoopDirective. /// The first 9 children are necessary for all the loop directives, /// the next 8 are specific to the worksharing ones, and the next 13 are /// used for combined constructs containing two pragmas associated to loops. /// After the fixed children, eight arrays of length CollapsedNum are /// allocated: loop counters, their private copies, inits, updates, finals, /// dependent counters, dependent inits and finals conditions. /// PrevLowerBound and PrevUpperBound are used to communicate blocking /// information in composite constructs which require loop blocking. /// DistInc is used to generate the increment expression for the distribute /// loop when combined with a further nested loop. /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the /// for loop when combined with a previous distribute loop in the same pragma /// (e.g. 'distribute parallel for'). /// enum { AssociatedStmtOffset = 0, IterationVariableOffset = 1, LastIterationOffset = 2, CalcLastIterationOffset = 3, PreConditionOffset = 4, CondOffset = 5, InitOffset = 6, IncOffset = 7, PreInitsOffset = 8, // The '...End' enumerators do not correspond to child expressions - they // specify the offset to the end (and start of the following counters/ // updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays). DefaultEnd = 9, // The following 8 exprs are used by worksharing and distribute loops only. IsLastIterVariableOffset = 9, LowerBoundVariableOffset = 10, UpperBoundVariableOffset = 11, StrideVariableOffset = 12, EnsureUpperBoundOffset = 13, NextLowerBoundOffset = 14, NextUpperBoundOffset = 15, NumIterationsOffset = 16, // Offset to the end for worksharing loop directives.
WorksharingEnd = 17, PrevLowerBoundVariableOffset = 17, PrevUpperBoundVariableOffset = 18, DistIncOffset = 19, PrevEnsureUpperBoundOffset = 20, CombinedLowerBoundVariableOffset = 21, CombinedUpperBoundVariableOffset = 22, CombinedEnsureUpperBoundOffset = 23, CombinedInitOffset = 24, CombinedConditionOffset = 25, CombinedNextLowerBoundOffset = 26, CombinedNextUpperBoundOffset = 27, CombinedDistConditionOffset = 28, CombinedParForInDistConditionOffset = 29, // Offset to the end (and start of the following // counters/updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays) for combined distribute loop directives. CombinedDistributeEnd = 30, }; /// Get the counters storage. MutableArrayRef<Expr *> getCounters() { Expr **Storage = reinterpret_cast<Expr **>( &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind()))))); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the private counters storage. MutableArrayRef<Expr *> getPrivateCounters() { Expr **Storage = reinterpret_cast<Expr **>(&*std::next( child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the inits storage. MutableArrayRef<Expr *> getInits() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the updates storage. MutableArrayRef<Expr *> getUpdates() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the final counter updates storage. MutableArrayRef<Expr *> getFinals() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the dependent counters storage. MutableArrayRef<Expr *> getDependentCounters() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 5 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the dependent inits storage. MutableArrayRef<Expr *> getDependentInits() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 6 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the finals conditions storage. MutableArrayRef<Expr *> getFinalsConditions() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 7 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } protected: /// Build instance of loop directive of class \a Kind. /// /// \param SC Statement class. /// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed loops from 'collapse' clause. /// \param NumClauses Number of clauses. /// \param NumSpecialChildren Number of additional directive-specific stmts.
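/// /// The total number of children allocated for the directive is /// numLoopChildren(CollapsedNum, Kind) + NumSpecialChildren, i.e. the fixed /// expression slots for \a Kind followed by eight arrays of \a CollapsedNum /// expressions each (counters, private counters, inits, updates, finals, /// dependent counters, dependent inits and finals conditions).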
/// template <typename T> OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses, unsigned NumSpecialChildren = 0) : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses, numLoopChildren(CollapsedNum, Kind) + NumSpecialChildren), CollapsedNum(CollapsedNum) {} /// Offset to the start of children expression arrays. static unsigned getArraysOffset(OpenMPDirectiveKind Kind) { if (isOpenMPLoopBoundSharingDirective(Kind)) return CombinedDistributeEnd; if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) || isOpenMPDistributeDirective(Kind)) return WorksharingEnd; return DefaultEnd; } /// Children number. static unsigned numLoopChildren(unsigned CollapsedNum, OpenMPDirectiveKind Kind) { return getArraysOffset(Kind) + 8 * CollapsedNum; // Counters, PrivateCounters, Inits, // Updates, Finals, DependentCounters, // DependentInits, FinalsConditions. } void setIterationVariable(Expr *IV) { *std::next(child_begin(), IterationVariableOffset) = IV; } void setLastIteration(Expr *LI) { *std::next(child_begin(), LastIterationOffset) = LI; } void setCalcLastIteration(Expr *CLI) { *std::next(child_begin(), CalcLastIterationOffset) = CLI; } void setPreCond(Expr *PC) { *std::next(child_begin(), PreConditionOffset) = PC; } void setCond(Expr *Cond) { *std::next(child_begin(), CondOffset) = Cond; } void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; } void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; } void setPreInits(Stmt *PreInits) { *std::next(child_begin(), PreInitsOffset) = PreInits; } void setIsLastIterVariable(Expr *IL) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), IsLastIterVariableOffset) = IL; } void setLowerBoundVariable(Expr *LB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), LowerBoundVariableOffset) = LB; } void setUpperBoundVariable(Expr *UB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), UpperBoundVariableOffset) = UB; } void setStrideVariable(Expr *ST) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), StrideVariableOffset) = ST; } void setEnsureUpperBound(Expr *EUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), EnsureUpperBoundOffset) = EUB; } void setNextLowerBound(Expr *NLB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NextLowerBoundOffset) = NLB; } void setNextUpperBound(Expr *NUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) 
|| isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NextUpperBoundOffset) = NUB; } void setNumIterations(Expr *NI) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NumIterationsOffset) = NI; } void setPrevLowerBoundVariable(Expr *PrevLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB; } void setPrevUpperBoundVariable(Expr *PrevUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB; } void setDistInc(Expr *DistInc) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), DistIncOffset) = DistInc; } void setPrevEnsureUpperBound(Expr *PrevEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB; } void setCombinedLowerBoundVariable(Expr *CombLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB; } void setCombinedUpperBoundVariable(Expr *CombUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB; } void setCombinedEnsureUpperBound(Expr *CombEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB; } void setCombinedInit(Expr *CombInit) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedInitOffset) = CombInit; } void setCombinedCond(Expr *CombCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedConditionOffset) = CombCond; } void setCombinedNextLowerBound(Expr *CombNLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB; } void setCombinedNextUpperBound(Expr *CombNUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB; } void setCombinedDistCond(Expr *CombDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); *std::next(child_begin(), CombinedDistConditionOffset) = CombDistCond; } void setCombinedParForInDistCond(Expr *CombParForInDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); *std::next(child_begin(), CombinedParForInDistConditionOffset) = CombParForInDistCond; } void setCounters(ArrayRef<Expr *> A); void setPrivateCounters(ArrayRef<Expr *> A); void setInits(ArrayRef<Expr *> A); void setUpdates(ArrayRef<Expr *> A); 
void setFinals(ArrayRef<Expr *> A); void setDependentCounters(ArrayRef<Expr *> A); void setDependentInits(ArrayRef<Expr *> A); void setFinalsConditions(ArrayRef<Expr *> A); public: /// The expressions built to support OpenMP loops in combined/composite /// pragmas (e.g. pragma omp distribute parallel for). struct DistCombinedHelperExprs { /// DistributeLowerBound - used when composing 'omp distribute' with /// 'omp for' in the same construct. Expr *LB; /// DistributeUpperBound - used when composing 'omp distribute' with /// 'omp for' in the same construct. Expr *UB; /// DistributeEnsureUpperBound - used when composing 'omp distribute' /// with 'omp for' in the same construct; EUB depends on DistUB. Expr *EUB; /// Distribute loop iteration variable init used when composing 'omp /// distribute' /// with 'omp for' in the same construct. Expr *Init; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in the same construct. Expr *Cond; /// Update of LowerBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NLB; /// Update of UpperBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NUB; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in the same construct when schedule is chunked. Expr *DistCond; /// 'omp parallel for' loop condition used when composed with /// 'omp distribute' in the same construct and when schedule is /// chunked and the chunk size is 1. Expr *ParForInDistCond; }; /// The expressions built for the OpenMP loop CodeGen for the /// whole collapsed loop nest. struct HelperExprs { /// Loop iteration variable. Expr *IterationVarRef; /// Loop last iteration number. Expr *LastIteration; /// Loop number of iterations. Expr *NumIterations; /// Calculation of last iteration. Expr *CalcLastIteration; /// Loop pre-condition. Expr *PreCond; /// Loop condition. Expr *Cond; /// Loop iteration variable init. Expr *Init; /// Loop increment. Expr *Inc; /// IsLastIteration - local flag variable passed to runtime. Expr *IL; /// LowerBound - local variable passed to runtime. Expr *LB; /// UpperBound - local variable passed to runtime. Expr *UB; /// Stride - local variable passed to runtime. Expr *ST; /// EnsureUpperBound -- expression UB = min(UB, NumIterations). Expr *EUB; /// Update of LowerBound for statically scheduled 'omp for' loops. Expr *NLB; /// Update of UpperBound for statically scheduled 'omp for' loops. Expr *NUB; /// PreviousLowerBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevLB; /// PreviousUpperBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevUB; /// DistInc - increment expression for distribute loop when found /// combined with a further loop level (e.g. in 'distribute parallel for') /// expression IV = IV + ST Expr *DistInc; /// PrevEUB - expression similar to EUB but to be used when loop /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for' /// when ensuring that the UB is either the calculated UB by the runtime or /// the end of the assigned distribute chunk) /// expression UB = min (UB, PrevUB) Expr *PrevEUB; /// Counters - Loop counters. SmallVector<Expr *, 4> Counters; /// PrivateCounters - Private copies of the loop counters. SmallVector<Expr *, 4> PrivateCounters; /// Expressions for loop counters inits for CodeGen.
SmallVector<Expr *, 4> Inits; /// Expressions for loop counters update for CodeGen. SmallVector<Expr *, 4> Updates; /// Final loop counter values for CodeGen. SmallVector<Expr *, 4> Finals; /// List of counters required for the generation of the non-rectangular /// loops. SmallVector<Expr *, 4> DependentCounters; /// List of initializers required for the generation of the non-rectangular /// loops. SmallVector<Expr *, 4> DependentInits; /// List of final conditions required for the generation of the /// non-rectangular loops. SmallVector<Expr *, 4> FinalsConditions; /// Init statement for all captured expressions. Stmt *PreInits; /// Expressions used when combining OpenMP loop pragmas. DistCombinedHelperExprs DistCombinedFields; /// Check if all the expressions are built (does not check the /// worksharing ones). bool builtAll() { return IterationVarRef != nullptr && LastIteration != nullptr && NumIterations != nullptr && PreCond != nullptr && Cond != nullptr && Init != nullptr && Inc != nullptr; } /// Initialize all the fields to null. /// \param Size Number of elements in the /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions /// arrays. void clear(unsigned Size) { IterationVarRef = nullptr; LastIteration = nullptr; CalcLastIteration = nullptr; PreCond = nullptr; Cond = nullptr; Init = nullptr; Inc = nullptr; IL = nullptr; LB = nullptr; UB = nullptr; ST = nullptr; EUB = nullptr; NLB = nullptr; NUB = nullptr; NumIterations = nullptr; PrevLB = nullptr; PrevUB = nullptr; DistInc = nullptr; PrevEUB = nullptr; Counters.resize(Size); PrivateCounters.resize(Size); Inits.resize(Size); Updates.resize(Size); Finals.resize(Size); DependentCounters.resize(Size); DependentInits.resize(Size); FinalsConditions.resize(Size); for (unsigned i = 0; i < Size; ++i) { Counters[i] = nullptr; PrivateCounters[i] = nullptr; Inits[i] = nullptr; Updates[i] = nullptr; Finals[i] = nullptr; DependentCounters[i] = nullptr; DependentInits[i] = nullptr; FinalsConditions[i] = nullptr; } PreInits = nullptr; DistCombinedFields.LB = nullptr; DistCombinedFields.UB = nullptr; DistCombinedFields.EUB = nullptr; DistCombinedFields.Init = nullptr; DistCombinedFields.Cond = nullptr; DistCombinedFields.NLB = nullptr; DistCombinedFields.NUB = nullptr; DistCombinedFields.DistCond = nullptr; DistCombinedFields.ParForInDistCond = nullptr; } }; /// Get number of collapsed loops.
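/// /// Illustrative use (a sketch; \c LD is assumed to be a pointer to an /// OMPLoopDirective, and visitCounter is a hypothetical helper): each /// per-loop array below has getCollapsedNumber() elements. /// \code /// for (unsigned I = 0, E = LD->getCollapsedNumber(); I != E; ++I) ///   visitCounter(LD->counters()[I]); /// \endcode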
unsigned getCollapsedNumber() const { return CollapsedNum; } Expr *getIterationVariable() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), IterationVariableOffset))); } Expr *getLastIteration() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), LastIterationOffset))); } Expr *getCalcLastIteration() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CalcLastIterationOffset))); } Expr *getPreCond() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PreConditionOffset))); } Expr *getCond() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset))); } Expr *getInit() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset))); } Expr *getInc() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset))); } const Stmt *getPreInits() const { return *std::next(child_begin(), PreInitsOffset); } Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); } Expr *getIsLastIterVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), IsLastIterVariableOffset))); } Expr *getLowerBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), LowerBoundVariableOffset))); } Expr *getUpperBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), UpperBoundVariableOffset))); } Expr *getStrideVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), StrideVariableOffset))); } Expr *getEnsureUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), EnsureUpperBoundOffset))); } Expr *getNextLowerBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NextLowerBoundOffset))); } Expr *getNextUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr 
*>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NextUpperBoundOffset))); } Expr *getNumIterations() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NumIterationsOffset))); } Expr *getPrevLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevLowerBoundVariableOffset))); } Expr *getPrevUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevUpperBoundVariableOffset))); } Expr *getDistInc() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), DistIncOffset))); } Expr *getPrevEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevEnsureUpperBoundOffset))); } Expr *getCombinedLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedLowerBoundVariableOffset))); } Expr *getCombinedUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedUpperBoundVariableOffset))); } Expr *getCombinedEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedEnsureUpperBoundOffset))); } Expr *getCombinedInit() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedInitOffset))); } Expr *getCombinedCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedConditionOffset))); } Expr *getCombinedNextLowerBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedNextLowerBoundOffset))); } Expr *getCombinedNextUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedNextUpperBoundOffset))); } Expr *getCombinedDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( 
*std::next(child_begin(), CombinedDistConditionOffset))); } Expr *getCombinedParForInDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedParForInDistConditionOffset))); } /// Try to find the next loop sub-statement in the specified statement \p /// CurStmt. /// \param TryImperfectlyNestedLoops true, if we need to try to look for the /// imperfectly nested loop. static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt, bool TryImperfectlyNestedLoops); static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt, bool TryImperfectlyNestedLoops) { return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops); } Stmt *getBody(); const Stmt *getBody() const { return const_cast<OMPLoopDirective *>(this)->getBody(); } ArrayRef<Expr *> counters() { return getCounters(); } ArrayRef<Expr *> counters() const { return const_cast<OMPLoopDirective *>(this)->getCounters(); } ArrayRef<Expr *> private_counters() { return getPrivateCounters(); } ArrayRef<Expr *> private_counters() const { return const_cast<OMPLoopDirective *>(this)->getPrivateCounters(); } ArrayRef<Expr *> inits() { return getInits(); } ArrayRef<Expr *> inits() const { return const_cast<OMPLoopDirective *>(this)->getInits(); } ArrayRef<Expr *> updates() { return getUpdates(); } ArrayRef<Expr *> updates() const { return const_cast<OMPLoopDirective *>(this)->getUpdates(); } ArrayRef<Expr *> finals() { return getFinals(); } ArrayRef<Expr *> finals() const { return const_cast<OMPLoopDirective *>(this)->getFinals(); } ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); } ArrayRef<Expr *> dependent_counters() const { return const_cast<OMPLoopDirective *>(this)->getDependentCounters(); } ArrayRef<Expr *> dependent_inits() { return getDependentInits(); } ArrayRef<Expr *> dependent_inits() const { return const_cast<OMPLoopDirective *>(this)->getDependentInits(); } ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); } ArrayRef<Expr *> finals_conditions() const { return const_cast<OMPLoopDirective *>(this)->getFinalsConditions(); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass || T->getStmtClass() == OMPForDirectiveClass || T->getStmtClass() == OMPForSimdDirectiveClass || T->getStmtClass() == OMPParallelForDirectiveClass || T->getStmtClass() == OMPParallelForSimdDirectiveClass || T->getStmtClass() == OMPTaskLoopDirectiveClass || T->getStmtClass() == OMPTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPMasterTaskLoopDirectiveClass || T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass || T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPDistributeDirectiveClass || T->getStmtClass() == OMPTargetParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPDistributeSimdDirectiveClass || T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass || 
T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp simd' directive. /// /// \code /// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, llvm::omp::OMPD_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, llvm::omp::OMPD_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass; } }; /// This represents '#pragma omp for' directive. /// /// \code /// #pragma omp for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for' has clauses 'private' with the /// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c' /// and 'd'. /// class OMPForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, llvm::omp::OMPD_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, llvm::omp::OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// This represents '#pragma omp for simd' directive. /// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. 
/// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// This represents '#pragma omp sections' directive. /// /// \code /// #pragma omp sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp sections' has clauses 'private' with /// the variables 'a' and 'b' and 'reduction' with operator '+' and variables /// 'c' and 'd'. /// class OMPSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, llvm::omp::OMPD_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, llvm::omp::OMPD_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSectionsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionsDirectiveClass; } }; /// This represents '#pragma omp section' directive. /// /// \code /// #pragma omp section /// \endcode /// class OMPSectionDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive.
/// OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPSectionDirectiveClass, llvm::omp::OMPD_section, StartLoc, EndLoc, 0, 1), HasCancel(false) {} /// Build an empty directive. /// explicit OMPSectionDirective() : OMPExecutableDirective(this, OMPSectionDirectiveClass, llvm::omp::OMPD_section, SourceLocation(), SourceLocation(), 0, 1), HasCancel(false) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPSectionDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive. /// /// \param C AST context. /// static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionDirectiveClass; } }; /// This represents '#pragma omp single' directive. /// /// \code /// #pragma omp single private(a,b) copyprivate(c,d) /// \endcode /// In this example directive '#pragma omp single' has clauses 'private' with /// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'. /// class OMPSingleDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, llvm::omp::OMPD_single, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSingleDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, llvm::omp::OMPD_single, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPSingleDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSingleDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSingleDirectiveClass; } }; /// This represents '#pragma omp master' directive. /// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive.
/// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPMasterDirectiveClass, llvm::omp::OMPD_master, StartLoc, EndLoc, 0, 1) { } /// Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(this, OMPMasterDirectiveClass, llvm::omp::OMPD_master, SourceLocation(), SourceLocation(), 0, 1) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// This represents '#pragma omp critical' directive. /// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Name of the directive. DeclarationNameInfo DirName; /// Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, StartLoc, EndLoc, NumClauses, 1), DirName(Name) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPCriticalDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, SourceLocation(), SourceLocation(), NumClauses, 1), DirName() {} /// Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// This represents '#pragma omp parallel for' directive. /// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. 
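/// /// Like any OMPLoopDirective subclass it can be matched generically through /// LLVM-style RTTI (illustrative sketch; \c S is assumed to be a Stmt /// pointer): /// \code /// if (const auto *PF = dyn_cast<OMPParallelForDirective>(S)) ///   (void)PF->hasCancel(); /// \endcode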
/// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current region has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// \param NumClauses Number of clauses. /// explicit OMPParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master' directive. /// /// \code /// #pragma omp parallel master private(a,b) /// \endcode /// In this example directive '#pragma omp parallel master' has clauses /// 'private' with the variables 'a' and 'b'. /// class OMPParallelMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelMasterDirectiveClass, llvm::omp::OMPD_parallel_master, StartLoc, EndLoc, NumClauses, 1) {} explicit OMPParallelMasterDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelMasterDirectiveClass, llvm::omp::OMPD_parallel_master, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPParallelMasterDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelMasterDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterDirectiveClass; } }; /// This represents '#pragma omp parallel sections' directive. /// /// \code /// #pragma omp parallel sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel sections' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPParallelSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location.
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, llvm::omp::OMPD_parallel_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, llvm::omp::OMPD_parallel_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelSectionsDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelSectionsDirectiveClass; } }; /// This represents '#pragma omp task' directive. /// /// \code /// #pragma omp task private(a,b) final(d) /// \endcode /// In this example directive '#pragma omp task' has clauses 'private' with the /// variables 'a' and 'b' and 'final' with condition 'd'. /// class OMPTaskDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if this directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, llvm::omp::OMPD_task, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTaskDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, llvm::omp::OMPD_task, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true, if current directive has inner cancel directive. 
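///
/// An illustrative construction sketch (not part of the upstream comment;
/// 'Ctx', 'Loc', 'Clauses' and 'Body' are assumed placeholders for a valid
/// ASTContext, source location, clause list and associated statement):
/// \code
///   OMPTaskDirective *D = OMPTaskDirective::Create(
///       Ctx, Loc, Loc, Clauses, Body, /*HasCancel=*/true);
/// \endcode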
/// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// This represents '#pragma omp taskyield' directive. /// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskyieldDirective() : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(this, OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
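///
/// (Editorial aside: 'taskwait' is a standalone construct, which is why the
/// constructor below passes 0 clauses and 0 children. A minimal dispatch
/// sketch, assuming 'S' is a Stmt pointer obtained elsewhere:
/// \code
///   if (isa<OMPTaskwaitDirective>(S))
///     ; // wait for child tasks; nothing further to inspect
/// \endcode
/// )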
/// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskwaitDirective() : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// static OMPTaskwaitDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskwaitDirectiveClass; } }; /// This represents '#pragma omp taskgroup' directive. /// /// \code /// #pragma omp taskgroup /// \endcode /// class OMPTaskgroupDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, llvm::omp::OMPD_taskgroup, StartLoc, EndLoc, NumClauses, 2) {} /// Build an empty directive. /// \param NumClauses Number of clauses. /// explicit OMPTaskgroupDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, llvm::omp::OMPD_taskgroup, SourceLocation(), SourceLocation(), NumClauses, 2) {} /// Sets the task_reduction return variable. void setReductionRef(Expr *RR) { *std::next(child_begin(), 1) = RR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param ReductionRef Reference to the task_reduction return variable. /// static OMPTaskgroupDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *ReductionRef); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns reference to the task_reduction return variable. const Expr *getReductionRef() const { return static_cast<const Expr *>(*std::next(child_begin(), 1)); } Expr *getReductionRef() { return static_cast<Expr *>(*std::next(child_begin(), 1)); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskgroupDirectiveClass; } }; /// This represents '#pragma omp flush' directive. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has two arguments: the variables 'a' /// and 'b'. /// The 'omp flush' directive does not have clauses, but it may carry an optional list of /// variables to flush. This list of variables is stored within a synthetic (fake) clause, /// OMPFlushClause. class OMPFlushDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location.
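///
/// (Illustrative sketch, not upstream documentation: reading the flushed
/// variables back from the synthetic clause, assuming 'D' is a valid
/// OMPFlushDirective pointer:
/// \code
///   for (const auto *FC : D->getClausesOfKind<OMPFlushClause>())
///     ; // FC carries the variable list of 'flush(a,b)'
/// \endcode
/// )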
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, llvm::omp::OMPD_flush, StartLoc, EndLoc, NumClauses, 0) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPFlushDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, llvm::omp::OMPD_flush, SourceLocation(), SourceLocation(), NumClauses, 0) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). /// static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPFlushDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPFlushDirectiveClass; } }; /// This represents '#pragma omp ordered' directive. /// /// \code /// #pragma omp ordered /// \endcode /// class OMPOrderedDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPOrderedDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPOrderedDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPOrderedDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPOrderedDirectiveClass; } }; /// This represents '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has clause 'capture'. /// class OMPAtomicDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// x = x binop expr; /// x = expr binop x; /// \endcode /// This field is true for the first form of the expression and false for the /// second. 
Required for correct codegen of non-associative operations (like /// << or >>). bool IsXLHSInRHSPart; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// v = x; <update x>; /// <update x>; v = x; /// \endcode /// This field is true for the first(postfix) form of the expression and false /// otherwise. bool IsPostfixUpdate; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, StartLoc, EndLoc, NumClauses, 5), IsXLHSInRHSPart(false), IsPostfixUpdate(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPAtomicDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, SourceLocation(), SourceLocation(), NumClauses, 5), IsXLHSInRHSPart(false), IsPostfixUpdate(false) {} /// Set 'x' part of the associated expression/statement. void setX(Expr *X) { *std::next(child_begin()) = X; } /// Set helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; } /// Set 'v' part of the associated expression/statement. void setV(Expr *V) { *std::next(child_begin(), 3) = V; } /// Set 'expr' part of the associated expression/statement. void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; } public: /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr' /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for /// detailed description of 'x', 'v' and 'expr'). /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param X 'x' part of the associated expression/statement. /// \param V 'v' part of the associated expression/statement. /// \param E 'expr' part of the associated expression/statement. /// \param UE Helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the /// second. /// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. static OMPAtomicDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPAtomicDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get 'x' part of the associated expression/statement. 
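/// (Editorial schematic of the two update forms that IsXLHSInRHSPart
/// distinguishes; the operand order is observable for non-commutative
/// operators such as '<<':
/// \code
///   x = x << expr; // IsXLHSInRHSPart == true
///   x = expr << x; // IsXLHSInRHSPart == false
/// \endcode
/// )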
Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); } const Expr *getX() const { return cast_or_null<Expr>(*std::next(child_begin())); } /// Get helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. Expr *getUpdateExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 2)); } const Expr *getUpdateExpr() const { return cast_or_null<Expr>(*std::next(child_begin(), 2)); } /// Return true if helper update expression has form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; } /// Return true if 'v' expression must be updated to original value of /// 'x', false if 'v' must be updated to the new value of 'x'. bool isPostfixUpdate() const { return IsPostfixUpdate; } /// Get 'v' part of the associated expression/statement. Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); } const Expr *getV() const { return cast_or_null<Expr>(*std::next(child_begin(), 3)); } /// Get 'expr' part of the associated expression/statement. Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); } const Expr *getExpr() const { return cast_or_null<Expr>(*std::next(child_begin(), 4)); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPAtomicDirectiveClass; } }; /// This represents '#pragma omp target' directive. /// /// \code /// #pragma omp target if(a) /// \endcode /// In this example directive '#pragma omp target' has clause 'if' with /// condition 'a'. /// class OMPTargetDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDirectiveClass, llvm::omp::OMPD_target, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDirectiveClass, llvm::omp::OMPD_target, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDirectiveClass; } }; /// This represents '#pragma omp target data' directive. /// /// \code /// #pragma omp target data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target data' has clauses 'device' /// with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. 
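/// (Illustrative dispatch sketch, not upstream documentation; 'S' is an
/// assumed Stmt pointer and 'handleTargetData' a hypothetical handler:
/// \code
///   if (const auto *TD = dyn_cast<OMPTargetDataDirective>(S))
///     handleTargetData(TD);
/// \endcode
/// )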
/// class OMPTargetDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDataDirectiveClass; } }; /// This represents '#pragma omp target enter data' directive. /// /// \code /// #pragma omp target enter data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target enter data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetEnterDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetEnterDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetEnterDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. 
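///
/// An illustrative deserialization-style call (editorial sketch; 'Ctx' and
/// 'NumClauses' are assumed placeholders):
/// \code
///   auto *Empty = OMPTargetEnterDataDirective::CreateEmpty(
///       Ctx, NumClauses, Stmt::EmptyShell());
/// \endcode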
/// static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetEnterDataDirectiveClass; } }; /// This represents '#pragma omp target exit data' directive. /// /// \code /// #pragma omp target exit data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target exit data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetExitDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetExitDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetExitDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetExitDataDirectiveClass; } }; /// This represents '#pragma omp target parallel' directive. /// /// \code /// #pragma omp target parallel if(a) /// \endcode /// In this example directive '#pragma omp target parallel' has clause 'if' with /// condition 'a'. /// class OMPTargetParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. 
/// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetParallelDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelDirectiveClass; } }; /// This represents '#pragma omp target parallel for' directive. /// /// \code /// #pragma omp target parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp target parallel for' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPTargetParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current region has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPTargetParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForDirectiveClass; } }; /// This represents '#pragma omp teams' directive. 
/// /// \code /// #pragma omp teams if(a) /// \endcode /// In this example directive '#pragma omp teams' has clause 'if' with /// condition 'a'. /// class OMPTeamsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTeamsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDirectiveClass; } }; /// This represents '#pragma omp cancellation point' directive. /// /// \code /// #pragma omp cancellation point for /// \endcode /// /// In this example a cancellation point is created for innermost 'for' region. class OMPCancellationPointDirective : public OMPExecutableDirective { friend class ASTStmtReader; OpenMPDirectiveKind CancelRegion; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, StartLoc, EndLoc, 0, 0), CancelRegion(llvm::omp::OMPD_unknown) {} /// Build an empty directive. /// explicit OMPCancellationPointDirective() : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, SourceLocation(), SourceLocation(), 0, 0), CancelRegion(llvm::omp::OMPD_unknown) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPCancellationPointDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Get cancellation region for the current cancellation point. 
OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancellationPointDirectiveClass; } }; /// This represents '#pragma omp cancel' directive. /// /// \code /// #pragma omp cancel for /// \endcode /// /// In this example a cancel is created for innermost 'for' region. class OMPCancelDirective : public OMPExecutableDirective { friend class ASTStmtReader; OpenMPDirectiveKind CancelRegion; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, StartLoc, EndLoc, NumClauses, 0), CancelRegion(llvm::omp::OMPD_unknown) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. explicit OMPCancelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, SourceLocation(), SourceLocation(), NumClauses, 0), CancelRegion(llvm::omp::OMPD_unknown) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPCancelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCancelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancelDirectiveClass; } }; /// This represents '#pragma omp taskloop' directive. /// /// \code /// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
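///
/// (Editorial sketch of the public Create factory declared below, building a
/// taskloop that collapses two nested loops; 'Ctx', 'Loc', 'Clauses', 'Body'
/// and 'Exprs' are assumed placeholders, and CollapsedNum mirrors a
/// 'collapse(2)' clause:
/// \code
///   auto *D = OMPTaskLoopDirective::Create(
///       Ctx, Loc, Loc, /*CollapsedNum=*/2, Clauses, Body, Exprs,
///       /*HasCancel=*/false);
/// \endcode
/// )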
/// explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopDirectiveClass; } }; /// This represents '#pragma omp taskloop simd' directive. /// /// \code /// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop simd' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
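///
/// (Editorial note: unlike OMPTaskLoopDirective, the simd variant carries no
/// HasCancel flag, so the factory below takes one argument fewer; 'Ctx',
/// 'Loc', 'Clauses', 'Body' and 'Exprs' are assumed placeholders:
/// \code
///   auto *D = OMPTaskLoopSimdDirective::Create(
///       Ctx, Loc, Loc, /*CollapsedNum=*/1, Clauses, Body, Exprs);
/// \endcode
/// )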
/// static OMPTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp master taskloop' directive. /// /// \code /// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass, llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass, llvm::omp::OMPD_master_taskloop, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPMasterTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass; } }; /// This represents '#pragma omp master taskloop simd' directive. 
/// /// \code /// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop simd' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_master_taskloop_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_master_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPMasterTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \p NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master taskloop' directive. /// /// \code /// #pragma omp parallel master taskloop private(a,b) grainsize(val) /// num_tasks(num) /// \endcode /// In this example directive '#pragma omp parallel master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
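///
/// (Illustrative query, assuming 'D' points to a directive built by the
/// Create factory below:
/// \code
///   if (D->hasCancel())
///     ; // the associated region contains an inner cancel directive
/// \endcode
/// )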
/// OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelMasterTaskLoopDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelMasterTaskLoopDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelMasterTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass; } }; /// This represents '#pragma omp parallel master taskloop simd' directive. /// /// \code /// #pragma omp parallel master taskloop simd private(a,b) grainsize(val) /// num_tasks(num) /// \endcode /// In this example directive '#pragma omp parallel master taskloop simd' has /// clauses 'private' with the variables 'a' and 'b', 'grainsize' with /// expression 'val' and 'num_tasks' with expression 'num'. /// class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
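///
/// (Editorial mapping between source and AST, not upstream documentation:
/// \code
///   #pragma omp parallel master taskloop simd collapse(2)
///   for (int i = 0; i < n; ++i)
///     for (int j = 0; j < m; ++j)
///       a[i][j] = 0;
/// \endcode
/// would be modeled with CollapsedNum == 2 and an OMPCollapseClause in the
/// clause list.)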
/// explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelMasterTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelMasterTaskLoopSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp distribute' directive. /// /// \code /// #pragma omp distribute private(a,b) /// \endcode /// In this example directive '#pragma omp distribute' has clauses 'private' /// with the variables 'a' and 'b' /// class OMPDistributeDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, llvm::omp::OMPD_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, llvm::omp::OMPD_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
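///
/// (Editorial sketch of the reader-style path: the CreateEmpty overload
/// below only allocates; fields are filled in afterwards. 'Ctx' and
/// 'NumClauses' are assumed placeholders:
/// \code
///   auto *D = OMPDistributeDirective::CreateEmpty(
///       Ctx, NumClauses, /*CollapsedNum=*/1, Stmt::EmptyShell());
/// \endcode
/// )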
/// static OMPDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeDirectiveClass; } }; /// This represents '#pragma omp target update' directive. /// /// \code /// #pragma omp target update to(a) from(b) device(1) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' with /// argument 'a', clause 'from' with argument 'b' and clause 'device' with /// argument '1'. /// class OMPTargetUpdateDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, llvm::omp::OMPD_target_update, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetUpdateDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, llvm::omp::OMPD_target_update, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetUpdateDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses The number of clauses. /// static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetUpdateDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for' composite /// directive. /// /// \code /// #pragma omp distribute parallel for private(a,b) /// \endcode /// In this example directive '#pragma omp distribute parallel for' has clause /// 'private' with the variables 'a' and 'b' /// class OMPDistributeParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, llvm::omp::OMPD_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
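///
/// (Illustrative construction of this composite directive via the Create
/// factory declared below; 'Ctx', 'Loc', 'Clauses', 'Body' and 'Exprs' are
/// assumed placeholders:
/// \code
///   auto *D = OMPDistributeParallelForDirective::Create(
///       Ctx, Loc, Loc, /*CollapsedNum=*/1, Clauses, Body, Exprs,
///       /*HasCancel=*/false);
/// \endcode
/// )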
/// explicit OMPDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, llvm::omp::OMPD_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp distribute parallel for simd' has /// clause 'private' with the variables 'x' /// class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPDistributeParallelForSimdDirective *Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForSimdDirective *CreateEmpty( const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp distribute simd' composite directive. /// /// \code /// #pragma omp distribute simd private(x) /// \endcode /// In this example directive '#pragma omp distribute simd' has clause /// 'private' with the variables 'x' /// class OMPDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, llvm::omp::OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, llvm::omp::OMPD_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp target parallel for simd' directive. /// /// \code /// #pragma omp target parallel for simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target parallel for simd' has clauses /// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen' /// with the variable 'c'. /// class OMPTargetParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, llvm::omp::OMPD_target_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, llvm::omp::OMPD_target_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target simd' directive. /// /// \code /// #pragma omp target simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target simd' has clauses 'private' /// with the variable 'a', 'map' with the variable 'b' and 'safelen' with /// the variable 'c'. /// class OMPTargetSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, llvm::omp::OMPD_target_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, llvm::omp::OMPD_target_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. 
/// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute' directive. /// /// \code /// #pragma omp teams distribute private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, llvm::omp::OMPD_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, llvm::omp::OMPD_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp teams distribute simd' /// combined directive. 
/// /// \code /// #pragma omp teams distribute simd private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute simd' /// has clause 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for simd' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. 
/// static OMPTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams' directive. /// /// \code /// #pragma omp target teams if(a>0) /// \endcode /// In this example directive '#pragma omp target teams' has clause 'if' with /// condition 'a>0'. /// class OMPTargetTeamsDirective final : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, llvm::omp::OMPD_target_teams, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, llvm::omp::OMPD_target_teams, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDirectiveClass; } }; /// This represents '#pragma omp target teams distribute' combined directive. /// /// \code /// #pragma omp target teams distribute private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute' has clause /// 'private' with the variables 'x' /// class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPTargetTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, llvm::omp::OMPD_target_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, llvm::omp::OMPD_target_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for' combined /// directive. /// /// \code /// #pragma omp target teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for simd' /// combined directive. /// /// \code /// #pragma omp target teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for simd' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForSimdDirective( unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive. /// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; } // end namespace clang #endif
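The directive classes above are consumed through Clang's ordinary AST traversal machinery. Below is a minimal sketch of such a consumer, not part of the header itself: it relies only on members declared above (getNumClauses, isStandaloneDirective, getStructuredBlock); the visitor name and the diagnostic output are invented for the example.

#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtOpenMP.h"
#include "llvm/Support/raw_ostream.h"

namespace {
// Hypothetical visitor: reports every OpenMP executable directive in a TU.
class OMPDirectiveReporter
    : public clang::RecursiveASTVisitor<OMPDirectiveReporter> {
public:
  bool VisitOMPExecutableDirective(clang::OMPExecutableDirective *D) {
    // Every directive knows its statement class and clause count.
    llvm::errs() << D->getStmtClassName() << " carries " << D->getNumClauses()
                 << " clause(s)\n";
    // Stand-alone directives (e.g. 'omp target update') have no associated
    // user code; for all others, dump the structured block.
    if (!D->isStandaloneDirective())
      D->getStructuredBlock()->dump();
    return true;
  }
};
} // namespace

Invoked as OMPDirectiveReporter{}.TraverseDecl(Ctx.getTranslationUnitDecl()) from a frontend action, this prints each directive's statement class and clause count.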
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP executable /// directive. /// class OMPExecutableDirective : public Stmt { friend class ASTStmtReader; /// Kind of the directive. OpenMPDirectiveKind Kind; /// Starting location of the directive (directive keyword). SourceLocation StartLoc; /// Ending location of the directive. SourceLocation EndLoc; /// Numbers of clauses. const unsigned NumClauses; /// Number of child expressions/stmts. const unsigned NumChildren; /// Offset from this to the start of clauses. /// There are NumClauses pointers to clauses, they are followed by /// NumChildren pointers to child stmts/exprs (if the directive type /// requires an associated stmt, then it has to be the first of them). const unsigned ClausesOffset; /// Get the clauses storage. MutableArrayRef<OMPClause *> getClauses() { OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>( reinterpret_cast<char *>(this) + ClausesOffset); return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses); } protected: /// Build instance of directive of class \a K. /// /// \param SC Statement class. /// \param K Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// template <typename T> OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses, unsigned NumChildren) : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)), EndLoc(std::move(EndLoc)), NumClauses(NumClauses), NumChildren(NumChildren), ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {} /// Sets the list of variables for this clause. /// /// \param Clauses The list of clauses for the directive. /// void setClauses(ArrayRef<OMPClause *> Clauses); /// Set the associated statement for the directive. /// /// /param S Associated statement. /// void setAssociatedStmt(Stmt *S) { assert(hasAssociatedStmt() && "no associated statement."); *child_begin() = S; } public: /// Iterates over expressions/statements used in the construct. 
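/// For example, a pass could walk every sub-expression referenced by the
/// directive's clauses (a sketch; `D` is assumed to be an
/// OMPExecutableDirective* and `visitUsedChild` a hypothetical callback):
/// \code
/// for (Stmt *Child :
///      OMPExecutableDirective::used_clauses_children(D->clauses()))
///   visitUsedChild(Child);
/// \endcode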
class used_clauses_child_iterator : public llvm::iterator_adaptor_base< used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator, std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> { ArrayRef<OMPClause *>::iterator End; OMPClause::child_iterator ChildI, ChildEnd; void MoveToNext() { if (ChildI != ChildEnd) return; while (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); if (ChildI != ChildEnd) return; } } } public: explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses) : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); MoveToNext(); } } Stmt *operator*() const { return *ChildI; } Stmt *operator->() const { return **this; } used_clauses_child_iterator &operator++() { ++ChildI; if (ChildI != ChildEnd) return *this; if (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); } } MoveToNext(); return *this; } }; static llvm::iterator_range<used_clauses_child_iterator> used_clauses_children(ArrayRef<OMPClause *> Clauses) { return {used_clauses_child_iterator(Clauses), used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))}; } /// Iterates over a filtered subrange of clauses applied to a /// directive. /// /// This iterator visits only clauses of type SpecificClause. template <typename SpecificClause> class specific_clause_iterator : public llvm::iterator_adaptor_base< specific_clause_iterator<SpecificClause>, ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag, const SpecificClause *, ptrdiff_t, const SpecificClause *, const SpecificClause *> { ArrayRef<OMPClause *>::const_iterator End; void SkipToNextClause() { while (this->I != End && !isa<SpecificClause>(*this->I)) ++this->I; } public: explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses) : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { SkipToNextClause(); } const SpecificClause *operator*() const { return cast<SpecificClause>(*this->I); } const SpecificClause *operator->() const { return **this; } specific_clause_iterator &operator++() { ++this->I; SkipToNextClause(); return *this; } }; template <typename SpecificClause> static llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind(ArrayRef<OMPClause *> Clauses) { return {specific_clause_iterator<SpecificClause>(Clauses), specific_clause_iterator<SpecificClause>( llvm::makeArrayRef(Clauses.end(), 0))}; } template <typename SpecificClause> llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind() const { return getClausesOfKind<SpecificClause>(clauses()); } /// Gets a single clause of the specified kind associated with the /// current directive iff there is only one clause of this kind (and assertion /// is fired if there is more than one clause is associated with the /// directive). Returns nullptr if no clause of this kind is associated with /// the directive. 
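///
/// A typical use (sketch; assumes the directive may carry an 'if' clause,
/// and `handleIfClause` is a hypothetical helper):
/// \code
/// if (const auto *IC = D->getSingleClause<OMPIfClause>())
///   handleIfClause(IC);
/// \endcode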
template <typename SpecificClause> const SpecificClause *getSingleClause() const { auto Clauses = getClausesOfKind<SpecificClause>(); if (Clauses.begin() != Clauses.end()) { assert(std::next(Clauses.begin()) == Clauses.end() && "There are at least 2 clauses of the specified kind"); return *Clauses.begin(); } return nullptr; } /// Returns true if the current directive has one or more clauses of a /// specific kind. template <typename SpecificClause> bool hasClausesOfKind() const { auto Clauses = getClausesOfKind<SpecificClause>(); return Clauses.begin() != Clauses.end(); } /// Returns starting location of directive kind. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns ending location of directive. SourceLocation getEndLoc() const { return EndLoc; } /// Set starting location of directive kind. /// /// \param Loc New starting location of directive. /// void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Set ending location of directive. /// /// \param Loc New ending location of directive. /// void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Get number of clauses. unsigned getNumClauses() const { return NumClauses; } /// Returns specified clause. /// /// \param i Number of clause. /// OMPClause *getClause(unsigned i) const { return clauses()[i]; } /// Returns true if directive has associated statement. bool hasAssociatedStmt() const { return NumChildren > 0; } /// Returns statement associated with the directive. const Stmt *getAssociatedStmt() const { assert(hasAssociatedStmt() && "no associated statement."); return *child_begin(); } Stmt *getAssociatedStmt() { assert(hasAssociatedStmt() && "no associated statement."); return *child_begin(); } /// Returns the captured statement associated with the /// component region within the (combined) directive. // // \param RegionKind Component region kind. const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const { SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind()); assert(std::any_of( CaptureRegions.begin(), CaptureRegions.end(), [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) && "RegionKind not found in OpenMP CaptureRegions."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (auto ThisCaptureRegion : CaptureRegions) { if (ThisCaptureRegion == RegionKind) return CS; CS = cast<CapturedStmt>(CS->getCapturedStmt()); } llvm_unreachable("Incorrect RegionKind specified for directive."); } /// Get innermost captured statement for the construct. 
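/// For a combined construct such as 'target teams distribute parallel for'
/// the associated statement is a chain of nested CapturedStmts, one per
/// capture region; this unwraps the chain and returns the innermost one.
/// Sketch of intended use (names other than the API are illustrative):
/// \code
/// const Stmt *Body = Dir->getInnermostCapturedStmt()->getCapturedStmt();
/// \endcode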
  CapturedStmt *getInnermostCapturedStmt() {
    assert(hasAssociatedStmt() && getAssociatedStmt() &&
           "Must have associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    assert(!CaptureRegions.empty() &&
           "At least one captured statement must be provided.");
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (unsigned Level = CaptureRegions.size(); Level > 1; --Level)
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    return CS;
  }

  const CapturedStmt *getInnermostCapturedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)
        ->getInnermostCapturedStmt();
  }

  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }

  child_range children() {
    if (!hasAssociatedStmt())
      return child_range(child_iterator(), child_iterator());
    Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
    /// Do not mark all the special expression/statements as children, except
    /// for the associated statement.
    return child_range(ChildStorage, ChildStorage + 1);
  }

  const_child_range children() const {
    if (!hasAssociatedStmt())
      return const_child_range(const_child_iterator(), const_child_iterator());
    Stmt **ChildStorage = reinterpret_cast<Stmt **>(
        const_cast<OMPExecutableDirective *>(this)->getClauses().end());
    return const_child_range(ChildStorage, ChildStorage + 1);
  }

  ArrayRef<OMPClause *> clauses() { return getClauses(); }

  ArrayRef<OMPClause *> clauses() const {
    return const_cast<OMPExecutableDirective *>(this)->getClauses();
  }

  /// Returns whether or not this is a Standalone directive.
  ///
  /// Stand-alone directives are executable directives
  /// that have no associated user code.
  bool isStandaloneDirective() const;

  /// Returns the AST node representing OpenMP structured-block of this
  /// OpenMP executable directive.
  /// Prerequisite: Executable Directive must not be Standalone directive.
  const Stmt *getStructuredBlock() const;

  Stmt *getStructuredBlock() {
    return const_cast<Stmt *>(
        const_cast<const OMPExecutableDirective *>(this)->getStructuredBlock());
  }
};

/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// true if the construct has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, StartLoc, EndLoc,
                               NumClauses, 1),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, SourceLocation(),
                               SourceLocation(), NumClauses, 1),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses. /// \param AssociatedStmt Statement associated with the directive. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelDirectiveClass; } }; /// This is a common base class for loop directives ('omp simd', 'omp /// for', 'omp for simd' etc.). It is responsible for the loop code generation. /// class OMPLoopDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Number of collapsed loops as specified by 'collapse' clause. unsigned CollapsedNum; /// Offsets to the stored exprs. /// This enumeration contains offsets to all the pointers to children /// expressions stored in OMPLoopDirective. /// The first 9 children are necessary for all the loop directives, /// the next 8 are specific to the worksharing ones, and the next 11 are /// used for combined constructs containing two pragmas associated to loops. /// After the fixed children, three arrays of length CollapsedNum are /// allocated: loop counters, their updates and final values. /// PrevLowerBound and PrevUpperBound are used to communicate blocking /// information in composite constructs which require loop blocking /// DistInc is used to generate the increment expression for the distribute /// loop when combined with a further nested loop /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the /// for loop when combined with a previous distribute loop in the same pragma /// (e.g. 'distribute parallel for') /// enum { AssociatedStmtOffset = 0, IterationVariableOffset = 1, LastIterationOffset = 2, CalcLastIterationOffset = 3, PreConditionOffset = 4, CondOffset = 5, InitOffset = 6, IncOffset = 7, PreInitsOffset = 8, // The '...End' enumerators do not correspond to child expressions - they // specify the offset to the end (and start of the following counters/ // updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays). DefaultEnd = 9, // The following 8 exprs are used by worksharing and distribute loops only. IsLastIterVariableOffset = 9, LowerBoundVariableOffset = 10, UpperBoundVariableOffset = 11, StrideVariableOffset = 12, EnsureUpperBoundOffset = 13, NextLowerBoundOffset = 14, NextUpperBoundOffset = 15, NumIterationsOffset = 16, // Offset to the end for worksharing loop directives. WorksharingEnd = 17, PrevLowerBoundVariableOffset = 17, PrevUpperBoundVariableOffset = 18, DistIncOffset = 19, PrevEnsureUpperBoundOffset = 20, CombinedLowerBoundVariableOffset = 21, CombinedUpperBoundVariableOffset = 22, CombinedEnsureUpperBoundOffset = 23, CombinedInitOffset = 24, CombinedConditionOffset = 25, CombinedNextLowerBoundOffset = 26, CombinedNextUpperBoundOffset = 27, CombinedDistConditionOffset = 28, CombinedParForInDistConditionOffset = 29, // Offset to the end (and start of the following // counters/updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays) for combined distribute loop directives. 
CombinedDistributeEnd = 30, }; /// Get the counters storage. MutableArrayRef<Expr *> getCounters() { Expr **Storage = reinterpret_cast<Expr **>( &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind()))))); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the private counters storage. MutableArrayRef<Expr *> getPrivateCounters() { Expr **Storage = reinterpret_cast<Expr **>(&*std::next( child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the updates storage. MutableArrayRef<Expr *> getInits() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the updates storage. MutableArrayRef<Expr *> getUpdates() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the final counter updates storage. MutableArrayRef<Expr *> getFinals() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the dependent counters storage. MutableArrayRef<Expr *> getDependentCounters() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 5 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the dependent inits storage. MutableArrayRef<Expr *> getDependentInits() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 6 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the finals conditions storage. MutableArrayRef<Expr *> getFinalsConditions() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 7 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } protected: /// Build instance of loop directive of class \a Kind. /// /// \param SC Statement class. /// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed loops from 'collapse' clause. /// \param NumClauses Number of clauses. /// \param NumSpecialChildren Number of additional directive-specific stmts. /// template <typename T> OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses, unsigned NumSpecialChildren = 0) : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses, numLoopChildren(CollapsedNum, Kind) + NumSpecialChildren), CollapsedNum(CollapsedNum) {} /// Offset to the start of children expression arrays. static unsigned getArraysOffset(OpenMPDirectiveKind Kind) { if (isOpenMPLoopBoundSharingDirective(Kind)) return CombinedDistributeEnd; if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) || isOpenMPDistributeDirective(Kind)) return WorksharingEnd; return DefaultEnd; } /// Children number. 
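/// Worked example (from the offsets above): '#pragma omp distribute
/// parallel for' is a loop-bound-sharing directive, so getArraysOffset()
/// returns CombinedDistributeEnd (30); with collapse(2) the directive
/// therefore stores 30 + 8 * 2 = 46 loop-related children.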
static unsigned numLoopChildren(unsigned CollapsedNum, OpenMPDirectiveKind Kind) { return getArraysOffset(Kind) + 8 * CollapsedNum; // Counters, PrivateCounters, Inits, // Updates, Finals, DependentCounters, // DependentInits, FinalsConditions. } void setIterationVariable(Expr *IV) { *std::next(child_begin(), IterationVariableOffset) = IV; } void setLastIteration(Expr *LI) { *std::next(child_begin(), LastIterationOffset) = LI; } void setCalcLastIteration(Expr *CLI) { *std::next(child_begin(), CalcLastIterationOffset) = CLI; } void setPreCond(Expr *PC) { *std::next(child_begin(), PreConditionOffset) = PC; } void setCond(Expr *Cond) { *std::next(child_begin(), CondOffset) = Cond; } void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; } void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; } void setPreInits(Stmt *PreInits) { *std::next(child_begin(), PreInitsOffset) = PreInits; } void setIsLastIterVariable(Expr *IL) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), IsLastIterVariableOffset) = IL; } void setLowerBoundVariable(Expr *LB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), LowerBoundVariableOffset) = LB; } void setUpperBoundVariable(Expr *UB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), UpperBoundVariableOffset) = UB; } void setStrideVariable(Expr *ST) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), StrideVariableOffset) = ST; } void setEnsureUpperBound(Expr *EUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), EnsureUpperBoundOffset) = EUB; } void setNextLowerBound(Expr *NLB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NextLowerBoundOffset) = NLB; } void setNextUpperBound(Expr *NUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NextUpperBoundOffset) = NUB; } void setNumIterations(Expr *NI) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NumIterationsOffset) = NI; } void setPrevLowerBoundVariable(Expr *PrevLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB; } void setPrevUpperBoundVariable(Expr 
*PrevUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB; } void setDistInc(Expr *DistInc) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), DistIncOffset) = DistInc; } void setPrevEnsureUpperBound(Expr *PrevEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB; } void setCombinedLowerBoundVariable(Expr *CombLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB; } void setCombinedUpperBoundVariable(Expr *CombUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB; } void setCombinedEnsureUpperBound(Expr *CombEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB; } void setCombinedInit(Expr *CombInit) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedInitOffset) = CombInit; } void setCombinedCond(Expr *CombCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedConditionOffset) = CombCond; } void setCombinedNextLowerBound(Expr *CombNLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB; } void setCombinedNextUpperBound(Expr *CombNUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB; } void setCombinedDistCond(Expr *CombDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); *std::next(child_begin(), CombinedDistConditionOffset) = CombDistCond; } void setCombinedParForInDistCond(Expr *CombParForInDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); *std::next(child_begin(), CombinedParForInDistConditionOffset) = CombParForInDistCond; } void setCounters(ArrayRef<Expr *> A); void setPrivateCounters(ArrayRef<Expr *> A); void setInits(ArrayRef<Expr *> A); void setUpdates(ArrayRef<Expr *> A); void setFinals(ArrayRef<Expr *> A); void setDependentCounters(ArrayRef<Expr *> A); void setDependentInits(ArrayRef<Expr *> A); void setFinalsConditions(ArrayRef<Expr *> A); public: /// The expressions built to support OpenMP loops in combined/composite /// pragmas (e.g. pragma omp distribute parallel for) struct DistCombinedHelperExprs { /// DistributeLowerBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. Expr *LB; /// DistributeUpperBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. 
Expr *UB; /// DistributeEnsureUpperBound - used when composing 'omp distribute' /// with 'omp for' in a same construct, EUB depends on DistUB Expr *EUB; /// Distribute loop iteration variable init used when composing 'omp /// distribute' /// with 'omp for' in a same construct Expr *Init; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct Expr *Cond; /// Update of LowerBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NLB; /// Update of UpperBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NUB; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct when schedule is chunked. Expr *DistCond; /// 'omp parallel for' loop condition used when composed with /// 'omp distribute' in the same construct and when schedule is /// chunked and the chunk size is 1. Expr *ParForInDistCond; }; /// The expressions built for the OpenMP loop CodeGen for the /// whole collapsed loop nest. struct HelperExprs { /// Loop iteration variable. Expr *IterationVarRef; /// Loop last iteration number. Expr *LastIteration; /// Loop number of iterations. Expr *NumIterations; /// Calculation of last iteration. Expr *CalcLastIteration; /// Loop pre-condition. Expr *PreCond; /// Loop condition. Expr *Cond; /// Loop iteration variable init. Expr *Init; /// Loop increment. Expr *Inc; /// IsLastIteration - local flag variable passed to runtime. Expr *IL; /// LowerBound - local variable passed to runtime. Expr *LB; /// UpperBound - local variable passed to runtime. Expr *UB; /// Stride - local variable passed to runtime. Expr *ST; /// EnsureUpperBound -- expression UB = min(UB, NumIterations). Expr *EUB; /// Update of LowerBound for statically scheduled 'omp for' loops. Expr *NLB; /// Update of UpperBound for statically scheduled 'omp for' loops. Expr *NUB; /// PreviousLowerBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevLB; /// PreviousUpperBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevUB; /// DistInc - increment expression for distribute loop when found /// combined with a further loop level (e.g. in 'distribute parallel for') /// expression IV = IV + ST Expr *DistInc; /// PrevEUB - expression similar to EUB but to be used when loop /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for' /// when ensuring that the UB is either the calculated UB by the runtime or /// the end of the assigned distribute chunk) /// expression UB = min (UB, PrevUB) Expr *PrevEUB; /// Counters Loop counters. SmallVector<Expr *, 4> Counters; /// PrivateCounters Loop counters. SmallVector<Expr *, 4> PrivateCounters; /// Expressions for loop counters inits for CodeGen. SmallVector<Expr *, 4> Inits; /// Expressions for loop counters update for CodeGen. SmallVector<Expr *, 4> Updates; /// Final loop counter values for CodeGen. SmallVector<Expr *, 4> Finals; /// List of counters required for the generation of the non-rectangular /// loops. SmallVector<Expr *, 4> DependentCounters; /// List of initializers required for the generation of the non-rectangular /// loops. SmallVector<Expr *, 4> DependentInits; /// List of final conditions required for the generation of the /// non-rectangular loops.
SmallVector<Expr *, 4> FinalsConditions; /// Init statement for all captured expressions. Stmt *PreInits; /// Expressions used when combining OpenMP loop pragmas DistCombinedHelperExprs DistCombinedFields; /// Check if all the expressions are built (does not check the /// worksharing ones). bool builtAll() { return IterationVarRef != nullptr && LastIteration != nullptr && NumIterations != nullptr && PreCond != nullptr && Cond != nullptr && Init != nullptr && Inc != nullptr; } /// Initialize all the fields to null. /// \param Size Number of elements in the /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions /// arrays. void clear(unsigned Size) { IterationVarRef = nullptr; LastIteration = nullptr; CalcLastIteration = nullptr; PreCond = nullptr; Cond = nullptr; Init = nullptr; Inc = nullptr; IL = nullptr; LB = nullptr; UB = nullptr; ST = nullptr; EUB = nullptr; NLB = nullptr; NUB = nullptr; NumIterations = nullptr; PrevLB = nullptr; PrevUB = nullptr; DistInc = nullptr; PrevEUB = nullptr; Counters.resize(Size); PrivateCounters.resize(Size); Inits.resize(Size); Updates.resize(Size); Finals.resize(Size); DependentCounters.resize(Size); DependentInits.resize(Size); FinalsConditions.resize(Size); for (unsigned i = 0; i < Size; ++i) { Counters[i] = nullptr; PrivateCounters[i] = nullptr; Inits[i] = nullptr; Updates[i] = nullptr; Finals[i] = nullptr; DependentCounters[i] = nullptr; DependentInits[i] = nullptr; FinalsConditions[i] = nullptr; } PreInits = nullptr; DistCombinedFields.LB = nullptr; DistCombinedFields.UB = nullptr; DistCombinedFields.EUB = nullptr; DistCombinedFields.Init = nullptr; DistCombinedFields.Cond = nullptr; DistCombinedFields.NLB = nullptr; DistCombinedFields.NUB = nullptr; DistCombinedFields.DistCond = nullptr; DistCombinedFields.ParForInDistCond = nullptr; } }; /// Get number of collapsed loops. 
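// Illustrative sketch (not from the original header): how the pieces above fit
// together for a directive collapsed over two loops. numLoopChildren() reserves
// getArraysOffset(Kind) + 8 * CollapsedNum trailing child slots, and
// HelperExprs::clear() sizes the matching eight per-loop arrays; 'B' below is a
// hypothetical Sema-side local.
// \code
// OMPLoopDirective::HelperExprs B;
// B.clear(/*Size=*/2);   // collapse(2): eight arrays of two entries each
// // ... semantic analysis fills IterationVarRef, LastIteration, NumIterations,
// // PreCond, Cond, Init and Inc while building the canonical loop nest ...
// assert(B.builtAll() && "core loop expressions must all be built");
// \endcode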
unsigned getCollapsedNumber() const { return CollapsedNum; } Expr *getIterationVariable() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), IterationVariableOffset))); } Expr *getLastIteration() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), LastIterationOffset))); } Expr *getCalcLastIteration() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CalcLastIterationOffset))); } Expr *getPreCond() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PreConditionOffset))); } Expr *getCond() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset))); } Expr *getInit() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset))); } Expr *getInc() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset))); } const Stmt *getPreInits() const { return *std::next(child_begin(), PreInitsOffset); } Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); } Expr *getIsLastIterVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), IsLastIterVariableOffset))); } Expr *getLowerBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), LowerBoundVariableOffset))); } Expr *getUpperBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), UpperBoundVariableOffset))); } Expr *getStrideVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), StrideVariableOffset))); } Expr *getEnsureUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), EnsureUpperBoundOffset))); } Expr *getNextLowerBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NextLowerBoundOffset))); } Expr *getNextUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr 
*>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NextUpperBoundOffset))); } Expr *getNumIterations() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NumIterationsOffset))); } Expr *getPrevLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevLowerBoundVariableOffset))); } Expr *getPrevUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevUpperBoundVariableOffset))); } Expr *getDistInc() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), DistIncOffset))); } Expr *getPrevEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevEnsureUpperBoundOffset))); } Expr *getCombinedLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedLowerBoundVariableOffset))); } Expr *getCombinedUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedUpperBoundVariableOffset))); } Expr *getCombinedEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedEnsureUpperBoundOffset))); } Expr *getCombinedInit() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedInitOffset))); } Expr *getCombinedCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedConditionOffset))); } Expr *getCombinedNextLowerBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedNextLowerBoundOffset))); } Expr *getCombinedNextUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedNextUpperBoundOffset))); } Expr *getCombinedDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( 
*std::next(child_begin(), CombinedDistConditionOffset))); } Expr *getCombinedParForInDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedParForInDistConditionOffset))); } /// Try to find the next loop sub-statement in the specified statement \p /// CurStmt. /// \param TryImperfectlyNestedLoops true, if we need to try to look for the /// imperfectly nested loop. static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt, bool TryImperfectlyNestedLoops); static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt, bool TryImperfectlyNestedLoops) { return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops); } Stmt *getBody(); const Stmt *getBody() const { return const_cast<OMPLoopDirective *>(this)->getBody(); } ArrayRef<Expr *> counters() { return getCounters(); } ArrayRef<Expr *> counters() const { return const_cast<OMPLoopDirective *>(this)->getCounters(); } ArrayRef<Expr *> private_counters() { return getPrivateCounters(); } ArrayRef<Expr *> private_counters() const { return const_cast<OMPLoopDirective *>(this)->getPrivateCounters(); } ArrayRef<Expr *> inits() { return getInits(); } ArrayRef<Expr *> inits() const { return const_cast<OMPLoopDirective *>(this)->getInits(); } ArrayRef<Expr *> updates() { return getUpdates(); } ArrayRef<Expr *> updates() const { return const_cast<OMPLoopDirective *>(this)->getUpdates(); } ArrayRef<Expr *> finals() { return getFinals(); } ArrayRef<Expr *> finals() const { return const_cast<OMPLoopDirective *>(this)->getFinals(); } ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); } ArrayRef<Expr *> dependent_counters() const { return const_cast<OMPLoopDirective *>(this)->getDependentCounters(); } ArrayRef<Expr *> dependent_inits() { return getDependentInits(); } ArrayRef<Expr *> dependent_inits() const { return const_cast<OMPLoopDirective *>(this)->getDependentInits(); } ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); } ArrayRef<Expr *> finals_conditions() const { return const_cast<OMPLoopDirective *>(this)->getFinalsConditions(); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass || T->getStmtClass() == OMPForDirectiveClass || T->getStmtClass() == OMPForSimdDirectiveClass || T->getStmtClass() == OMPParallelForDirectiveClass || T->getStmtClass() == OMPParallelForSimdDirectiveClass || T->getStmtClass() == OMPTaskLoopDirectiveClass || T->getStmtClass() == OMPTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPMasterTaskLoopDirectiveClass || T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass || T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPDistributeDirectiveClass || T->getStmtClass() == OMPTargetParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPDistributeSimdDirectiveClass || T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass || 
T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp simd' directive. /// /// \code /// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, llvm::omp::OMPD_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, llvm::omp::OMPD_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass; } }; /// This represents '#pragma omp for' directive. /// /// \code /// #pragma omp for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for' has clauses 'private' with the /// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c' /// and 'd'. /// class OMPForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, llvm::omp::OMPD_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses.
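// Illustrative sketch (an assumption about typical lowering, not this header's
// contract): for a statically scheduled worksharing loop, CodeGen consumes the
// bound/stride accessors above roughly as below, where IV, LB, UB and ST stand
// for the expressions returned by getIterationVariable(),
// getLowerBoundVariable(), getUpperBoundVariable() and getStrideVariable().
// \code
// // runtime fills LB, UB and ST for this thread, then (chunked case):
// while (more_chunks) {           // more_chunks is a placeholder condition
//   for (IV = LB; IV <= UB; ++IV)
//     loop_body(IV);              // loop_body is a placeholder
//   LB += ST; UB += ST;           // the NLB/NUB update expressions
// }
// \endcode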
/// explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, llvm::omp::OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// This represents '#pragma omp for simd' directive. /// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses.
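// Illustrative sketch (assumed example): HasCancel records whether the region
// contains a matching cancellation construct, e.g. the loop below yields an
// OMPForDirective with hasCancel() == true.
// \code
// #pragma omp for
// for (int i = 0; i < n; ++i) {
//   if (had_error(i))        // had_error is a placeholder
//   {
//     #pragma omp cancel for
//   }
// }
// \endcode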
/// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// This represents '#pragma omp sections' directive. /// /// \code /// #pragma omp sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp sections' has clauses 'private' with /// the variables 'a' and 'b' and 'reduction' with operator '+' and variables /// 'c' and 'd'. /// class OMPSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, llvm::omp::OMPD_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, llvm::omp::OMPD_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner directive. /// static OMPSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSectionsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionsDirectiveClass; } }; /// This represents '#pragma omp section' directive. /// /// \code /// #pragma omp section /// \endcode /// class OMPSectionDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPSectionDirectiveClass, llvm::omp::OMPD_section, StartLoc, EndLoc, 0, 1), HasCancel(false) {} /// Build an empty directive. /// explicit OMPSectionDirective() : OMPExecutableDirective(this, OMPSectionDirectiveClass, llvm::omp::OMPD_section, SourceLocation(), SourceLocation(), 0, 1), HasCancel(false) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner directive.
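// Illustrative sketch: each '#pragma omp section' inside a sections region
// becomes an OMPSectionDirective nested in the enclosing OMPSectionsDirective.
// \code
// #pragma omp sections
// {
//   #pragma omp section
//   foo();                   // foo/bar are placeholders
//   #pragma omp section
//   bar();
// }
// \endcode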
/// static OMPSectionDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive. /// /// \param C AST context. /// static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionDirectiveClass; } }; /// This represents '#pragma omp single' directive. /// /// \code /// #pragma omp single private(a,b) copyprivate(c,d) /// \endcode /// In this example directive '#pragma omp single' has clauses 'private' with /// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'. /// class OMPSingleDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, llvm::omp::OMPD_single, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSingleDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, llvm::omp::OMPD_single, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPSingleDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSingleDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSingleDirectiveClass; } }; /// This represents '#pragma omp master' directive. /// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPMasterDirectiveClass, llvm::omp::OMPD_master, StartLoc, EndLoc, 0, 1) { } /// Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(this, OMPMasterDirectiveClass, llvm::omp::OMPD_master, SourceLocation(), SourceLocation(), 0, 1) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context.
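// Illustrative sketch: the 'copyprivate' clause documented for
// OMPSingleDirective above broadcasts the value produced by the one thread
// that executes the single region to the other threads' private copies.
// \code
// int a;
// #pragma omp single copyprivate(a)
// a = compute();   // compute is a placeholder; all threads see this 'a' afterwards
// \endcode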
/// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// This represents '#pragma omp critical' directive. /// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Name of the directive. DeclarationNameInfo DirName; /// Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, StartLoc, EndLoc, NumClauses, 1), DirName(Name) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPCriticalDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, SourceLocation(), SourceLocation(), NumClauses, 1), DirName() {} /// Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// This represents '#pragma omp parallel for' directive. /// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current region has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses.
/// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master' directive. /// /// \code /// #pragma omp parallel master private(a,b) /// \endcode /// In this example directive '#pragma omp parallel master' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPParallelMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelMasterDirectiveClass, llvm::omp::OMPD_parallel_master, StartLoc, EndLoc, NumClauses, 1) {} explicit OMPParallelMasterDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelMasterDirectiveClass, llvm::omp::OMPD_parallel_master, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPParallelMasterDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelMasterDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterDirectiveClass; } }; /// This represents '#pragma omp parallel sections' directive. /// /// \code /// #pragma omp parallel sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel sections' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPParallelSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, llvm::omp::OMPD_parallel_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, llvm::omp::OMPD_parallel_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner cancel directive.
/// static OMPParallelSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelSectionsDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelSectionsDirectiveClass; } }; /// This represents '#pragma omp task' directive. /// /// \code /// #pragma omp task private(a,b) final(d) /// \endcode /// In this example directive '#pragma omp task' has clauses 'private' with the /// variables 'a' and 'b' and 'final' with condition 'd'. /// class OMPTaskDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if this directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, llvm::omp::OMPD_task, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTaskDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, llvm::omp::OMPD_task, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true, if current directive has inner cancel directive. /// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// This represents '#pragma omp taskyield' directive. /// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskyieldDirective() : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context.
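// Illustrative sketch: the 'final' clause documented for OMPTaskDirective
// above makes descendant tasks undeferred once the condition holds, the usual
// cutoff idiom in recursive task decomposition.
// \code
// #pragma omp task final(depth > 16)   // the threshold is a placeholder
// traverse(node, depth + 1);           // traverse is a placeholder
// \endcode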
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(this, OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskwaitDirective() : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskwaitDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskwaitDirectiveClass; } }; /// This represents '#pragma omp taskgroup' directive. /// /// \code /// #pragma omp taskgroup /// \endcode /// class OMPTaskgroupDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, llvm::omp::OMPD_taskgroup, StartLoc, EndLoc, NumClauses, 2) {} /// Build an empty directive. /// \param NumClauses Number of clauses.
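// Illustrative sketch (assumed example): the ReductionRef child managed by
// OMPTaskgroupDirective below supports 'task_reduction', letting tasks spawned
// inside the taskgroup contribute to a reduction that completes when the
// taskgroup ends.
// \code
// int r = 0;
// #pragma omp taskgroup task_reduction(+: r)
// {
//   #pragma omp task in_reduction(+: r)
//   r += f(1);   // f is a placeholder
// }
// \endcode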
/// explicit OMPTaskgroupDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, llvm::omp::OMPD_taskgroup, SourceLocation(), SourceLocation(), NumClauses, 2) {} /// Sets the task_reduction return variable. void setReductionRef(Expr *RR) { *std::next(child_begin(), 1) = RR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param ReductionRef Reference to the task_reduction return variable. /// static OMPTaskgroupDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *ReductionRef); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns reference to the task_reduction return variable. const Expr *getReductionRef() const { return static_cast<const Expr *>(*std::next(child_begin(), 1)); } Expr *getReductionRef() { return static_cast<Expr *>(*std::next(child_begin(), 1)); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskgroupDirectiveClass; } }; /// This represents '#pragma omp flush' directive. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has 2 arguments: variables 'a' /// and 'b'. /// 'omp flush' directive does not have clauses but may have an optional list of /// variables to flush. This list of variables is stored within some fake clause /// FlushClause. class OMPFlushDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, llvm::omp::OMPD_flush, StartLoc, EndLoc, NumClauses, 0) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPFlushDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, llvm::omp::OMPD_flush, SourceLocation(), SourceLocation(), NumClauses, 0) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). /// static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPFlushDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPFlushDirectiveClass; } }; /// This represents '#pragma omp ordered' directive. /// /// \code /// #pragma omp ordered /// \endcode /// class OMPOrderedDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive.
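// Illustrative sketch: an 'ordered' region such as the one OMPOrderedDirective
// models runs in sequential iteration order inside a worksharing loop that
// carries the 'ordered' clause.
// \code
// #pragma omp for ordered
// for (int i = 0; i < n; ++i) {
//   compute(i);       // compute/emit are placeholders
//   #pragma omp ordered
//   emit(i);          // executes in the order of 'i'
// }
// \endcode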
/// \param NumClauses Number of clauses. /// OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPOrderedDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPOrderedDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPOrderedDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPOrderedDirectiveClass; } }; /// This represents '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has clause 'capture'. /// class OMPAtomicDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// x = x binop expr; /// x = expr binop x; /// \endcode /// This field is true for the first form of the expression and false for the /// second. Required for correct codegen of non-associative operations (like /// << or >>). bool IsXLHSInRHSPart; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// v = x; <update x>; /// <update x>; v = x; /// \endcode /// This field is true for the first (postfix) form of the expression and false /// otherwise. bool IsPostfixUpdate; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, StartLoc, EndLoc, NumClauses, 5), IsXLHSInRHSPart(false), IsPostfixUpdate(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPAtomicDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, SourceLocation(), SourceLocation(), NumClauses, 5), IsXLHSInRHSPart(false), IsPostfixUpdate(false) {} /// Set 'x' part of the associated expression/statement. void setX(Expr *X) { *std::next(child_begin()) = X; } /// Set helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; } /// Set 'v' part of the associated expression/statement. void setV(Expr *V) { *std::next(child_begin(), 3) = V; } /// Set 'expr' part of the associated expression/statement.
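// Illustrative sketch: the two flags above distinguish the legal shapes of an
// atomic update/capture.
// \code
// #pragma omp atomic update
// x = x << expr;          // IsXLHSInRHSPart == true:  x = x binop expr
// #pragma omp atomic update
// x = expr << x;          // IsXLHSInRHSPart == false: matters for '<<'/'>>'
// #pragma omp atomic capture
// { v = x; x += expr; }   // IsPostfixUpdate == true:  'v' captures the old value
// #pragma omp atomic capture
// { x += expr; v = x; }   // IsPostfixUpdate == false: 'v' captures the new value
// \endcode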
void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; } public: /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr' /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for /// detailed description of 'x', 'v' and 'expr'). /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param X 'x' part of the associated expression/statement. /// \param V 'v' part of the associated expression/statement. /// \param E 'expr' part of the associated expression/statement. /// \param UE Helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the /// second. /// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. static OMPAtomicDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPAtomicDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get 'x' part of the associated expression/statement. Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); } const Expr *getX() const { return cast_or_null<Expr>(*std::next(child_begin())); } /// Get helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. Expr *getUpdateExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 2)); } const Expr *getUpdateExpr() const { return cast_or_null<Expr>(*std::next(child_begin(), 2)); } /// Return true if helper update expression has form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; } /// Return true if 'v' expression must be updated to original value of /// 'x', false if 'v' must be updated to the new value of 'x'. bool isPostfixUpdate() const { return IsPostfixUpdate; } /// Get 'v' part of the associated expression/statement. Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); } const Expr *getV() const { return cast_or_null<Expr>(*std::next(child_begin(), 3)); } /// Get 'expr' part of the associated expression/statement. Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); } const Expr *getExpr() const { return cast_or_null<Expr>(*std::next(child_begin(), 4)); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPAtomicDirectiveClass; } }; /// This represents '#pragma omp target' directive. /// /// \code /// #pragma omp target if(a) /// \endcode /// In this example directive '#pragma omp target' has clause 'if' with /// condition 'a'. /// class OMPTargetDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses.
/// This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass,
                               llvm::omp::OMPD_target, StartLoc, EndLoc,
                               NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass,
                               llvm::omp::OMPD_target, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};

/// This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               llvm::omp::OMPD_target_data, StartLoc, EndLoc,
                               NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               llvm::omp::OMPD_target_data, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDataDirectiveClass;
  }
};
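// Editor's note: an illustrative sketch, not part of the original header.
// Clause access is inherited from OMPExecutableDirective, so e.g. the map
// clauses of a 'target data' directive can be filtered out of the generic
// clause list. The helper name is hypothetical.
//
//   static unsigned countMapClauses(const OMPTargetDataDirective *D) {
//     unsigned N = 0;
//     for (const OMPClause *C : D->clauses())
//       if (llvm::isa<OMPMapClause>(C))
//         ++N;
//     return N;
//   }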
/// This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(to: b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               llvm::omp::OMPD_target_enter_data, StartLoc,
                               EndLoc, NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetEnterDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               llvm::omp::OMPD_target_enter_data,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetEnterDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned N, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
  }
};

/// This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(from: b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               llvm::omp::OMPD_target_exit_data, StartLoc,
                               EndLoc, NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetExitDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               llvm::omp::OMPD_target_exit_data,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetExitDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned N, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
  }
};
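// Editor's note: an illustrative sketch, not part of the original header.
// The enter/exit data directives are distinct classes, but code that treats
// them uniformly can branch on the directive kind instead. Hypothetical
// helper.
//
//   static bool isTargetDataMovement(const OMPExecutableDirective &D) {
//     OpenMPDirectiveKind DK = D.getDirectiveKind();
//     return DK == llvm::omp::OMPD_target_enter_data ||
//            DK == llvm::omp::OMPD_target_exit_data;
//   }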
/// This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if'
/// with condition 'a'.
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               llvm::omp::OMPD_target_parallel, StartLoc,
                               EndLoc, NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               llvm::omp::OMPD_target_parallel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelDirectiveClass;
  }
};
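// Editor's note: an illustrative sketch, not part of the original header, of
// the Create/CreateEmpty split used by every directive class here: Sema
// builds fully populated nodes with Create, while the ASTStmtReader first
// allocates a hull with CreateEmpty and fills it in afterwards. All
// arguments are assumed to be prepared by a Sema-like caller.
//
//   static OMPTargetParallelDirective *
//   buildTargetParallel(ASTContext &Ctx, SourceLocation B, SourceLocation E,
//                       ArrayRef<OMPClause *> Clauses, Stmt *CapturedBody) {
//     return OMPTargetParallelDirective::Create(Ctx, B, E, Clauses,
//                                               CapturedBody);
//   }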
/// This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPTargetParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if current region has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass,
                               llvm::omp::OMPD_teams, StartLoc, EndLoc,
                               NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass,
                               llvm::omp::OMPD_teams, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C,
                                   SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};
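// Editor's note: an illustrative sketch, not part of the original header. A
// specific clause can be looked up with the getSingleClause<T>() helper
// inherited from OMPExecutableDirective (which asserts there is at most one
// matching clause) instead of scanning clauses() by hand.
//
//   static const OMPNumTeamsClause *
//   numTeamsOf(const OMPTeamsDirective *D) {
//     return D->getSingleClause<OMPNumTeamsClause>(); // null if absent
//   }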
/// This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  OpenMPDirectiveKind CancelRegion;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               llvm::omp::OMPD_cancellation_point, StartLoc,
                               EndLoc, 0, 0),
        CancelRegion(llvm::omp::OMPD_unknown) {}

  /// Build an empty directive.
  ///
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               llvm::omp::OMPD_cancellation_point,
                               SourceLocation(), SourceLocation(), 0, 0),
        CancelRegion(llvm::omp::OMPD_unknown) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);

  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};

/// This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  OpenMPDirectiveKind CancelRegion;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass,
                               llvm::omp::OMPD_cancel, StartLoc, EndLoc,
                               NumClauses, 0),
        CancelRegion(llvm::omp::OMPD_unknown) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  explicit OMPCancelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass,
                               llvm::omp::OMPD_cancel, SourceLocation(),
                               SourceLocation(), NumClauses, 0),
        CancelRegion(llvm::omp::OMPD_unknown) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};
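// Editor's note: an illustrative sketch, not part of the original header.
// Both cancellation classes expose the region they apply to, so handling
// code typically compares getCancelRegion() against a directive kind.
//
//   static bool cancelsInnermostFor(const OMPCancelDirective *D) {
//     // True when the 'cancel' construct targets the innermost 'for' region.
//     return D->getCancelRegion() == llvm::omp::OMPD_for;
//   }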
/// This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if the construct has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass,
                         llvm::omp::OMPD_taskloop, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass,
                         llvm::omp::OMPD_taskloop, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses,
                                           unsigned CollapsedNum, EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopDirectiveClass;
  }
};
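// Editor's note: an illustrative sketch, not part of the original header.
// Loop directives that may enclose an inner '#pragma omp cancel' carry the
// HasCancel flag set during semantic analysis; codegen consults it before
// emitting the cheaper cancellation-free loop structure.
//
//   static bool needsCancellationExit(const OMPTaskLoopDirective &D) {
//     return D.hasCancel();
//   }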
/// This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_taskloop_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_taskloop_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               unsigned CollapsedNum,
                                               EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
  }
};

/// This represents '#pragma omp master taskloop' directive.
///
/// \code
/// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if the construct has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum,
                                      unsigned NumClauses)
      : OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_master_taskloop, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass;
  }
};

/// This represents '#pragma omp master taskloop simd' directive.
///
/// \code
/// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop simd' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc,
                                 SourceLocation EndLoc, unsigned CollapsedNum,
                                 unsigned NumClauses)
      : OMPLoopDirective(this, OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum,
                                          unsigned NumClauses)
      : OMPLoopDirective(this, OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                                     unsigned NumClauses,
                                                     unsigned CollapsedNum,
                                                     EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass;
  }
};

/// This represents '#pragma omp parallel master taskloop' directive.
///
/// \code
/// #pragma omp parallel master taskloop private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop' has
/// clauses 'private' with the variables 'a' and 'b', 'grainsize' with
/// expression 'val' and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if the construct has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     unsigned CollapsedNum,
                                     unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop, StartLoc,
                         EndLoc, CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum,
                                              unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                         unsigned NumClauses,
                                                         unsigned CollapsedNum,
                                                         EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass;
  }
};

/// This represents '#pragma omp parallel master taskloop simd' directive.
///
/// \code
/// #pragma omp parallel master taskloop simd private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop simd' has
/// clauses 'private' with the variables 'a' and 'b', 'grainsize' with
/// expression 'val' and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum,
                                                  unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass;
  }
};

/// This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'
///
class OMPDistributeDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeDirectiveClass;
  }
};
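// Editor's note: an illustrative sketch, not part of the original header.
// OMPDistributeDirective is an OMPLoopDirective, so generic loop queries
// apply; the collapse depth recorded at parse time is one such query. The
// accessor name below follows the OMPLoopDirective interface of this era of
// the header (an assumption worth verifying against OMPLoopDirective).
//
//   static unsigned collapseDepth(const OMPDistributeDirective &D) {
//     return D.getCollapsedNumber(); // number of collapsed nested loops
//   }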
/// This represents '#pragma omp target update' directive.
///
/// \code
/// #pragma omp target update to(a) from(b) device(1)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to' with
/// argument 'a', clause 'from' with argument 'b' and clause 'device' with
/// argument '1'.
///
class OMPTargetUpdateDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass,
                               llvm::omp::OMPD_target_update, StartLoc, EndLoc,
                               NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetUpdateDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass,
                               llvm::omp::OMPD_target_update, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetUpdateDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses The number of clauses.
  ///
  static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetUpdateDirectiveClass;
  }
};

/// This represents '#pragma omp distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for' has clause
/// 'private' with the variables 'a' and 'b'
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeParallelForDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum,
                                    unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for, StartLoc,
                         EndLoc, CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeParallelForDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum,
                                        unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum,
                                                 unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForSimdDirective *Create(
      const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
      unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
      Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForSimdDirective *CreateEmpty(
      const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variables 'x'
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeSimdDirective(unsigned CollapsedNum,
                                      unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeSimdDirectiveClass;
  }
};

/// This represents '#pragma omp target parallel for simd' directive.
///
/// \code
/// #pragma omp target parallel for simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target parallel for simd' has
/// clauses 'private' with the variable 'a', 'map' with the variable 'b' and
/// 'safelen' with the variable 'c'.
///
class OMPTargetParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForSimdDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum,
                                    unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp target simd' directive.
///
/// \code
/// #pragma omp target simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target simd' has clauses 'private'
/// with the variable 'a', 'map' with the variable 'b' and 'safelen' with
/// the variable 'c'.
///
class OMPTargetSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetSimdDirectiveClass,
                         llvm::omp::OMPD_target_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetSimdDirectiveClass,
                         llvm::omp::OMPD_target_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetSimdDirectiveClass;
  }
};
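// Editor's note: an illustrative sketch, not part of the original header.
// The 'final' loop-directive classes in this area are siblings under
// OMPLoopDirective, so family-wide checks simply combine isa<> tests.
// S is assumed non-null.
//
//   static bool isTargetSimdFlavor(const Stmt *S) {
//     return llvm::isa<OMPTargetSimdDirective>(S) ||
//            llvm::isa<OMPTargetParallelForSimdDirective>(S);
//   }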
/// This represents '#pragma omp teams distribute' directive.
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum,
                                           unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                      unsigned NumClauses,
                                                      unsigned CollapsedNum,
                                                      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                             SourceLocation EndLoc,
                                             unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum,
                                                      unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTeamsDistributeParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         StartLoc, EndLoc, CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum,
                                                  unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, StartLoc, EndLoc,
                               NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetTeamsDirective *Create(const ASTContext &C,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         ArrayRef<OMPClause *> Clauses,
                                         Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
  }
};

/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
/// OMPTargetTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, llvm::omp::OMPD_target_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, llvm::omp::OMPD_target_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for' combined /// directive. /// /// \code /// #pragma omp target teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for simd' /// combined directive. /// /// \code /// #pragma omp target teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for simd' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForSimdDirective( unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive.
/// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; } // end namespace clang #endif
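// --- Illustrative usage sketch (an editorial addition, not part of the
// header): the directive classes above expose LLVM-style RTTI through
// classof(), so client code recovers the concrete directive type with
// isa/dyn_cast. The helper name `describeDirective` is hypothetical.
//
//   #include "clang/AST/StmtOpenMP.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   static void describeDirective(const clang::Stmt *S) {
//     using namespace clang;
//     if (const auto *D =
//             dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(S))
//       llvm::outs() << "target teams distribute parallel for, inner cancel: "
//                    << (D->hasCancel() ? "yes" : "no") << '\n';
//     else if (isa<OMPTargetTeamsDirective>(S))
//       llvm::outs() << "target teams\n";
//   }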
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// This is a basic class for representing a single OpenMP executable /// directive. /// class OMPExecutableDirective : public Stmt { friend class ASTStmtReader; /// Kind of the directive. OpenMPDirectiveKind Kind; /// Starting location of the directive (directive keyword). SourceLocation StartLoc; /// Ending location of the directive. SourceLocation EndLoc; /// Number of clauses. const unsigned NumClauses; /// Number of child expressions/stmts. const unsigned NumChildren; /// Offset from this to the start of clauses. /// There are NumClauses pointers to clauses, they are followed by /// NumChildren pointers to child stmts/exprs (if the directive type /// requires an associated stmt, then it has to be the first of them). const unsigned ClausesOffset; /// Get the clauses storage. MutableArrayRef<OMPClause *> getClauses() { OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>( reinterpret_cast<char *>(this) + ClausesOffset); return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses); } protected: /// Build instance of directive of class \a K. /// /// \param SC Statement class. /// \param K Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// template <typename T> OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses, unsigned NumChildren) : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)), EndLoc(std::move(EndLoc)), NumClauses(NumClauses), NumChildren(NumChildren), ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {} /// Sets the list of clauses for this directive. /// /// \param Clauses The list of clauses for the directive. /// void setClauses(ArrayRef<OMPClause *> Clauses); /// Set the associated statement for the directive. /// /// \param S Associated statement. /// void setAssociatedStmt(Stmt *S) { assert(hasAssociatedStmt() && "no associated statement."); *child_begin() = S; } public: /// Iterates over expressions/statements used in the construct.
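/// A minimal usage sketch (editorial illustration; `D` is assumed to point
/// at some OMPExecutableDirective):
/// \code
/// for (Stmt *Child :
///      OMPExecutableDirective::used_clauses_children(D->clauses()))
///   ; // each Child is a sub-statement referenced by one of D's clauses
/// \endcode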
class used_clauses_child_iterator : public llvm::iterator_adaptor_base< used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator, std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> { ArrayRef<OMPClause *>::iterator End; OMPClause::child_iterator ChildI, ChildEnd; void MoveToNext() { if (ChildI != ChildEnd) return; while (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); if (ChildI != ChildEnd) return; } } } public: explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses) : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); MoveToNext(); } } Stmt *operator*() const { return *ChildI; } Stmt *operator->() const { return **this; } used_clauses_child_iterator &operator++() { ++ChildI; if (ChildI != ChildEnd) return *this; if (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); } } MoveToNext(); return *this; } }; static llvm::iterator_range<used_clauses_child_iterator> used_clauses_children(ArrayRef<OMPClause *> Clauses) { return {used_clauses_child_iterator(Clauses), used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))}; } /// Iterates over a filtered subrange of clauses applied to a /// directive. /// /// This iterator visits only clauses of type SpecificClause. template <typename SpecificClause> class specific_clause_iterator : public llvm::iterator_adaptor_base< specific_clause_iterator<SpecificClause>, ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag, const SpecificClause *, ptrdiff_t, const SpecificClause *, const SpecificClause *> { ArrayRef<OMPClause *>::const_iterator End; void SkipToNextClause() { while (this->I != End && !isa<SpecificClause>(*this->I)) ++this->I; } public: explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses) : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { SkipToNextClause(); } const SpecificClause *operator*() const { return cast<SpecificClause>(*this->I); } const SpecificClause *operator->() const { return **this; } specific_clause_iterator &operator++() { ++this->I; SkipToNextClause(); return *this; } }; template <typename SpecificClause> static llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind(ArrayRef<OMPClause *> Clauses) { return {specific_clause_iterator<SpecificClause>(Clauses), specific_clause_iterator<SpecificClause>( llvm::makeArrayRef(Clauses.end(), 0))}; } template <typename SpecificClause> llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind() const { return getClausesOfKind<SpecificClause>(clauses()); } /// Gets a single clause of the specified kind associated with the /// current directive iff there is only one clause of this kind (an assertion /// fires if more than one clause of this kind is associated with the /// directive). Returns nullptr if no clause of this kind is associated with /// the directive.
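/// For example (editorial illustration; `D` and `handleCollapse` are assumed
/// names, not part of this header):
/// \code
/// if (const auto *C = D->getSingleClause<OMPCollapseClause>())
///   handleCollapse(C); // at most one 'collapse' clause can appear
/// \endcode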
template <typename SpecificClause> const SpecificClause *getSingleClause() const { auto Clauses = getClausesOfKind<SpecificClause>(); if (Clauses.begin() != Clauses.end()) { assert(std::next(Clauses.begin()) == Clauses.end() && "There are at least 2 clauses of the specified kind"); return *Clauses.begin(); } return nullptr; } /// Returns true if the current directive has one or more clauses of a /// specific kind. template <typename SpecificClause> bool hasClausesOfKind() const { auto Clauses = getClausesOfKind<SpecificClause>(); return Clauses.begin() != Clauses.end(); } /// Returns starting location of directive kind. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns ending location of directive. SourceLocation getEndLoc() const { return EndLoc; } /// Set starting location of directive kind. /// /// \param Loc New starting location of directive. /// void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Set ending location of directive. /// /// \param Loc New ending location of directive. /// void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Get number of clauses. unsigned getNumClauses() const { return NumClauses; } /// Returns specified clause. /// /// \param i Number of clause. /// OMPClause *getClause(unsigned i) const { return clauses()[i]; } /// Returns true if directive has associated statement. bool hasAssociatedStmt() const { return NumChildren > 0; } /// Returns statement associated with the directive. const Stmt *getAssociatedStmt() const { assert(hasAssociatedStmt() && "no associated statement."); return *child_begin(); } Stmt *getAssociatedStmt() { assert(hasAssociatedStmt() && "no associated statement."); return *child_begin(); } /// Returns the captured statement associated with the /// component region within the (combined) directive. // // \param RegionKind Component region kind. const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const { SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind()); assert(std::any_of( CaptureRegions.begin(), CaptureRegions.end(), [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) && "RegionKind not found in OpenMP CaptureRegions."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (auto ThisCaptureRegion : CaptureRegions) { if (ThisCaptureRegion == RegionKind) return CS; CS = cast<CapturedStmt>(CS->getCapturedStmt()); } llvm_unreachable("Incorrect RegionKind specified for directive."); } /// Get innermost captured statement for the construct. 
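/// For combined constructs the associated statement is a chain of
/// CapturedStmts, one per capture region, so the user's structured block is
/// reached by unwrapping the innermost one. A sketch (editorial
/// illustration; `D` is an assumed directive pointer):
/// \code
/// const Stmt *Body = D->getInnermostCapturedStmt()->getCapturedStmt();
/// \endcode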
CapturedStmt *getInnermostCapturedStmt() { assert(hasAssociatedStmt() && getAssociatedStmt() && "Must have associated statement."); SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind()); assert(!CaptureRegions.empty() && "At least one captured statement must be provided."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (unsigned Level = CaptureRegions.size(); Level > 1; --Level) CS = cast<CapturedStmt>(CS->getCapturedStmt()); return CS; } const CapturedStmt *getInnermostCapturedStmt() const { return const_cast<OMPExecutableDirective *>(this) ->getInnermostCapturedStmt(); } OpenMPDirectiveKind getDirectiveKind() const { return Kind; } static bool classof(const Stmt *S) { return S->getStmtClass() >= firstOMPExecutableDirectiveConstant && S->getStmtClass() <= lastOMPExecutableDirectiveConstant; } child_range children() { if (!hasAssociatedStmt()) return child_range(child_iterator(), child_iterator()); Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end()); /// Do not mark all the special expression/statements as children, except /// for the associated statement. return child_range(ChildStorage, ChildStorage + 1); } const_child_range children() const { if (!hasAssociatedStmt()) return const_child_range(const_child_iterator(), const_child_iterator()); Stmt **ChildStorage = reinterpret_cast<Stmt **>( const_cast<OMPExecutableDirective *>(this)->getClauses().end()); return const_child_range(ChildStorage, ChildStorage + 1); } ArrayRef<OMPClause *> clauses() { return getClauses(); } ArrayRef<OMPClause *> clauses() const { return const_cast<OMPExecutableDirective *>(this)->getClauses(); } /// Returns whether or not this is a Standalone directive. /// /// Stand-alone directives are executable directives /// that have no associated user code. bool isStandaloneDirective() const; /// Returns the AST node representing OpenMP structured-block of this /// OpenMP executable directive, /// Prerequisite: Executable Directive must not be Standalone directive. const Stmt *getStructuredBlock() const; Stmt *getStructuredBlock() { return const_cast<Stmt *>( const_cast<const OMPExecutableDirective *>(this)->getStructuredBlock()); } }; /// This represents '#pragma omp parallel' directive. /// /// \code /// #pragma omp parallel private(a,b) reduction(+: c,d) /// \endcode /// In this example directive '#pragma omp parallel' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending Location of the directive. /// OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, llvm::omp::OMPD_parallel, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, llvm::omp::OMPD_parallel, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. 
/// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement associated with the directive. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelDirectiveClass; } }; /// This is a common base class for loop directives ('omp simd', 'omp /// for', 'omp for simd' etc.). It is responsible for the loop code generation. /// class OMPLoopDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Number of collapsed loops as specified by 'collapse' clause. unsigned CollapsedNum; /// Offsets to the stored exprs. /// This enumeration contains offsets to all the pointers to children /// expressions stored in OMPLoopDirective. /// The first 9 children are necessary for all the loop directives, /// the next 8 are specific to the worksharing ones, and the next 13 are /// used for combined constructs containing two pragmas associated to loops. /// After the fixed children, eight arrays of length CollapsedNum are /// allocated: loop counters, their private copies, inits, updates, final /// values, dependent counters, dependent inits, and finals conditions. /// PrevLowerBound and PrevUpperBound are used to communicate blocking /// information in composite constructs which require loop blocking. /// DistInc is used to generate the increment expression for the distribute /// loop when combined with a further nested loop. /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the /// for loop when combined with a previous distribute loop in the same pragma /// (e.g. 'distribute parallel for'). /// enum { AssociatedStmtOffset = 0, IterationVariableOffset = 1, LastIterationOffset = 2, CalcLastIterationOffset = 3, PreConditionOffset = 4, CondOffset = 5, InitOffset = 6, IncOffset = 7, PreInitsOffset = 8, // The '...End' enumerators do not correspond to child expressions - they // specify the offset to the end (and start of the following counters/ // updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays). DefaultEnd = 9, // The following 8 exprs are used by worksharing and distribute loops only. IsLastIterVariableOffset = 9, LowerBoundVariableOffset = 10, UpperBoundVariableOffset = 11, StrideVariableOffset = 12, EnsureUpperBoundOffset = 13, NextLowerBoundOffset = 14, NextUpperBoundOffset = 15, NumIterationsOffset = 16, // Offset to the end for worksharing loop directives.
WorksharingEnd = 17, PrevLowerBoundVariableOffset = 17, PrevUpperBoundVariableOffset = 18, DistIncOffset = 19, PrevEnsureUpperBoundOffset = 20, CombinedLowerBoundVariableOffset = 21, CombinedUpperBoundVariableOffset = 22, CombinedEnsureUpperBoundOffset = 23, CombinedInitOffset = 24, CombinedConditionOffset = 25, CombinedNextLowerBoundOffset = 26, CombinedNextUpperBoundOffset = 27, CombinedDistConditionOffset = 28, CombinedParForInDistConditionOffset = 29, // Offset to the end (and start of the following // counters/updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays) for combined distribute loop directives. CombinedDistributeEnd = 30, }; /// Get the counters storage. MutableArrayRef<Expr *> getCounters() { Expr **Storage = reinterpret_cast<Expr **>( &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind()))))); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the private counters storage. MutableArrayRef<Expr *> getPrivateCounters() { Expr **Storage = reinterpret_cast<Expr **>(&*std::next( child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the inits storage. MutableArrayRef<Expr *> getInits() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the updates storage. MutableArrayRef<Expr *> getUpdates() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the final counter updates storage. MutableArrayRef<Expr *> getFinals() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the dependent counters storage. MutableArrayRef<Expr *> getDependentCounters() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 5 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the dependent inits storage. MutableArrayRef<Expr *> getDependentInits() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 6 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the finals conditions storage. MutableArrayRef<Expr *> getFinalsConditions() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 7 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } protected: /// Build instance of loop directive of class \a Kind. /// /// \param SC Statement class. /// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed loops from 'collapse' clause. /// \param NumClauses Number of clauses. /// \param NumSpecialChildren Number of additional directive-specific stmts.
/// template <typename T> OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses, unsigned NumSpecialChildren = 0) : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses, numLoopChildren(CollapsedNum, Kind) + NumSpecialChildren), CollapsedNum(CollapsedNum) {} /// Offset to the start of children expression arrays. static unsigned getArraysOffset(OpenMPDirectiveKind Kind) { if (isOpenMPLoopBoundSharingDirective(Kind)) return CombinedDistributeEnd; if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) || isOpenMPDistributeDirective(Kind)) return WorksharingEnd; return DefaultEnd; } /// Children number. static unsigned numLoopChildren(unsigned CollapsedNum, OpenMPDirectiveKind Kind) { return getArraysOffset(Kind) + 8 * CollapsedNum; // Counters, PrivateCounters, Inits, // Updates, Finals, DependentCounters, // DependentInits, FinalsConditions. } void setIterationVariable(Expr *IV) { *std::next(child_begin(), IterationVariableOffset) = IV; } void setLastIteration(Expr *LI) { *std::next(child_begin(), LastIterationOffset) = LI; } void setCalcLastIteration(Expr *CLI) { *std::next(child_begin(), CalcLastIterationOffset) = CLI; } void setPreCond(Expr *PC) { *std::next(child_begin(), PreConditionOffset) = PC; } void setCond(Expr *Cond) { *std::next(child_begin(), CondOffset) = Cond; } void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; } void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; } void setPreInits(Stmt *PreInits) { *std::next(child_begin(), PreInitsOffset) = PreInits; } void setIsLastIterVariable(Expr *IL) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), IsLastIterVariableOffset) = IL; } void setLowerBoundVariable(Expr *LB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), LowerBoundVariableOffset) = LB; } void setUpperBoundVariable(Expr *UB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), UpperBoundVariableOffset) = UB; } void setStrideVariable(Expr *ST) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), StrideVariableOffset) = ST; } void setEnsureUpperBound(Expr *EUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), EnsureUpperBoundOffset) = EUB; } void setNextLowerBound(Expr *NLB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NextLowerBoundOffset) = NLB; } void setNextUpperBound(Expr *NUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) 
|| isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NextUpperBoundOffset) = NUB; } void setNumIterations(Expr *NI) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NumIterationsOffset) = NI; } void setPrevLowerBoundVariable(Expr *PrevLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB; } void setPrevUpperBoundVariable(Expr *PrevUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB; } void setDistInc(Expr *DistInc) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), DistIncOffset) = DistInc; } void setPrevEnsureUpperBound(Expr *PrevEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB; } void setCombinedLowerBoundVariable(Expr *CombLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB; } void setCombinedUpperBoundVariable(Expr *CombUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB; } void setCombinedEnsureUpperBound(Expr *CombEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB; } void setCombinedInit(Expr *CombInit) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedInitOffset) = CombInit; } void setCombinedCond(Expr *CombCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedConditionOffset) = CombCond; } void setCombinedNextLowerBound(Expr *CombNLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB; } void setCombinedNextUpperBound(Expr *CombNUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB; } void setCombinedDistCond(Expr *CombDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); *std::next(child_begin(), CombinedDistConditionOffset) = CombDistCond; } void setCombinedParForInDistCond(Expr *CombParForInDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); *std::next(child_begin(), CombinedParForInDistConditionOffset) = CombParForInDistCond; } void setCounters(ArrayRef<Expr *> A); void setPrivateCounters(ArrayRef<Expr *> A); void setInits(ArrayRef<Expr *> A); void setUpdates(ArrayRef<Expr *> A); 
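// Editorial note (not upstream commentary): each setter above stores into
// the node's trailing child-pointer array at a fixed enum offset, following
// the pattern `*std::next(child_begin(), SomeOffset) = E;`. This keeps every
// directive a single allocation; the static Create() factories are expected
// to call these setters once Sema has built the corresponding HelperExprs.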
void setFinals(ArrayRef<Expr *> A); void setDependentCounters(ArrayRef<Expr *> A); void setDependentInits(ArrayRef<Expr *> A); void setFinalsConditions(ArrayRef<Expr *> A); public: /// The expressions built to support OpenMP loops in combined/composite /// pragmas (e.g. pragma omp distribute parallel for) struct DistCombinedHelperExprs { /// DistributeLowerBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. Expr *LB; /// DistributeUpperBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. Expr *UB; /// DistributeEnsureUpperBound - used when composing 'omp distribute' /// with 'omp for' in a same construct, EUB depends on DistUB Expr *EUB; /// Distribute loop iteration variable init used when composing 'omp /// distribute' /// with 'omp for' in a same construct Expr *Init; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct Expr *Cond; /// Update of LowerBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NLB; /// Update of UpperBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NUB; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct when schedule is chunked. Expr *DistCond; /// 'omp parallel for' loop condition used when composed with /// 'omp distribute' in the same construct and when schedule is /// chunked and the chunk size is 1. Expr *ParForInDistCond; }; /// The expressions built for the OpenMP loop CodeGen for the /// whole collapsed loop nest. struct HelperExprs { /// Loop iteration variable. Expr *IterationVarRef; /// Loop last iteration number. Expr *LastIteration; /// Loop number of iterations. Expr *NumIterations; /// Calculation of last iteration. Expr *CalcLastIteration; /// Loop pre-condition. Expr *PreCond; /// Loop condition. Expr *Cond; /// Loop iteration variable init. Expr *Init; /// Loop increment. Expr *Inc; /// IsLastIteration - local flag variable passed to runtime. Expr *IL; /// LowerBound - local variable passed to runtime. Expr *LB; /// UpperBound - local variable passed to runtime. Expr *UB; /// Stride - local variable passed to runtime. Expr *ST; /// EnsureUpperBound -- expression UB = min(UB, NumIterations). Expr *EUB; /// Update of LowerBound for statically scheduled 'omp for' loops. Expr *NLB; /// Update of UpperBound for statically scheduled 'omp for' loops. Expr *NUB; /// PreviousLowerBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevLB; /// PreviousUpperBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevUB; /// DistInc - increment expression for distribute loop when found /// combined with a further loop level (e.g. in 'distribute parallel for') /// expression IV = IV + ST Expr *DistInc; /// PrevEUB - expression similar to EUB but to be used when loop /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for' /// when ensuring that the UB is either the calculated UB by the runtime or /// the end of the assigned distribute chunk) /// expression UB = min (UB, PrevUB) Expr *PrevEUB; /// Counters Loop counters. SmallVector<Expr *, 4> Counters; /// PrivateCounters Loop counters. SmallVector<Expr *, 4> PrivateCounters; /// Expressions for loop counters inits for CodeGen. 
SmallVector<Expr *, 4> Inits; /// Expressions for loop counters update for CodeGen. SmallVector<Expr *, 4> Updates; /// Final loop counter values for CodeGen. SmallVector<Expr *, 4> Finals; /// List of counters required for the generation of the non-rectangular /// loops. SmallVector<Expr *, 4> DependentCounters; /// List of initializers required for the generation of the non-rectangular /// loops. SmallVector<Expr *, 4> DependentInits; /// List of final conditions required for the generation of the /// non-rectangular loops. SmallVector<Expr *, 4> FinalsConditions; /// Init statement for all captured expressions. Stmt *PreInits; /// Expressions used when combining OpenMP loop pragmas. DistCombinedHelperExprs DistCombinedFields; /// Check if all the expressions are built (does not check the /// worksharing ones). bool builtAll() { return IterationVarRef != nullptr && LastIteration != nullptr && NumIterations != nullptr && PreCond != nullptr && Cond != nullptr && Init != nullptr && Inc != nullptr; } /// Initialize all the fields to null. /// \param Size Number of elements in the /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions /// arrays. void clear(unsigned Size) { IterationVarRef = nullptr; LastIteration = nullptr; CalcLastIteration = nullptr; PreCond = nullptr; Cond = nullptr; Init = nullptr; Inc = nullptr; IL = nullptr; LB = nullptr; UB = nullptr; ST = nullptr; EUB = nullptr; NLB = nullptr; NUB = nullptr; NumIterations = nullptr; PrevLB = nullptr; PrevUB = nullptr; DistInc = nullptr; PrevEUB = nullptr; Counters.resize(Size); PrivateCounters.resize(Size); Inits.resize(Size); Updates.resize(Size); Finals.resize(Size); DependentCounters.resize(Size); DependentInits.resize(Size); FinalsConditions.resize(Size); for (unsigned i = 0; i < Size; ++i) { Counters[i] = nullptr; PrivateCounters[i] = nullptr; Inits[i] = nullptr; Updates[i] = nullptr; Finals[i] = nullptr; DependentCounters[i] = nullptr; DependentInits[i] = nullptr; FinalsConditions[i] = nullptr; } PreInits = nullptr; DistCombinedFields.LB = nullptr; DistCombinedFields.UB = nullptr; DistCombinedFields.EUB = nullptr; DistCombinedFields.Init = nullptr; DistCombinedFields.Cond = nullptr; DistCombinedFields.NLB = nullptr; DistCombinedFields.NUB = nullptr; DistCombinedFields.DistCond = nullptr; DistCombinedFields.ParForInDistCond = nullptr; } }; /// Get number of collapsed loops.
unsigned getCollapsedNumber() const { return CollapsedNum; } Expr *getIterationVariable() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), IterationVariableOffset))); } Expr *getLastIteration() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), LastIterationOffset))); } Expr *getCalcLastIteration() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CalcLastIterationOffset))); } Expr *getPreCond() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PreConditionOffset))); } Expr *getCond() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset))); } Expr *getInit() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset))); } Expr *getInc() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset))); } const Stmt *getPreInits() const { return *std::next(child_begin(), PreInitsOffset); } Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); } Expr *getIsLastIterVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), IsLastIterVariableOffset))); } Expr *getLowerBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), LowerBoundVariableOffset))); } Expr *getUpperBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), UpperBoundVariableOffset))); } Expr *getStrideVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), StrideVariableOffset))); } Expr *getEnsureUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), EnsureUpperBoundOffset))); } Expr *getNextLowerBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NextLowerBoundOffset))); } Expr *getNextUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr 
*>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NextUpperBoundOffset))); } Expr *getNumIterations() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NumIterationsOffset))); } Expr *getPrevLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevLowerBoundVariableOffset))); } Expr *getPrevUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevUpperBoundVariableOffset))); } Expr *getDistInc() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), DistIncOffset))); } Expr *getPrevEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevEnsureUpperBoundOffset))); } Expr *getCombinedLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedLowerBoundVariableOffset))); } Expr *getCombinedUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedUpperBoundVariableOffset))); } Expr *getCombinedEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedEnsureUpperBoundOffset))); } Expr *getCombinedInit() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedInitOffset))); } Expr *getCombinedCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedConditionOffset))); } Expr *getCombinedNextLowerBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedNextLowerBoundOffset))); } Expr *getCombinedNextUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedNextUpperBoundOffset))); } Expr *getCombinedDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( 
*std::next(child_begin(), CombinedDistConditionOffset))); } Expr *getCombinedParForInDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedParForInDistConditionOffset))); } /// Try to find the next loop sub-statement in the specified statement \p /// CurStmt. /// \param TryImperfectlyNestedLoops true, if we need to try to look for the /// imperfectly nested loop. static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt, bool TryImperfectlyNestedLoops); static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt, bool TryImperfectlyNestedLoops) { return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops); } Stmt *getBody(); const Stmt *getBody() const { return const_cast<OMPLoopDirective *>(this)->getBody(); } ArrayRef<Expr *> counters() { return getCounters(); } ArrayRef<Expr *> counters() const { return const_cast<OMPLoopDirective *>(this)->getCounters(); } ArrayRef<Expr *> private_counters() { return getPrivateCounters(); } ArrayRef<Expr *> private_counters() const { return const_cast<OMPLoopDirective *>(this)->getPrivateCounters(); } ArrayRef<Expr *> inits() { return getInits(); } ArrayRef<Expr *> inits() const { return const_cast<OMPLoopDirective *>(this)->getInits(); } ArrayRef<Expr *> updates() { return getUpdates(); } ArrayRef<Expr *> updates() const { return const_cast<OMPLoopDirective *>(this)->getUpdates(); } ArrayRef<Expr *> finals() { return getFinals(); } ArrayRef<Expr *> finals() const { return const_cast<OMPLoopDirective *>(this)->getFinals(); } ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); } ArrayRef<Expr *> dependent_counters() const { return const_cast<OMPLoopDirective *>(this)->getDependentCounters(); } ArrayRef<Expr *> dependent_inits() { return getDependentInits(); } ArrayRef<Expr *> dependent_inits() const { return const_cast<OMPLoopDirective *>(this)->getDependentInits(); } ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); } ArrayRef<Expr *> finals_conditions() const { return const_cast<OMPLoopDirective *>(this)->getFinalsConditions(); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass || T->getStmtClass() == OMPForDirectiveClass || T->getStmtClass() == OMPForSimdDirectiveClass || T->getStmtClass() == OMPParallelForDirectiveClass || T->getStmtClass() == OMPParallelForSimdDirectiveClass || T->getStmtClass() == OMPTaskLoopDirectiveClass || T->getStmtClass() == OMPTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPMasterTaskLoopDirectiveClass || T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass || T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPDistributeDirectiveClass || T->getStmtClass() == OMPTargetParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPDistributeSimdDirectiveClass || T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass || 
T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp simd' directive. /// /// \code /// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, llvm::omp::OMPD_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, llvm::omp::OMPD_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass; } }; /// This represents '#pragma omp for' directive. /// /// \code /// #pragma omp for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for' has clauses 'private' with the /// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c' /// and 'd'. /// class OMPForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, llvm::omp::OMPD_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, llvm::omp::OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// This represents '#pragma omp for simd' directive. /// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. 
/// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// This represents '#pragma omp sections' directive. /// /// \code /// #pragma omp sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp sections' has clauses 'private' with /// the variables 'a' and 'b' and 'reduction' with operator '+' and variables /// 'c' and 'd'. /// class OMPSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, llvm::omp::OMPD_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, llvm::omp::OMPD_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSectionsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionsDirectiveClass; } }; /// This represents '#pragma omp section' directive. /// /// \code /// #pragma omp section /// \endcode /// class OMPSectionDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive.
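// Editorial example (illustrative, not upstream text): the HasCancel flag
// above records whether the region contains a matching cancel construct,
// e.g.:
//
//   #pragma omp sections
//   {
//     #pragma omp section
//     {
//       if (err) {
//         #pragma omp cancel sections
//       }
//     }
//   }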
/// OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPSectionDirectiveClass, llvm::omp::OMPD_section, StartLoc, EndLoc, 0, 1), HasCancel(false) {} /// Build an empty directive. /// explicit OMPSectionDirective() : OMPExecutableDirective(this, OMPSectionDirectiveClass, llvm::omp::OMPD_section, SourceLocation(), SourceLocation(), 0, 1), HasCancel(false) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPSectionDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive. /// /// \param C AST context. /// static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionDirectiveClass; } }; /// This represents '#pragma omp single' directive. /// /// \code /// #pragma omp single private(a,b) copyprivate(c,d) /// \endcode /// In this example directive '#pragma omp single' has clauses 'private' with /// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'. /// class OMPSingleDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, llvm::omp::OMPD_single, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSingleDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, llvm::omp::OMPD_single, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPSingleDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSingleDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSingleDirectiveClass; } }; /// This represents '#pragma omp master' directive. /// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive.
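// Editorial sketch (illustrative, not upstream text): Create() is the
// factory Sema invokes once a region has been parsed and checked, while
// CreateEmpty() reserves an uninitialized node for the ASTStmtReader to fill
// in during deserialization. A simplified call, with Context, StartLoc,
// EndLoc, Clauses and AStmt standing in for Sema's actual state:
//
//   StmtResult Res =
//       OMPSingleDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);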
/// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPMasterDirectiveClass, llvm::omp::OMPD_master, StartLoc, EndLoc, 0, 1) { } /// Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(this, OMPMasterDirectiveClass, llvm::omp::OMPD_master, SourceLocation(), SourceLocation(), 0, 1) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// This represents '#pragma omp critical' directive. /// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Name of the directive. DeclarationNameInfo DirName; /// Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, StartLoc, EndLoc, NumClauses, 1), DirName(Name) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPCriticalDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, SourceLocation(), SourceLocation(), NumClauses, 1), DirName() {} /// Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// This represents '#pragma omp parallel for' directive. /// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. 
/// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current region has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// \param NumClauses Number of clauses. /// explicit OMPParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master' directive. /// /// \code /// #pragma omp parallel master private(a,b) /// \endcode /// In this example directive '#pragma omp parallel master' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPParallelMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelMasterDirectiveClass, llvm::omp::OMPD_parallel_master, StartLoc, EndLoc, NumClauses, 1) {} explicit OMPParallelMasterDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelMasterDirectiveClass, llvm::omp::OMPD_parallel_master, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPParallelMasterDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelMasterDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterDirectiveClass; } }; /// This represents '#pragma omp parallel sections' directive. /// /// \code /// #pragma omp parallel sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel sections' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPParallelSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, llvm::omp::OMPD_parallel_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, llvm::omp::OMPD_parallel_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelSectionsDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelSectionsDirectiveClass; } }; /// This represents '#pragma omp task' directive. /// /// \code /// #pragma omp task private(a,b) final(d) /// \endcode /// In this example directive '#pragma omp task' has clauses 'private' with the /// variables 'a' and 'b' and 'final' with condition 'd'. /// class OMPTaskDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if this directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, llvm::omp::OMPD_task, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTaskDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, llvm::omp::OMPD_task, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true, if current directive has inner cancel directive. 
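// Editorial example (illustrative, not upstream text): HasCancel is set when
// the task region contains a matching '#pragma omp cancel taskgroup':
//
//   #pragma omp task final(d)
//   {
//     if (give_up) {
//       #pragma omp cancel taskgroup
//     }
//     do_work();
//   }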
/// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// This represents '#pragma omp taskyield' directive. /// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskyieldDirective() : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(this, OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskwaitDirective() : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskwaitDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskwaitDirectiveClass; } }; /// This represents '#pragma omp taskgroup' directive. /// /// \code /// #pragma omp taskgroup /// \endcode /// class OMPTaskgroupDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, llvm::omp::OMPD_taskgroup, StartLoc, EndLoc, NumClauses, 2) {} /// Build an empty directive. /// \param NumClauses Number of clauses. /// explicit OMPTaskgroupDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, llvm::omp::OMPD_taskgroup, SourceLocation(), SourceLocation(), NumClauses, 2) {} /// Sets the task_reduction return variable. void setReductionRef(Expr *RR) { *std::next(child_begin(), 1) = RR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param ReductionRef Reference to the task_reduction return variable. /// static OMPTaskgroupDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *ReductionRef); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns reference to the task_reduction return variable. const Expr *getReductionRef() const { return static_cast<const Expr *>(*std::next(child_begin(), 1)); } Expr *getReductionRef() { return static_cast<Expr *>(*std::next(child_begin(), 1)); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskgroupDirectiveClass; } }; /// This represents '#pragma omp flush' directive. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has two arguments: variables 'a' /// and 'b'. /// The 'omp flush' directive does not have clauses but may have an optional list of /// variables to flush. This list of variables is stored within a synthetic /// FlushClause. class OMPFlushDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location.
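// Editorial example (illustrative, not upstream text): the task_reduction
// return variable managed by setReductionRef()/getReductionRef() above backs
// code such as:
//
//   int sum = 0;
//   #pragma omp taskgroup task_reduction(+: sum)
//   {
//     #pragma omp task in_reduction(+: sum)
//     sum += compute_chunk();
//   }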
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, llvm::omp::OMPD_flush, StartLoc, EndLoc, NumClauses, 0) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPFlushDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, llvm::omp::OMPD_flush, SourceLocation(), SourceLocation(), NumClauses, 0) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). /// static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPFlushDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPFlushDirectiveClass; } }; /// This represents '#pragma omp ordered' directive. /// /// \code /// #pragma omp ordered /// \endcode /// class OMPOrderedDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPOrderedDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPOrderedDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPOrderedDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPOrderedDirectiveClass; } }; /// This represents '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has clause 'capture'. /// class OMPAtomicDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// x = x binop expr; /// x = expr binop x; /// \endcode /// This field is true for the first form of the expression and false for the /// second. 
Required for correct codegen of non-associative operations (like /// << or >>). bool IsXLHSInRHSPart; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// v = x; <update x>; /// <update x>; v = x; /// \endcode /// This field is true for the first(postfix) form of the expression and false /// otherwise. bool IsPostfixUpdate; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, StartLoc, EndLoc, NumClauses, 5), IsXLHSInRHSPart(false), IsPostfixUpdate(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPAtomicDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, SourceLocation(), SourceLocation(), NumClauses, 5), IsXLHSInRHSPart(false), IsPostfixUpdate(false) {} /// Set 'x' part of the associated expression/statement. void setX(Expr *X) { *std::next(child_begin()) = X; } /// Set helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; } /// Set 'v' part of the associated expression/statement. void setV(Expr *V) { *std::next(child_begin(), 3) = V; } /// Set 'expr' part of the associated expression/statement. void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; } public: /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr' /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for /// detailed description of 'x', 'v' and 'expr'). /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param X 'x' part of the associated expression/statement. /// \param V 'v' part of the associated expression/statement. /// \param E 'expr' part of the associated expression/statement. /// \param UE Helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the /// second. /// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. static OMPAtomicDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPAtomicDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get 'x' part of the associated expression/statement. 
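// Editorial example (illustrative, not upstream text): concrete shapes of the
// two flags documented above. In the first statement 'x' sits on the left of
// the RHS (IsXLHSInRHSPart); in the second, 'v' captures the value of 'x'
// before the update (IsPostfixUpdate):
//
//   #pragma omp atomic update
//   x = x << 1;
//
//   #pragma omp atomic capture
//   { v = x; x = x + 1; }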
Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); } const Expr *getX() const { return cast_or_null<Expr>(*std::next(child_begin())); } /// Get helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. Expr *getUpdateExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 2)); } const Expr *getUpdateExpr() const { return cast_or_null<Expr>(*std::next(child_begin(), 2)); } /// Return true if helper update expression has form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; } /// Return true if 'v' expression must be updated to original value of /// 'x', false if 'v' must be updated to the new value of 'x'. bool isPostfixUpdate() const { return IsPostfixUpdate; } /// Get 'v' part of the associated expression/statement. Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); } const Expr *getV() const { return cast_or_null<Expr>(*std::next(child_begin(), 3)); } /// Get 'expr' part of the associated expression/statement. Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); } const Expr *getExpr() const { return cast_or_null<Expr>(*std::next(child_begin(), 4)); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPAtomicDirectiveClass; } }; /// This represents '#pragma omp target' directive. /// /// \code /// #pragma omp target if(a) /// \endcode /// In this example directive '#pragma omp target' has clause 'if' with /// condition 'a'. /// class OMPTargetDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDirectiveClass, llvm::omp::OMPD_target, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDirectiveClass, llvm::omp::OMPD_target, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDirectiveClass; } }; /// This represents '#pragma omp target data' directive. /// /// \code /// #pragma omp target data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target data' has clauses 'device' /// with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. 
/// class OMPTargetDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDataDirectiveClass; } }; /// This represents '#pragma omp target enter data' directive. /// /// \code /// #pragma omp target enter data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target enter data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetEnterDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetEnterDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetEnterDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. 
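// Editorial example (illustrative, not upstream text): 'target enter data'
// maps variables to the device without an associated structured block; the
// mapping stays live until a matching 'target exit data':
//
//   #pragma omp target enter data map(to: a[0:n])
//   run_kernels(a, n);
//   #pragma omp target exit data map(from: a[0:n])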
/// static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetEnterDataDirectiveClass; } }; /// This represents '#pragma omp target exit data' directive. /// /// \code /// #pragma omp target exit data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target exit data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetExitDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetExitDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetExitDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetExitDataDirectiveClass; } }; /// This represents '#pragma omp target parallel' directive. /// /// \code /// #pragma omp target parallel if(a) /// \endcode /// In this example directive '#pragma omp target parallel' has clause 'if' with /// condition 'a'. /// class OMPTargetParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. 
/// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetParallelDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelDirectiveClass; } }; /// This represents '#pragma omp target parallel for' directive. /// /// \code /// #pragma omp target parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp target parallel for' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPTargetParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current region has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPTargetParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForDirectiveClass; } }; /// This represents '#pragma omp teams' directive. 
/// /// \code /// #pragma omp teams if(a) /// \endcode /// In this example directive '#pragma omp teams' has clause 'if' with /// condition 'a'. /// class OMPTeamsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTeamsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDirectiveClass; } }; /// This represents '#pragma omp cancellation point' directive. /// /// \code /// #pragma omp cancellation point for /// \endcode /// /// In this example a cancellation point is created for innermost 'for' region. class OMPCancellationPointDirective : public OMPExecutableDirective { friend class ASTStmtReader; OpenMPDirectiveKind CancelRegion; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, StartLoc, EndLoc, 0, 0), CancelRegion(llvm::omp::OMPD_unknown) {} /// Build an empty directive. /// explicit OMPCancellationPointDirective() : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, SourceLocation(), SourceLocation(), 0, 0), CancelRegion(llvm::omp::OMPD_unknown) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPCancellationPointDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Get cancellation region for the current cancellation point. 
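// Editorial example (illustrative, not upstream text): a cancellation point
// names the kind of the innermost enclosing region, which is what
// CancelRegion stores (here, OMPD_for):
//
//   #pragma omp for
//   for (int i = 0; i < n; ++i) {
//     #pragma omp cancellation point for
//     process(i);
//   }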
OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancellationPointDirectiveClass; } }; /// This represents '#pragma omp cancel' directive. /// /// \code /// #pragma omp cancel for /// \endcode /// /// In this example a cancel is created for innermost 'for' region. class OMPCancelDirective : public OMPExecutableDirective { friend class ASTStmtReader; OpenMPDirectiveKind CancelRegion; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, StartLoc, EndLoc, NumClauses, 0), CancelRegion(llvm::omp::OMPD_unknown) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. explicit OMPCancelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, SourceLocation(), SourceLocation(), NumClauses, 0), CancelRegion(llvm::omp::OMPD_unknown) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPCancelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCancelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancelDirectiveClass; } }; /// This represents '#pragma omp taskloop' directive. /// /// \code /// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopDirectiveClass; } }; /// This represents '#pragma omp taskloop simd' directive. /// /// \code /// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop simd' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
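// Editorial sketch (illustrative, not upstream text): CreateEmpty() pairs
// with the friended ASTStmtReader; deserialization first allocates a node
// with slots for the recorded clause and loop counts, then fills them in.
// NumClauses and CollapsedNum stand in for values read from the serialized
// AST:
//
//   auto *D = OMPTaskLoopSimdDirective::CreateEmpty(
//       Context, NumClauses, CollapsedNum, Stmt::EmptyShell());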
/// static OMPTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp master taskloop' directive. /// /// \code /// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass, llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass, llvm::omp::OMPD_master_taskloop, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPMasterTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass; } }; /// This represents '#pragma omp master taskloop simd' directive. 
/// /// \code /// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop simd' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_master_taskloop_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_master_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPMasterTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \p NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master taskloop' directive. /// /// \code /// #pragma omp parallel master taskloop private(a,b) grainsize(val) /// num_tasks(num) /// \endcode /// In this example directive '#pragma omp parallel master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelMasterTaskLoopDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelMasterTaskLoopDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelMasterTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass; } }; /// This represents '#pragma omp parallel master taskloop simd' directive. /// /// \code /// #pragma omp parallel master taskloop simd private(a,b) grainsize(val) /// num_tasks(num) /// \endcode /// In this example directive '#pragma omp parallel master taskloop simd' has /// clauses 'private' with the variables 'a' and 'b', 'grainsize' with /// expression 'val' and 'num_tasks' with expression 'num'. /// class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelMasterTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelMasterTaskLoopSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp distribute' directive. /// /// \code /// #pragma omp distribute private(a,b) /// \endcode /// In this example directive '#pragma omp distribute' has clauses 'private' /// with the variables 'a' and 'b' /// class OMPDistributeDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, llvm::omp::OMPD_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, llvm::omp::OMPD_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeDirectiveClass; } }; /// This represents '#pragma omp target update' directive. /// /// \code /// #pragma omp target update to(a) from(b) device(1) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' with /// argument 'a', clause 'from' with argument 'b' and clause 'device' with /// argument '1'. /// class OMPTargetUpdateDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, llvm::omp::OMPD_target_update, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetUpdateDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, llvm::omp::OMPD_target_update, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetUpdateDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses The number of clauses. /// static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetUpdateDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for' composite /// directive. /// /// \code /// #pragma omp distribute parallel for private(a,b) /// \endcode /// In this example directive '#pragma omp distribute parallel for' has clause /// 'private' with the variables 'a' and 'b' /// class OMPDistributeParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, llvm::omp::OMPD_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, llvm::omp::OMPD_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp distribute parallel for simd' has /// clause 'private' with the variables 'x' /// class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPDistributeParallelForSimdDirective *Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForSimdDirective *CreateEmpty( const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp distribute simd' composite directive. /// /// \code /// #pragma omp distribute simd private(x) /// \endcode /// In this example directive '#pragma omp distribute simd' has clause /// 'private' with the variables 'x' /// class OMPDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, llvm::omp::OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, llvm::omp::OMPD_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp target parallel for simd' directive. /// /// \code /// #pragma omp target parallel for simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target parallel for simd' has clauses /// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen' /// with the variable 'c'. /// class OMPTargetParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, llvm::omp::OMPD_target_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, llvm::omp::OMPD_target_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target simd' directive. /// /// \code /// #pragma omp target simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target simd' has clauses 'private' /// with the variable 'a', 'map' with the variable 'b' and 'safelen' with /// the variable 'c'. /// class OMPTargetSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, llvm::omp::OMPD_target_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, llvm::omp::OMPD_target_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. 
/// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute' directive. /// /// \code /// #pragma omp teams distribute private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, llvm::omp::OMPD_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, llvm::omp::OMPD_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp teams distribute simd' /// combined directive. 
/// /// \code /// #pragma omp teams distribute simd private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute simd' /// has clause 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for simd' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. 
/// static OMPTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams' directive. /// /// \code /// #pragma omp target teams if(a>0) /// \endcode /// In this example directive '#pragma omp target teams' has clause 'if' with /// condition 'a>0'. /// class OMPTargetTeamsDirective final : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, llvm::omp::OMPD_target_teams, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, llvm::omp::OMPD_target_teams, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDirectiveClass; } }; /// This represents '#pragma omp target teams distribute' combined directive. /// /// \code /// #pragma omp target teams distribute private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute' has clause /// 'private' with the variables 'x' /// class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPTargetTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, llvm::omp::OMPD_target_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, llvm::omp::OMPD_target_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for' combined /// directive. /// /// \code /// #pragma omp target teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for simd' /// combined directive. /// /// \code /// #pragma omp target teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for simd' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForSimdDirective( unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive. /// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; } // end namespace clang #endif
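The per-class classof() predicates above are what LLVM-style RTTI (llvm::isa / llvm::dyn_cast) dispatches on, and a HasCancel flag is only defined on the non-simd loop directives. A minimal client-side sketch of how that is typically consumed, assuming the usual Clang/LLVM headers; mayContainCancel is a hypothetical helper written for this document, not an API from the header:

#include "clang/AST/StmtOpenMP.h"
#include "llvm/Support/Casting.h"

using namespace clang;

// Hypothetical helper (not part of the header above): true when S is one of
// the taskloop/for-family directives shown above that can carry an inner
// 'cancel'. dyn_cast dispatches through each class's classof() predicate.
static bool mayContainCancel(const Stmt *S) {
  if (const auto *D = llvm::dyn_cast<OMPTaskLoopDirective>(S))
    return D->hasCancel();
  if (const auto *D = llvm::dyn_cast<OMPMasterTaskLoopDirective>(S))
    return D->hasCancel();
  if (const auto *D = llvm::dyn_cast<OMPParallelMasterTaskLoopDirective>(S))
    return D->hasCancel();
  if (const auto *D = llvm::dyn_cast<OMPDistributeParallelForDirective>(S))
    return D->hasCancel();
  if (const auto *D = llvm::dyn_cast<OMPTeamsDistributeParallelForDirective>(S))
    return D->hasCancel();
  if (const auto *D =
          llvm::dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(S))
    return D->hasCancel();
  return false; // the *simd variants define no HasCancel flag
}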
2001.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose
void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0],
                    double B[120 + 0][120 + 0][120 + 0]) {
  int t14;
  int t12;
  int t10;
  int t8;
  int t6;
  int t4;
  int t2;
  /* the time-step count is specialized to 500 here; the tsteps parameter is unused */
  for (t2 = 1; t2 <= 500; t2 += 1) {
    /* t6 is declared outside the construct and is not the associated loop
       variable, so it must appear in the private clause; leaving it out makes
       t6 shared and the writes below race across threads. */
#pragma omp parallel for private(t4, t6, t8, t10, t12, t14)
    for (t4 = 1; t4 <= n - 2; t4 += 32)
      for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 16)
          for (t10 = t8; t10 <= (n - 2 < t8 + 15 ? n - 2 : t8 + 15); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 16)
              for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1)
                B[t6][t10][t14] =
                    0.125 * (A[t6 + 1][t10][t14] - 2 * A[t6][t10][t14] + A[t6 - 1][t10][t14]) +
                    0.125 * (A[t6][t10 + 1][t14] - 2 * A[t6][t10][t14] + A[t6][t10 - 1][t14]) +
                    0.125 * (A[t6][t10][t14 + 1] - 2 * A[t6][t10][t14] + A[t6][t10][t14 - 1]) +
                    A[t6][t10][t14];
#pragma omp parallel for private(t4, t6, t8, t10, t12, t14)
    for (t4 = 1; t4 <= n - 2; t4 += 32)
      for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 16)
          for (t10 = t8; t10 <= (n - 2 < t8 + 15 ? n - 2 : t8 + 15); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 16)
              for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1)
                A[t6][t10][t14] =
                    0.125 * (B[t6 + 1][t10][t14] - 2 * B[t6][t10][t14] + B[t6 - 1][t10][t14]) +
                    0.125 * (B[t6][t10 + 1][t14] - 2 * B[t6][t10][t14] + B[t6][t10 - 1][t14]) +
                    0.125 * (B[t6][t10][t14 + 1] - 2 * B[t6][t10][t14] + B[t6][t10][t14 - 1]) +
                    B[t6][t10][t14];
  }
}
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose
void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0],
                    double B[120 + 0][120 + 0][120 + 0]) {
  int t14;
  int t12;
  int t10;
  int t8;
  int t6;
  int t4;
  int t2;
  for (t2 = 1; t2 <= 500; t2 += 1) {
    for (t4 = 1; t4 <= n - 2; t4 += 32)
      for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 16)
          for (t10 = t8; t10 <= (n - 2 < t8 + 15 ? n - 2 : t8 + 15); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 16)
              for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1)
                B[t6][t10][t14] =
                    0.125 * (A[t6 + 1][t10][t14] - 2 * A[t6][t10][t14] + A[t6 - 1][t10][t14]) +
                    0.125 * (A[t6][t10 + 1][t14] - 2 * A[t6][t10][t14] + A[t6][t10 - 1][t14]) +
                    0.125 * (A[t6][t10][t14 + 1] - 2 * A[t6][t10][t14] + A[t6][t10][t14 - 1]) +
                    A[t6][t10][t14];
    for (t4 = 1; t4 <= n - 2; t4 += 32)
      for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 16)
          for (t10 = t8; t10 <= (n - 2 < t8 + 15 ? n - 2 : t8 + 15); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 16)
              for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1)
                A[t6][t10][t14] =
                    0.125 * (B[t6 + 1][t10][t14] - 2 * B[t6][t10][t14] + B[t6 - 1][t10][t14]) +
                    0.125 * (B[t6][t10 + 1][t14] - 2 * B[t6][t10][t14] + B[t6][t10 - 1][t14]) +
                    0.125 * (B[t6][t10][t14 + 1] - 2 * B[t6][t10][t14] + B[t6][t10][t14 - 1]) +
                    B[t6][t10][t14];
  }
}
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose
void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0],
                    double B[120 + 0][120 + 0][120 + 0]) {
  int t14;
  int t12;
  int t10;
  int t8;
  int t6;
  int t4;
  int t2;
  for (t2 = 1; t2 <= 500; t2 += 1) {
    /* every loop variable declared outside the construct must be privatized
       explicitly; only t4, the associated loop variable, is predetermined
       private, so t6 has to be listed as well. */
#pragma omp parallel for private(t4, t6, t8, t10, t12, t14)
    for (t4 = 1; t4 <= n - 2; t4 += 32)
      for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 16)
          for (t10 = t8; t10 <= (n - 2 < t8 + 15 ? n - 2 : t8 + 15); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 16)
              for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1)
                B[t6][t10][t14] =
                    0.125 * (A[t6 + 1][t10][t14] - 2 * A[t6][t10][t14] + A[t6 - 1][t10][t14]) +
                    0.125 * (A[t6][t10 + 1][t14] - 2 * A[t6][t10][t14] + A[t6][t10 - 1][t14]) +
                    0.125 * (A[t6][t10][t14 + 1] - 2 * A[t6][t10][t14] + A[t6][t10][t14 - 1]) +
                    A[t6][t10][t14];
#pragma omp parallel for private(t4, t6, t8, t10, t12, t14)
    for (t4 = 1; t4 <= n - 2; t4 += 32)
      for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 16)
          for (t10 = t8; t10 <= (n - 2 < t8 + 15 ? n - 2 : t8 + 15); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 16)
              for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1)
                A[t6][t10][t14] =
                    0.125 * (B[t6 + 1][t10][t14] - 2 * B[t6][t10][t14] + B[t6 - 1][t10][t14]) +
                    0.125 * (B[t6][t10 + 1][t14] - 2 * B[t6][t10][t14] + B[t6][t10 - 1][t14]) +
                    0.125 * (B[t6][t10][t14 + 1] - 2 * B[t6][t10][t14] + B[t6][t10][t14 - 1]) +
                    B[t6][t10][t14];
  }
}
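In the OpenMP variants above, the private clause must name every loop control variable that is declared outside the construct: OpenMP predetermines only the iteration variable of the loop associated with the parallel for (here t4) as private, so an inner tile variable such as t6 would otherwise be shared across threads and race. A minimal self-contained illustration of that rule, written for this document rather than taken from the dataset (compiles with or without -fopenmp):

#include <cstdio>

// Tiled traversal in the style of the kernels above. Only the iteration
// variable of the associated loop (i) is predetermined private; the inner
// tile variable j is declared outside the construct, so it must be listed
// in the private clause or every thread would update one shared j.
int main() {
  const int N = 8;
  int i, j;
  double out[N] = {0};
#pragma omp parallel for private(j)
  for (i = 0; i < N; i += 2)                // tiles of width 2
    for (j = i; j < i + 2 && j < N; j += 1) // tiles partition j, so no two
      out[j] = 2.0 * j;                     // threads write the same element
  for (i = 0; i < N; i += 1)
    std::printf("%g\n", out[i]);
  return 0;
}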
nr_incore.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Incore version of non-relativistic integrals JK contraction * ic in CVHFic... is short for incore */ #include <stdlib.h> #include <math.h> //#include <omp.h> #include "config.h" #include "cvhf.h" #include "np_helper/np_helper.h" #include "fblas.h" /* * J */ void CVHFics8_ij_s2kl_o0(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double dm_ij; double vj_ij = 0; if (ic > jc) { dm_ij = dm[ic*nao+jc] + dm[jc*nao+ic]; } else if (ic == jc) { dm_ij = dm[ic*nao+ic]; } else { return; } for (i = 0, ij = 0; i < ic; i++) { for (j = 0; j < i; j++, ij++) { vj_ij += eri[ij] *(dm[i*nao+j]+dm[j*nao+i]); vj[i*nao+j] += eri[ij] * dm_ij; } vj_ij += eri[ij] * dm[i*nao+i]; vj[i*nao+i] += eri[ij] * dm_ij; ij++; } // i == ic for (j = 0; j < jc; j++, ij++) { vj_ij += eri[ij] *(dm[i*nao+j]+dm[j*nao+i]); vj[i*nao+j] += eri[ij] * dm_ij; } vj_ij += eri[ij] * dm_ij; vj[ic*nao+jc] += vj_ij; } void CVHFics4_ij_s2kl_o0(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double dm_ij; if (ic > jc) { dm_ij = dm[ic*nao+jc] + dm[jc*nao+ic]; } else if (ic == jc) { dm_ij = dm[ic*nao+ic]; } else { return; } for (i = 0, ij = 0; i < nao; i++) { for (j = 0; j <= i; j++, ij++) { vj[i*nao+j] += eri[ij] * dm_ij; } } } void CVHFics2kl_kl_s1ij_o0(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double vj_ij = 0; for (i = 0, ij = 0; i < nao; i++) { for (j = 0; j < i; j++, ij++) { vj_ij += eri[ij] *(dm[i*nao+j]+dm[j*nao+i]); } vj_ij += eri[ij] * dm[i*nao+i]; ij++; } vj[ic*nao+jc] += vj_ij; } /* * K */ void CVHFics8_jk_s1il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k < ic; k++) { for (l = 0; l < k; l++, kl++) { vk[jc*nao+l] += eri[kl] * dm[ic*nao+k]; vk[ic*nao+l] += eri[kl] * dm[jc*nao+k]; vk[jc*nao+k] += eri[kl] * dm[ic*nao+l]; vk[ic*nao+k] += eri[kl] * dm[jc*nao+l]; vk[l*nao+jc] += eri[kl] * dm[k*nao+ic]; vk[k*nao+jc] += eri[kl] * dm[l*nao+ic]; vk[l*nao+ic] += eri[kl] * dm[k*nao+jc]; vk[k*nao+ic] += eri[kl] * dm[l*nao+jc]; } vk[jc*nao+k] += eri[kl] * dm[ic*nao+k]; vk[ic*nao+k] += eri[kl] * dm[jc*nao+k]; vk[k*nao+jc] += eri[kl] * dm[k*nao+ic]; vk[k*nao+ic] += eri[kl] * dm[k*nao+jc]; kl++; } k = ic; for (l = 0; l < jc; l++, kl++) { // l<k vk[jc*nao+l] += eri[kl] * dm[ic*nao+k]; vk[ic*nao+l] += eri[kl] * dm[jc*nao+k]; vk[jc*nao+k] += eri[kl] * dm[ic*nao+l]; vk[ic*nao+k] += eri[kl] * dm[jc*nao+l]; vk[l*nao+jc] += eri[kl] * dm[k*nao+ic]; vk[k*nao+jc] += eri[kl] * dm[l*nao+ic]; vk[l*nao+ic] += eri[kl] * dm[k*nao+jc]; vk[k*nao+ic] += eri[kl] * dm[l*nao+jc]; } // ic = k, jc = l; vk[jc*nao+jc] += eri[kl] * dm[ic*nao+ic]; vk[ic*nao+jc] += eri[kl] * dm[jc*nao+ic]; vk[jc*nao+ic] += eri[kl] * dm[ic*nao+jc]; vk[ic*nao+ic] += eri[kl] * dm[jc*nao+jc]; } else if (ic == jc) { for (k = 0, kl = 0; k < ic; k++) { for (l = 0; l < k; l++, kl++) { vk[ic*nao+l] += eri[kl] * dm[ic*nao+k]; vk[ic*nao+k] += eri[kl] * dm[ic*nao+l]; 
vk[l*nao+ic] += eri[kl] * dm[k*nao+ic]; vk[k*nao+ic] += eri[kl] * dm[l*nao+ic]; } vk[ic*nao+k] += eri[kl] * dm[ic*nao+k]; vk[k*nao+ic] += eri[kl] * dm[k*nao+ic]; kl++; } k = ic; for (l = 0; l < k; l++, kl++) { // l<k vk[ic*nao+l] += eri[kl] * dm[ic*nao+ic]; vk[l*nao+ic] += eri[kl] * dm[ic*nao+ic]; vk[ic*nao+ic] += eri[kl] * dm[ic*nao+l]; vk[ic*nao+ic] += eri[kl] * dm[l*nao+ic]; } // ic = jc = k = l vk[ic*nao+ic] += eri[kl] * dm[ic*nao+ic]; } } void CVHFics8_jk_s2il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l; //double vk_jj = 0; //double vk_ij = 0; if (ic > jc) { // k < jc for (k=0; k < jc; k++) { for (l = 0; l < k; l++) { vk[jc*nao+l] += eri[l] * dm[ic*nao+k]; vk[jc*nao+k] += eri[l] * dm[ic*nao+l]; vk[ic*nao+l] += eri[l] * dm[jc*nao+k]; vk[ic*nao+k] += eri[l] * dm[jc*nao+l]; } // l = k vk[jc*nao+k] += eri[k] * dm[ic*nao+k]; vk[ic*nao+k] += eri[k] * dm[jc*nao+k]; eri += k + 1; } // k = jc for (l = 0; l < k; l++) { vk[jc*nao+l ] += eri[l] * dm[ic*nao+jc]; vk[ic*nao+l ] += eri[l] * dm[jc*nao+jc]; vk[jc*nao+jc] += eri[l] *(dm[ic*nao+l] + dm[l*nao+ic]); vk[ic*nao+jc] += eri[l] * dm[jc*nao+l]; } // l = k = jc vk[jc*nao+jc] += eri[l] *(dm[ic*nao+jc] + dm[jc*nao+ic]); vk[ic*nao+jc] += eri[l] * dm[jc*nao+jc]; eri += k + 1; // k > jc for (k=jc+1; k < ic; k++) { // l < jc for (l = 0; l < jc; l++) { vk[jc*nao+l] += eri[l] * dm[ic*nao+k]; vk[ic*nao+l] += eri[l] * dm[jc*nao+k]; vk[ic*nao+k] += eri[l] * dm[jc*nao+l]; vk[k*nao+jc] += eri[l] * dm[l*nao+ic]; } // l = jc vk[jc*nao+jc] += eri[l] *(dm[ic*nao+k] + dm[k*nao+ic]); vk[ic*nao+jc] += eri[l] * dm[jc*nao+k]; vk[ic*nao+k] += eri[l] * dm[jc*nao+jc]; vk[k*nao+jc] += eri[l] * dm[jc*nao+ic]; //eri += jc+1; // l > jc for (l = jc+1; l < k; l++) { vk[ic*nao+l] += eri[l] * dm[jc*nao+k]; vk[ic*nao+k] += eri[l] * dm[jc*nao+l]; vk[l*nao+jc] += eri[l] * dm[k*nao+ic]; vk[k*nao+jc] += eri[l] * dm[l*nao+ic]; } // l = k vk[jc*nao+k] += eri[l] * dm[ic*nao+k]; vk[ic*nao+k] += eri[l] * dm[jc*nao+k]; vk[k*nao+jc] += eri[l] * dm[k*nao+ic]; eri += k + 1; } // k = ic for (l = 0; l < jc; l++) { vk[jc*nao+l] += eri[l] * dm[ic*nao+ic]; vk[ic*nao+l] += eri[l] * dm[jc*nao+ic]; vk[ic*nao+ic] += eri[l] *(dm[jc*nao+l] + dm[l*nao+jc]); vk[ic*nao+jc] += eri[l] * dm[l*nao+ic]; } // ic = k, jc = l; vk[jc*nao+jc] += eri[l] * dm[ic*nao+ic]; vk[ic*nao+jc] += eri[l] * dm[jc*nao+ic]; vk[ic*nao+ic] += eri[l] * dm[jc*nao+jc]; eri += jc + 1; } else if (ic == jc) { for (k = 0; k < ic-1; k+=2) { for (l = 0; l < k; l++) { vk[ic*nao+l] += eri[l] * dm[ic*nao+k]; vk[ic*nao+k] += eri[l] * dm[ic*nao+l]; vk[ic*nao+l ] += eri[l+k+1] * dm[ic*nao+k+1]; vk[ic*nao+k+1] += eri[l+k+1] * dm[ic*nao+l ]; } vk[ic*nao+k] += eri[k] * dm[ic*nao+k]; eri += k+1; vk[ic*nao+k ] += eri[k] * dm[ic*nao+k+1]; vk[ic*nao+k+1] += eri[k] * dm[ic*nao+k ]; vk[ic*nao+k+1] += eri[k+1] * dm[ic*nao+k+1]; eri += k+2; } for (; k < ic; k++) { for (l = 0; l < k; l++) { vk[ic*nao+l] += eri[l] * dm[ic*nao+k]; vk[ic*nao+k] += eri[l] * dm[ic*nao+l]; } vk[ic*nao+k] += eri[k] * dm[ic*nao+k]; eri += k+1; } for (l = 0; l < k; l++) { // l<k vk[ic*nao+l] += eri[l] * dm[ic*nao+ic]; vk[ic*nao+ic] += eri[l] *(dm[ic*nao+l] + dm[l*nao+ic]); } // ic = jc = k = l vk[ic*nao+ic] += eri[l] * dm[ic*nao+ic]; eri += k + 1; } } void CVHFics4_jk_s1il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < k; l++, kl++) { vk[jc*nao+l] += eri[kl] * dm[ic*nao+k]; vk[jc*nao+k] += eri[kl] * dm[ic*nao+l]; vk[ic*nao+l] += eri[kl] * dm[jc*nao+k]; 
vk[ic*nao+k] += eri[kl] * dm[jc*nao+l]; } vk[jc*nao+k] += eri[kl] * dm[ic*nao+k]; vk[ic*nao+k] += eri[kl] * dm[jc*nao+k]; kl++; } } else if (ic == jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < k; l++, kl++) { vk[ic*nao+l] += eri[kl] * dm[ic*nao+k]; vk[ic*nao+k] += eri[kl] * dm[ic*nao+l]; } vk[ic*nao+k] += eri[kl] * dm[ic*nao+k]; kl++; } } } void CVHFics4_il_s1jk_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc); } void CVHFics4_jk_s2il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k <= jc; k++) { for (l = 0; l < k; l++, kl++) { vk[jc*nao+l] += eri[kl] * dm[ic*nao+k]; vk[jc*nao+k] += eri[kl] * dm[ic*nao+l]; vk[ic*nao+l] += eri[kl] * dm[jc*nao+k]; vk[ic*nao+k] += eri[kl] * dm[jc*nao+l]; } vk[jc*nao+k] += eri[kl] * dm[ic*nao+k]; vk[ic*nao+k] += eri[kl] * dm[jc*nao+k]; kl++; } for (k = jc+1; k <= ic; k++) { for (l = 0; l <= jc; l++, kl++) { vk[jc*nao+l] += eri[kl] * dm[ic*nao+k]; vk[ic*nao+l] += eri[kl] * dm[jc*nao+k]; vk[ic*nao+k] += eri[kl] * dm[jc*nao+l]; } for (l = jc+1; l < k; l++, kl++) { vk[ic*nao+l] += eri[kl] * dm[jc*nao+k]; vk[ic*nao+k] += eri[kl] * dm[jc*nao+l]; } vk[ic*nao+k] += eri[kl] * dm[jc*nao+k]; kl++; } for (k = ic+1; k < nao; k++) { for (l = 0, kl = k*(k+1)/2; l <= jc; l++, kl++) { vk[jc*nao+l] += eri[kl] * dm[ic*nao+k]; vk[ic*nao+l] += eri[kl] * dm[jc*nao+k]; } for (l = jc+1; l <= ic; l++, kl++) { vk[ic*nao+l] += eri[kl] * dm[jc*nao+k]; } } } else if (ic == jc) { for (k = 0, kl = 0; k <= ic; k++) { for (l = 0; l < k; l++, kl++) { vk[ic*nao+l] += eri[kl] * dm[ic*nao+k]; vk[ic*nao+k] += eri[kl] * dm[ic*nao+l]; } vk[ic*nao+k] += eri[kl] * dm[ic*nao+k]; kl++; } for (k = ic+1; k < nao; k++) { for (l = 0, kl = k*(k+1)/2; l <= ic; l++, kl++) { vk[ic*nao+l] += eri[kl] * dm[ic*nao+k]; } } } } void CVHFics4_il_s2jk_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc); } /* * einsum ijkl,ij->(s2)kl * 8-fold symmetry for eri: i>=j,k>=l,ij>=kl * input address eri of the first element for pair ij=ic*(ic+1)/2+jc * i.e. ~ &eri_ao[ij*(ij+1)/2] * dm can be non-Hermitian, * output vk might not be Hermitian * * NOTE all _s2kl (nrs8_, nrs4_, nrs2kl_) assumes the tril part of eri * being stored in C-order *contiguously*. 
so call CVHFunpack_nrblock2tril * to generate eris */ void CVHFics8_ij_s2kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { CVHFics8_ij_s2kl_o0(eri, dm, vj, nao, ic, jc); } // tri_dm: fold upper triangular dm to lower triangle, // tri_dm[i*(i+1)/2+j] = dm[i*nao+j] + dm[j*nao+i] for i > j void CVHFics8_tridm_vj(double *eri, double *tri_dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double dm_ijc = tri_dm[ic*(ic+1)/2+jc]; double vj_ij = 0; const int INC1 = 1; int i1; for (i = 0, ij = 0; i < ic; i++) { i1 = i + 1; vj_ij += ddot_(&i1, eri+ij, &INC1, tri_dm+ij, &INC1); daxpy_(&i1, &dm_ijc, eri+ij, &INC1, vj+i*nao, &INC1); ij += i1; } // i == ic for (j = 0; j < jc; j++, ij++) { vj_ij += eri[ij] * tri_dm[ij]; vj[i*nao+j] += eri[ij] * dm_ijc; } vj_ij += eri[ij] * dm_ijc; vj[ic*nao+jc] += vj_ij; } void CVHFics8_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics8_jk_s1il_o0(eri, dm, vk, nao, ic, jc); } /* * einsum ijkl,jk->(s2)il * output vk should be Hermitian */ void CVHFics8_jk_s2il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics8_jk_s2il_o0(eri, dm, vk, nao, ic, jc); } /* * einsum ijkl,jk->il * 4-fold symmetry for eri: i>=j,k>=l */ void CVHFics4_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc); } void CVHFics4_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc); } /* * output vk should be Hermitian */ void CVHFics4_jk_s2il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc); } void CVHFics4_il_s2jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc); } void CVHFics4_ij_s2kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { CVHFics4_ij_s2kl_o0(eri, dm, vj, nao, ic, jc); } void CVHFics4_kl_s2ij(double *eri, double *dm, double *vj, int nao, int ic, int jc) { if (ic >= jc) { CVHFics2kl_kl_s1ij_o0(eri, dm, vj, nao, ic, jc); } } void CVHFics1_ij_s1kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i; double dm_ij = dm[ic*nao+jc]; for (i = 0; i < nao*nao; i++) { vj[i] += eri[i] * dm_ij; } } void CVHFics1_kl_s1ij(double *eri, double *dm, double *vj, int nao, int ic, int jc) { const int INC1 = 1; int nn = nao * nao; vj[ic*nao+jc] += ddot_(&nn, eri, &INC1, dm, &INC1); } void CVHFics1_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[ic*nao+l] += eri[kl] * dm[jc*nao+k]; } } } void CVHFics1_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[jc*nao+k] += eri[kl] * dm[ic*nao+l]; } } } void CVHFics2ij_ij_s1kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i; double dm_ij; if (ic > jc) { dm_ij = dm[ic*nao+jc] + dm[jc*nao+ic]; } else if (ic == jc) { dm_ij = dm[ic*nao+ic]; } else { return; } for (i = 0; i < nao*nao; i++) { vj[i] += eri[i] * dm_ij; } } void CVHFics2ij_kl_s2ij(double *eri, double *dm, double *vj, int nao, int ic, int jc) { if (ic < jc) { return; } CVHFics1_kl_s1ij(eri, dm, vj, nao, ic, jc); } void CVHFics2ij_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[jc*nao+l] += eri[kl] * 
dm[ic*nao+k]; vk[ic*nao+l] += eri[kl] * dm[jc*nao+k]; } } } else if (ic == jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[ic*nao+l] += eri[kl] * dm[ic*nao+k]; } } } } void CVHFics2ij_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[jc*nao+k] += eri[kl] * dm[ic*nao+l]; vk[ic*nao+k] += eri[kl] * dm[jc*nao+l]; } } } else if (ic == jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[ic*nao+k] += eri[kl] * dm[ic*nao+l]; } } } } void CVHFics2kl_ij_s2kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double dm_ij = dm[ic*nao+jc]; for (i = 0, ij = 0; i < nao; i++) { for (j = 0; j <= i; j++, ij++) { vj[i*nao+j] += eri[ij] * dm_ij; } } } void CVHFics2kl_kl_s1ij(double *eri, double *dm, double *vj, int nao, int ic, int jc) { CVHFics2kl_kl_s1ij_o0(eri, dm, vj, nao, ic, jc); } void CVHFics2kl_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < k; l++, kl++) { vk[ic*nao+l] += eri[kl] * dm[jc*nao+k]; vk[ic*nao+k] += eri[kl] * dm[jc*nao+l]; } vk[ic*nao+k] += eri[kl] * dm[jc*nao+k]; kl++; } } void CVHFics2kl_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < k; l++, kl++) { vk[jc*nao+l] += eri[kl] * dm[ic*nao+k]; vk[jc*nao+k] += eri[kl] * dm[ic*nao+l]; } vk[jc*nao+k] += eri[kl] * dm[ic*nao+k]; kl++; } } /************************************************** * s8 8-fold symmetry: i>=j,k>=l,ij>=kl * s4 4-fold symmetry: i>=j,k>=l * s2ij 2-fold symmetry: i>=j * s2kl 2-fold symmetry: k>=l * s1 no permutation symmetry **************************************************/ typedef void (*FjkPtr)(double *eri, double *dm, double *vk, int nao, int ic, int jc); void CVHFnrs8_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk)()) { #pragma omp parallel default(none) \ shared(eri, dms, vjk, n_dm, nao, fjk) { int i, j, ic; size_t ij, off; size_t npair = nao*(nao+1)/2; size_t nn = nao * nao; double *v_priv = calloc(nn*n_dm, sizeof(double)); FjkPtr pf; double *pv; #pragma omp for nowait schedule(dynamic, 4) for (ij = 0; ij < npair; ij++) { i = (int)(sqrt(2*ij+.25) - .5 + 1e-7); j = ij - i*(i+1)/2; off = ij*(ij+1)/2; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic*nn; (*pf)(eri+off, dms[ic], pv, nao, i, j); } } #pragma omp critical { for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic*nn+i]; } } } free(v_priv); } } void CVHFnrs4_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk)()) { #pragma omp parallel default(none) \ shared(eri, dms, vjk, n_dm, nao, fjk) { int i, j, ic; size_t ij, off; size_t npair = nao*(nao+1)/2; size_t nn = nao * nao; double *v_priv = calloc(nn*n_dm, sizeof(double)); FjkPtr pf; double *pv; #pragma omp for nowait schedule(dynamic, 4) for (ij = 0; ij < npair; ij++) { i = (int)(sqrt(2*ij+.25) - .5 + 1e-7); j = ij - i*(i+1)/2; off = ij * npair; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic*nn; (*pf)(eri+off, dms[ic], pv, nao, i, j); } } #pragma omp critical { for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic*nn+i]; } } } free(v_priv); } } void CVHFnrs2ij_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk)()) { #pragma omp 
parallel default(none) \ shared(eri, dms, vjk, n_dm, nao, fjk) { int i, j, ic; size_t ij, off; size_t npair = nao*(nao+1)/2; size_t nn = nao * nao; double *v_priv = calloc(nn*n_dm, sizeof(double)); FjkPtr pf; double *pv; #pragma omp for nowait schedule(dynamic, 4) for (ij = 0; ij < npair; ij++) { i = (int)(sqrt(2*ij+.25) - .5 + 1e-7); j = ij - i*(i+1)/2; off = ij * nn; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic*nn; (*pf)(eri+off, dms[ic], pv, nao, i, j); } } #pragma omp critical { for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic*nn+i]; } } } free(v_priv); } } void CVHFnrs2kl_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk)()) { #pragma omp parallel default(none) \ shared(eri, dms, vjk, n_dm, nao, fjk) { int i, j, ic; size_t ij, off; size_t npair = nao*(nao+1)/2; size_t nn = nao * nao; double *v_priv = calloc(nn*n_dm, sizeof(double)); FjkPtr pf; double *pv; #pragma omp for nowait schedule(dynamic, 4) for (ij = 0; ij < nn; ij++) { i = ij / nao; j = ij - i * nao; off = ij * npair; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic*nn; (*pf)(eri+off, dms[ic], pv, nao, i, j); } } #pragma omp critical { for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic*nn+i]; } } } free(v_priv); } } void CVHFnrs1_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk)()) { #pragma omp parallel default(none) \ shared(eri, dms, vjk, n_dm, nao, fjk) { int i, j, ic; size_t ij, off; size_t nn = nao * nao; double *v_priv = calloc(nn*n_dm, sizeof(double)); FjkPtr pf; double *pv; #pragma omp for nowait schedule(dynamic, 4) for (ij = 0; ij < nn; ij++) { i = ij / nao; j = ij - i * nao; off = ij * nn; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic*nn; (*pf)(eri+off, dms[ic], pv, nao, i, j); } } #pragma omp critical { for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic*nn+i]; } } } free(v_priv); } }
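/* Illustrative stand-alone sketch (hypothetical test program, not part of
 * PySCF). The CVHFnrs8/nrs4/nrs2ij drivers above recover (i, j) from the
 * flattened lower-triangle pair index ij = i*(i+1)/2 + j (with i >= j) by
 * inverting the quadratic: i = (int)(sqrt(2*ij + .25) - .5 + 1e-7). The
 * 1e-7 nudge keeps the truncation from landing one below the exact integer
 * at j = 0, where sqrt(2*ij + .25) - .5 equals i exactly. Round-trip check: */
#include <math.h>
#include <stdio.h>
int main(void)
{
        int nao = 64;
        size_t ij = 0;
        int i, j;
        for (i = 0; i < nao; i++) {
        for (j = 0; j <= i; j++, ij++) {
                int i1 = (int)(sqrt(2 * ij + .25) - .5 + 1e-7);
                int j1 = (int)(ij - (size_t)i1 * (i1 + 1) / 2);
                if (i1 != i || j1 != j) {
                        printf("mismatch at ij = %zu\n", ij);
                        return 1;
                }
        } }
        printf("all %zu pairs round-trip\n", ij);
        return 0;
}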
#include <stdlib.h> #include <math.h> // #include <omp.h> #include "config.h" #include "cvhf.h" #include "np_helper/np_helper.h" #include "fblas.h" /* * J */ void CVHFics8_ij_s2kl_o0(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double dm_ij; double vj_ij = 0; if (ic > jc) { dm_ij = dm[ic * nao + jc] + dm[jc * nao + ic]; } else if (ic == jc) { dm_ij = dm[ic * nao + ic]; } else { return; } for (i = 0, ij = 0; i < ic; i++) { for (j = 0; j < i; j++, ij++) { vj_ij += eri[ij] * (dm[i * nao + j] + dm[j * nao + i]); vj[i * nao + j] += eri[ij] * dm_ij; } vj_ij += eri[ij] * dm[i * nao + i]; vj[i * nao + i] += eri[ij] * dm_ij; ij++; } //i == ic for (j = 0; j < jc; j++, ij++) { vj_ij += eri[ij] * (dm[i * nao + j] + dm[j * nao + i]); vj[i * nao + j] += eri[ij] * dm_ij; } vj_ij += eri[ij] * dm_ij; vj[ic * nao + jc] += vj_ij; } void CVHFics4_ij_s2kl_o0(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double dm_ij; if (ic > jc) { dm_ij = dm[ic * nao + jc] + dm[jc * nao + ic]; } else if (ic == jc) { dm_ij = dm[ic * nao + ic]; } else { return; } for (i = 0, ij = 0; i < nao; i++) { for (j = 0; j <= i; j++, ij++) { vj[i * nao + j] += eri[ij] * dm_ij; } } } void CVHFics2kl_kl_s1ij_o0(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double vj_ij = 0; for (i = 0, ij = 0; i < nao; i++) { for (j = 0; j < i; j++, ij++) { vj_ij += eri[ij] * (dm[i * nao + j] + dm[j * nao + i]); } vj_ij += eri[ij] * dm[i * nao + i]; ij++; } vj[ic * nao + jc] += vj_ij; } /* * K */ void CVHFics8_jk_s1il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k < ic; k++) { for (l = 0; l < k; l++, kl++) { vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; vk[l * nao + jc] += eri[kl] * dm[k * nao + ic]; vk[k * nao + jc] += eri[kl] * dm[l * nao + ic]; vk[l * nao + ic] += eri[kl] * dm[k * nao + jc]; vk[k * nao + ic] += eri[kl] * dm[l * nao + jc]; } vk[jc * nao + k] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + k]; vk[k * nao + jc] += eri[kl] * dm[k * nao + ic]; vk[k * nao + ic] += eri[kl] * dm[k * nao + jc]; kl++; } k = ic; for (l = 0; l < jc; l++, kl++) { //l < k vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; vk[l * nao + jc] += eri[kl] * dm[k * nao + ic]; vk[k * nao + jc] += eri[kl] * dm[l * nao + ic]; vk[l * nao + ic] += eri[kl] * dm[k * nao + jc]; vk[k * nao + ic] += eri[kl] * dm[l * nao + jc]; } //ic = k, jc = l; vk[jc * nao + jc] += eri[kl] * dm[ic * nao + ic]; vk[ic * nao + jc] += eri[kl] * dm[jc * nao + ic]; vk[jc * nao + ic] += eri[kl] * dm[ic * nao + jc]; vk[ic * nao + ic] += eri[kl] * dm[jc * nao + jc]; } else if (ic == jc) { for (k = 0, kl = 0; k < ic; k++) { for (l = 0; l < k; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + k] += eri[kl] * dm[ic * nao + l]; vk[l * nao + ic] += eri[kl] * dm[k * nao + ic]; vk[k * nao + ic] += eri[kl] * dm[l * nao + ic]; } vk[ic * nao + k] += eri[kl] * dm[ic * nao + k]; vk[k * nao + ic] += eri[kl] * dm[k * nao + ic]; kl++; } k = ic; for (l = 0; l < k; l++, kl++) { //l < k vk[ic * nao + l] += eri[kl] * dm[ic * nao + ic]; vk[l * nao + ic] += eri[kl] * dm[ic * nao + ic]; vk[ic * nao + ic] += eri[kl] * dm[ic * nao + 
l]; vk[ic * nao + ic] += eri[kl] * dm[l * nao + ic]; } //ic = jc = k = l vk[ic * nao + ic] += eri[kl] * dm[ic * nao + ic]; } } void CVHFics8_jk_s2il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l; //double vk_jj = 0; //double vk_ij = 0; if (ic > jc) { //k < jc for (k = 0; k < jc; k++) { for (l = 0; l < k; l++) { vk[jc * nao + l] += eri[l] * dm[ic * nao + k]; vk[jc * nao + k] += eri[l] * dm[ic * nao + l]; vk[ic * nao + l] += eri[l] * dm[jc * nao + k]; vk[ic * nao + k] += eri[l] * dm[jc * nao + l]; } //l = k vk[jc * nao + k] += eri[k] * dm[ic * nao + k]; vk[ic * nao + k] += eri[k] * dm[jc * nao + k]; eri += k + 1; } //k = jc for (l = 0; l < k; l++) { vk[jc * nao + l] += eri[l] * dm[ic * nao + jc]; vk[ic * nao + l] += eri[l] * dm[jc * nao + jc]; vk[jc * nao + jc] += eri[l] * (dm[ic * nao + l] + dm[l * nao + ic]); vk[ic * nao + jc] += eri[l] * dm[jc * nao + l]; } //l = k = jc vk[jc * nao + jc] += eri[l] * (dm[ic * nao + jc] + dm[jc * nao + ic]); vk[ic * nao + jc] += eri[l] * dm[jc * nao + jc]; eri += k + 1; //k > jc for (k = jc + 1; k < ic; k++) { //l < jc for (l = 0; l < jc; l++) { vk[jc * nao + l] += eri[l] * dm[ic * nao + k]; vk[ic * nao + l] += eri[l] * dm[jc * nao + k]; vk[ic * nao + k] += eri[l] * dm[jc * nao + l]; vk[k * nao + jc] += eri[l] * dm[l * nao + ic]; } //l = jc vk[jc * nao + jc] += eri[l] * (dm[ic * nao + k] + dm[k * nao + ic]); vk[ic * nao + jc] += eri[l] * dm[jc * nao + k]; vk[ic * nao + k] += eri[l] * dm[jc * nao + jc]; vk[k * nao + jc] += eri[l] * dm[jc * nao + ic]; //eri += jc + 1; //l > jc for (l = jc + 1; l < k; l++) { vk[ic * nao + l] += eri[l] * dm[jc * nao + k]; vk[ic * nao + k] += eri[l] * dm[jc * nao + l]; vk[l * nao + jc] += eri[l] * dm[k * nao + ic]; vk[k * nao + jc] += eri[l] * dm[l * nao + ic]; } //l = k vk[jc * nao + k] += eri[l] * dm[ic * nao + k]; vk[ic * nao + k] += eri[l] * dm[jc * nao + k]; vk[k * nao + jc] += eri[l] * dm[k * nao + ic]; eri += k + 1; } //k = ic for (l = 0; l < jc; l++) { vk[jc * nao + l] += eri[l] * dm[ic * nao + ic]; vk[ic * nao + l] += eri[l] * dm[jc * nao + ic]; vk[ic * nao + ic] += eri[l] * (dm[jc * nao + l] + dm[l * nao + jc]); vk[ic * nao + jc] += eri[l] * dm[l * nao + ic]; } //ic = k, jc = l; vk[jc * nao + jc] += eri[l] * dm[ic * nao + ic]; vk[ic * nao + jc] += eri[l] * dm[jc * nao + ic]; vk[ic * nao + ic] += eri[l] * dm[jc * nao + jc]; eri += jc + 1; } else if (ic == jc) { for (k = 0; k < ic - 1; k += 2) { for (l = 0; l < k; l++) { vk[ic * nao + l] += eri[l] * dm[ic * nao + k]; vk[ic * nao + k] += eri[l] * dm[ic * nao + l]; vk[ic * nao + l] += eri[l + k + 1] * dm[ic * nao + k + 1]; vk[ic * nao + k + 1] += eri[l + k + 1] * dm[ic * nao + l]; } vk[ic * nao + k] += eri[k] * dm[ic * nao + k]; eri += k + 1; vk[ic * nao + k] += eri[k] * dm[ic * nao + k + 1]; vk[ic * nao + k + 1] += eri[k] * dm[ic * nao + k]; vk[ic * nao + k + 1] += eri[k + 1] * dm[ic * nao + k + 1]; eri += k + 2; } for (; k < ic; k++) { for (l = 0; l < k; l++) { vk[ic * nao + l] += eri[l] * dm[ic * nao + k]; vk[ic * nao + k] += eri[l] * dm[ic * nao + l]; } vk[ic * nao + k] += eri[k] * dm[ic * nao + k]; eri += k + 1; } for (l = 0; l < k; l++) { //l < k vk[ic * nao + l] += eri[l] * dm[ic * nao + ic]; vk[ic * nao + ic] += eri[l] * (dm[ic * nao + l] + dm[l * nao + ic]); } //ic = jc = k = l vk[ic * nao + ic] += eri[l] * dm[ic * nao + ic]; eri += k + 1; } } void CVHFics4_jk_s1il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < k; l++, kl++) { vk[jc * 
nao + l] += eri[kl] * dm[ic * nao + k]; vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; } vk[jc * nao + k] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + k]; kl++; } } else if (ic == jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < k; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + k] += eri[kl] * dm[ic * nao + l]; } vk[ic * nao + k] += eri[kl] * dm[ic * nao + k]; kl++; } } } void CVHFics4_il_s1jk_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc); } void CVHFics4_jk_s2il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k <= jc; k++) { for (l = 0; l < k; l++, kl++) { vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; } vk[jc * nao + k] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + k]; kl++; } for (k = jc + 1; k <= ic; k++) { for (l = 0; l <= jc; l++, kl++) { vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; } for (l = jc + 1; l < k; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; } vk[ic * nao + k] += eri[kl] * dm[jc * nao + k]; kl++; } for (k = ic + 1; k < nao; k++) { for (l = 0, kl = k * (k + 1) / 2; l <= jc; l++, kl++) { vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; } for (l = jc + 1; l <= ic; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; } } } else if (ic == jc) { for (k = 0, kl = 0; k <= ic; k++) { for (l = 0; l < k; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + k] += eri[kl] * dm[ic * nao + l]; } vk[ic * nao + k] += eri[kl] * dm[ic * nao + k]; kl++; } for (k = ic + 1; k < nao; k++) { for (l = 0, kl = k * (k + 1) / 2; l <= ic; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[ic * nao + k]; } } } } void CVHFics4_il_s2jk_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc); } /* * einsum ijkl,ij->(s2)kl 8-fold symmetry for eri: i>=j,k>=l,ij>=kl input * address eri of the first element for pair ij=ic*(ic+1)/2+jc i.e. ~ * &eri_ao[ij*(ij+1)/2] dm can be non-Hermitian, output vk might not be * Hermitian * * NOTE all _s2kl (nrs8_, nrs4_, nrs2kl_) assumes the tril part of eri being * stored in C-order *contiguously*. 
so call CVHFunpack_nrblock2tril to * generate eris */ void CVHFics8_ij_s2kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { CVHFics8_ij_s2kl_o0(eri, dm, vj, nao, ic, jc); } //tri_dm:fold upper triangular dm to lower triangle, //tri_dm[i * (i + 1) / 2 + j] = dm[i * nao + j] + dm[j * nao + i] for i >j void CVHFics8_tridm_vj(double *eri, double *tri_dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double dm_ijc = tri_dm[ic * (ic + 1) / 2 + jc]; double vj_ij = 0; const int INC1 = 1; int i1; for (i = 0, ij = 0; i < ic; i++) { i1 = i + 1; vj_ij += ddot_(&i1, eri + ij, &INC1, tri_dm + ij, &INC1); daxpy_(&i1, &dm_ijc, eri + ij, &INC1, vj + i * nao, &INC1); ij += i1; } //i == ic for (j = 0; j < jc; j++, ij++) { vj_ij += eri[ij] * tri_dm[ij]; vj[i * nao + j] += eri[ij] * dm_ijc; } vj_ij += eri[ij] * dm_ijc; vj[ic * nao + jc] += vj_ij; } void CVHFics8_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics8_jk_s1il_o0(eri, dm, vk, nao, ic, jc); } /* * einsum ijkl,jk->(s2)il output vk should be Hermitian */ void CVHFics8_jk_s2il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics8_jk_s2il_o0(eri, dm, vk, nao, ic, jc); } /* * einsum ijkl,jk->il 4-fold symmetry for eri: i>=j,k>=l */ void CVHFics4_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc); } void CVHFics4_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc); } /* * output vk should be Hermitian */ void CVHFics4_jk_s2il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc); } void CVHFics4_il_s2jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc); } void CVHFics4_ij_s2kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { CVHFics4_ij_s2kl_o0(eri, dm, vj, nao, ic, jc); } void CVHFics4_kl_s2ij(double *eri, double *dm, double *vj, int nao, int ic, int jc) { if (ic >= jc) { CVHFics2kl_kl_s1ij_o0(eri, dm, vj, nao, ic, jc); } } void CVHFics1_ij_s1kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i; double dm_ij = dm[ic * nao + jc]; for (i = 0; i < nao * nao; i++) { vj[i] += eri[i] * dm_ij; } } void CVHFics1_kl_s1ij(double *eri, double *dm, double *vj, int nao, int ic, int jc) { const int INC1 = 1; int nn = nao * nao; vj[ic * nao + jc] += ddot_(&nn, eri, &INC1, dm, &INC1); } void CVHFics1_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; } } } void CVHFics1_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; } } } void CVHFics2ij_ij_s1kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i; double dm_ij; if (ic > jc) { dm_ij = dm[ic * nao + jc] + dm[jc * nao + ic]; } else if (ic == jc) { dm_ij = dm[ic * nao + ic]; } else { return; } for (i = 0; i < nao * nao; i++) { vj[i] += eri[i] * dm_ij; } } void CVHFics2ij_kl_s2ij(double *eri, double *dm, double *vj, int nao, int ic, int jc) { if (ic < jc) { return; } CVHFics1_kl_s1ij(eri, dm, vj, nao, ic, jc); } void CVHFics2ij_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k < nao; 
k++) { for (l = 0; l < nao; l++, kl++) { vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; } } } else if (ic == jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[ic * nao + k]; } } } } void CVHFics2ij_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; } } } else if (ic == jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[ic * nao + k] += eri[kl] * dm[ic * nao + l]; } } } } void CVHFics2kl_ij_s2kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double dm_ij = dm[ic * nao + jc]; for (i = 0, ij = 0; i < nao; i++) { for (j = 0; j <= i; j++, ij++) { vj[i * nao + j] += eri[ij] * dm_ij; } } } void CVHFics2kl_kl_s1ij(double *eri, double *dm, double *vj, int nao, int ic, int jc) { CVHFics2kl_kl_s1ij_o0(eri, dm, vj, nao, ic, jc); } void CVHFics2kl_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < k; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; } vk[ic * nao + k] += eri[kl] * dm[jc * nao + k]; kl++; } } void CVHFics2kl_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < k; l++, kl++) { vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; } vk[jc * nao + k] += eri[kl] * dm[ic * nao + k]; kl++; } } /************************************************** * s8 8-fold symmetry: i>=j,k>=l,ij>=kl * s4 4-fold symmetry: i>=j,k>=l * s2ij 2-fold symmetry: i>=j * s2kl 2-fold symmetry: k>=l * s1 no permutation symmetry **************************************************/ typedef void (*FjkPtr) (double *eri, double *dm, double *vk, int nao, int ic, int jc); void CVHFnrs8_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk) ()) { int i, j, ic; size_t ij, off; size_t npair = nao * (nao + 1) / 2; size_t nn = nao * nao; double *v_priv = calloc(nn * n_dm, sizeof(double)); FjkPtr pf; double *pv; for (ij = 0; ij < npair; ij++) { i = (int)(sqrt(2 * ij + .25) - .5 + 1e-7); j = ij - i * (i + 1) / 2; off = ij * (ij + 1) / 2; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic * nn; (*pf) (eri + off, dms[ic], pv, nao, i, j); } } for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic * nn + i]; } } free(v_priv); } void CVHFnrs4_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk) ()) { int i, j, ic; size_t ij, off; size_t npair = nao * (nao + 1) / 2; size_t nn = nao * nao; double *v_priv = calloc(nn * n_dm, sizeof(double)); FjkPtr pf; double *pv; for (ij = 0; ij < npair; ij++) { i = (int)(sqrt(2 * ij + .25) - .5 + 1e-7); j = ij - i * (i + 1) / 2; off = ij * npair; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic * nn; (*pf) (eri + off, dms[ic], pv, nao, i, j); } } for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic * nn + i]; } } free(v_priv); } void CVHFnrs2ij_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk) ()) { int i, j, ic; size_t ij, off; size_t npair = nao * (nao + 1) / 2; size_t nn = nao * nao; double *v_priv = calloc(nn * n_dm, sizeof(double)); FjkPtr pf; double *pv; for (ij = 0; ij < npair; ij++) { i = (int)(sqrt(2 * ij + .25) - .5 + 1e-7); j = ij - i * (i + 1) / 2; off = ij * nn; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic * nn; (*pf) (eri + off, dms[ic], pv, nao, i, j); } } for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic * nn + i]; } } free(v_priv); } void CVHFnrs2kl_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk) ()) { int i, j, ic; size_t ij, off; size_t npair = nao * (nao + 1) / 2; size_t nn = nao * nao; double *v_priv = calloc(nn * n_dm, sizeof(double)); FjkPtr pf; double *pv; for (ij = 0; ij < nn; ij++) { i = ij / nao; j = ij - i * nao; off = ij * npair; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic * nn; (*pf) (eri + off, dms[ic], pv, nao, i, j); } } for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic * nn + i]; } } free(v_priv); } void CVHFnrs1_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk) ()) { int i, j, ic; size_t ij, off; size_t nn = nao * nao; double *v_priv = calloc(nn * n_dm, sizeof(double)); FjkPtr pf; double *pv; for (ij = 0; ij < nn; ij++) { i = ij / nao; j = ij - i * nao; off = ij * nn; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic * nn; (*pf) (eri + off, dms[ic], pv, nao, i, j); } } for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic * nn + i]; } } free(v_priv); }
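/* A dense, symmetry-free reference for the contraction the jk_s1il kernels
 * implement, vk = einsum('ijkl,jk->il', eri, dm); useful for validating the
 * packed kernels on small nao. Hypothetical helper, a sketch rather than a
 * PySCF routine, assuming eri is stored as nao^4 doubles in C order. */
#include <stddef.h>
static void ref_jk_s1il(const double *eri, const double *dm,
                        double *vk, int nao)
{
        size_t n = (size_t)nao, i, j, k, l;
        for (i = 0; i < n; i++)
        for (j = 0; j < n; j++)
        for (k = 0; k < n; k++)
        for (l = 0; l < n; l++) {
                /* vk[i,l] += eri[i,j,k,l] * dm[j,k] */
                vk[i*n + l] += eri[((i*n + j)*n + k)*n + l] * dm[j*n + k];
        }
}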
#include <stdlib.h> #include <math.h> // #include <omp.h> #include "config.h" #include "cvhf.h" #include "np_helper/np_helper.h" #include "fblas.h" /* * J */ void CVHFics8_ij_s2kl_o0(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double dm_ij; double vj_ij = 0; if (ic > jc) { dm_ij = dm[ic * nao + jc] + dm[jc * nao + ic]; } else if (ic == jc) { dm_ij = dm[ic * nao + ic]; } else { return; } for (i = 0, ij = 0; i < ic; i++) { for (j = 0; j < i; j++, ij++) { vj_ij += eri[ij] * (dm[i * nao + j] + dm[j * nao + i]); vj[i * nao + j] += eri[ij] * dm_ij; } vj_ij += eri[ij] * dm[i * nao + i]; vj[i * nao + i] += eri[ij] * dm_ij; ij++; } //i == ic for (j = 0; j < jc; j++, ij++) { vj_ij += eri[ij] * (dm[i * nao + j] + dm[j * nao + i]); vj[i * nao + j] += eri[ij] * dm_ij; } vj_ij += eri[ij] * dm_ij; vj[ic * nao + jc] += vj_ij; } void CVHFics4_ij_s2kl_o0(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double dm_ij; if (ic > jc) { dm_ij = dm[ic * nao + jc] + dm[jc * nao + ic]; } else if (ic == jc) { dm_ij = dm[ic * nao + ic]; } else { return; } for (i = 0, ij = 0; i < nao; i++) { for (j = 0; j <= i; j++, ij++) { vj[i * nao + j] += eri[ij] * dm_ij; } } } void CVHFics2kl_kl_s1ij_o0(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double vj_ij = 0; for (i = 0, ij = 0; i < nao; i++) { for (j = 0; j < i; j++, ij++) { vj_ij += eri[ij] * (dm[i * nao + j] + dm[j * nao + i]); } vj_ij += eri[ij] * dm[i * nao + i]; ij++; } vj[ic * nao + jc] += vj_ij; } /* * K */ void CVHFics8_jk_s1il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k < ic; k++) { for (l = 0; l < k; l++, kl++) { vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; vk[l * nao + jc] += eri[kl] * dm[k * nao + ic]; vk[k * nao + jc] += eri[kl] * dm[l * nao + ic]; vk[l * nao + ic] += eri[kl] * dm[k * nao + jc]; vk[k * nao + ic] += eri[kl] * dm[l * nao + jc]; } vk[jc * nao + k] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + k]; vk[k * nao + jc] += eri[kl] * dm[k * nao + ic]; vk[k * nao + ic] += eri[kl] * dm[k * nao + jc]; kl++; } k = ic; for (l = 0; l < jc; l++, kl++) { //l < k vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; vk[l * nao + jc] += eri[kl] * dm[k * nao + ic]; vk[k * nao + jc] += eri[kl] * dm[l * nao + ic]; vk[l * nao + ic] += eri[kl] * dm[k * nao + jc]; vk[k * nao + ic] += eri[kl] * dm[l * nao + jc]; } //ic = k, jc = l; vk[jc * nao + jc] += eri[kl] * dm[ic * nao + ic]; vk[ic * nao + jc] += eri[kl] * dm[jc * nao + ic]; vk[jc * nao + ic] += eri[kl] * dm[ic * nao + jc]; vk[ic * nao + ic] += eri[kl] * dm[jc * nao + jc]; } else if (ic == jc) { for (k = 0, kl = 0; k < ic; k++) { for (l = 0; l < k; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + k] += eri[kl] * dm[ic * nao + l]; vk[l * nao + ic] += eri[kl] * dm[k * nao + ic]; vk[k * nao + ic] += eri[kl] * dm[l * nao + ic]; } vk[ic * nao + k] += eri[kl] * dm[ic * nao + k]; vk[k * nao + ic] += eri[kl] * dm[k * nao + ic]; kl++; } k = ic; for (l = 0; l < k; l++, kl++) { //l < k vk[ic * nao + l] += eri[kl] * dm[ic * nao + ic]; vk[l * nao + ic] += eri[kl] * dm[ic * nao + ic]; vk[ic * nao + ic] += eri[kl] * dm[ic * nao + 
l]; vk[ic * nao + ic] += eri[kl] * dm[l * nao + ic]; } //ic = jc = k = l vk[ic * nao + ic] += eri[kl] * dm[ic * nao + ic]; } } void CVHFics8_jk_s2il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l; //double vk_jj = 0; //double vk_ij = 0; if (ic > jc) { //k < jc for (k = 0; k < jc; k++) { for (l = 0; l < k; l++) { vk[jc * nao + l] += eri[l] * dm[ic * nao + k]; vk[jc * nao + k] += eri[l] * dm[ic * nao + l]; vk[ic * nao + l] += eri[l] * dm[jc * nao + k]; vk[ic * nao + k] += eri[l] * dm[jc * nao + l]; } //l = k vk[jc * nao + k] += eri[k] * dm[ic * nao + k]; vk[ic * nao + k] += eri[k] * dm[jc * nao + k]; eri += k + 1; } //k = jc for (l = 0; l < k; l++) { vk[jc * nao + l] += eri[l] * dm[ic * nao + jc]; vk[ic * nao + l] += eri[l] * dm[jc * nao + jc]; vk[jc * nao + jc] += eri[l] * (dm[ic * nao + l] + dm[l * nao + ic]); vk[ic * nao + jc] += eri[l] * dm[jc * nao + l]; } //l = k = jc vk[jc * nao + jc] += eri[l] * (dm[ic * nao + jc] + dm[jc * nao + ic]); vk[ic * nao + jc] += eri[l] * dm[jc * nao + jc]; eri += k + 1; //k > jc for (k = jc + 1; k < ic; k++) { //l < jc for (l = 0; l < jc; l++) { vk[jc * nao + l] += eri[l] * dm[ic * nao + k]; vk[ic * nao + l] += eri[l] * dm[jc * nao + k]; vk[ic * nao + k] += eri[l] * dm[jc * nao + l]; vk[k * nao + jc] += eri[l] * dm[l * nao + ic]; } //l = jc vk[jc * nao + jc] += eri[l] * (dm[ic * nao + k] + dm[k * nao + ic]); vk[ic * nao + jc] += eri[l] * dm[jc * nao + k]; vk[ic * nao + k] += eri[l] * dm[jc * nao + jc]; vk[k * nao + jc] += eri[l] * dm[jc * nao + ic]; //eri += jc + 1; //l > jc for (l = jc + 1; l < k; l++) { vk[ic * nao + l] += eri[l] * dm[jc * nao + k]; vk[ic * nao + k] += eri[l] * dm[jc * nao + l]; vk[l * nao + jc] += eri[l] * dm[k * nao + ic]; vk[k * nao + jc] += eri[l] * dm[l * nao + ic]; } //l = k vk[jc * nao + k] += eri[l] * dm[ic * nao + k]; vk[ic * nao + k] += eri[l] * dm[jc * nao + k]; vk[k * nao + jc] += eri[l] * dm[k * nao + ic]; eri += k + 1; } //k = ic for (l = 0; l < jc; l++) { vk[jc * nao + l] += eri[l] * dm[ic * nao + ic]; vk[ic * nao + l] += eri[l] * dm[jc * nao + ic]; vk[ic * nao + ic] += eri[l] * (dm[jc * nao + l] + dm[l * nao + jc]); vk[ic * nao + jc] += eri[l] * dm[l * nao + ic]; } //ic = k, jc = l; vk[jc * nao + jc] += eri[l] * dm[ic * nao + ic]; vk[ic * nao + jc] += eri[l] * dm[jc * nao + ic]; vk[ic * nao + ic] += eri[l] * dm[jc * nao + jc]; eri += jc + 1; } else if (ic == jc) { for (k = 0; k < ic - 1; k += 2) { for (l = 0; l < k; l++) { vk[ic * nao + l] += eri[l] * dm[ic * nao + k]; vk[ic * nao + k] += eri[l] * dm[ic * nao + l]; vk[ic * nao + l] += eri[l + k + 1] * dm[ic * nao + k + 1]; vk[ic * nao + k + 1] += eri[l + k + 1] * dm[ic * nao + l]; } vk[ic * nao + k] += eri[k] * dm[ic * nao + k]; eri += k + 1; vk[ic * nao + k] += eri[k] * dm[ic * nao + k + 1]; vk[ic * nao + k + 1] += eri[k] * dm[ic * nao + k]; vk[ic * nao + k + 1] += eri[k + 1] * dm[ic * nao + k + 1]; eri += k + 2; } for (; k < ic; k++) { for (l = 0; l < k; l++) { vk[ic * nao + l] += eri[l] * dm[ic * nao + k]; vk[ic * nao + k] += eri[l] * dm[ic * nao + l]; } vk[ic * nao + k] += eri[k] * dm[ic * nao + k]; eri += k + 1; } for (l = 0; l < k; l++) { //l < k vk[ic * nao + l] += eri[l] * dm[ic * nao + ic]; vk[ic * nao + ic] += eri[l] * (dm[ic * nao + l] + dm[l * nao + ic]); } //ic = jc = k = l vk[ic * nao + ic] += eri[l] * dm[ic * nao + ic]; eri += k + 1; } } void CVHFics4_jk_s1il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < k; l++, kl++) { vk[jc * 
nao + l] += eri[kl] * dm[ic * nao + k]; vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; } vk[jc * nao + k] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + k]; kl++; } } else if (ic == jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < k; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + k] += eri[kl] * dm[ic * nao + l]; } vk[ic * nao + k] += eri[kl] * dm[ic * nao + k]; kl++; } } } void CVHFics4_il_s1jk_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc); } void CVHFics4_jk_s2il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k <= jc; k++) { for (l = 0; l < k; l++, kl++) { vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; } vk[jc * nao + k] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + k]; kl++; } for (k = jc + 1; k <= ic; k++) { for (l = 0; l <= jc; l++, kl++) { vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; } for (l = jc + 1; l < k; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; } vk[ic * nao + k] += eri[kl] * dm[jc * nao + k]; kl++; } for (k = ic + 1; k < nao; k++) { for (l = 0, kl = k * (k + 1) / 2; l <= jc; l++, kl++) { vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; } for (l = jc + 1; l <= ic; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; } } } else if (ic == jc) { for (k = 0, kl = 0; k <= ic; k++) { for (l = 0; l < k; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + k] += eri[kl] * dm[ic * nao + l]; } vk[ic * nao + k] += eri[kl] * dm[ic * nao + k]; kl++; } for (k = ic + 1; k < nao; k++) { for (l = 0, kl = k * (k + 1) / 2; l <= ic; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[ic * nao + k]; } } } } void CVHFics4_il_s2jk_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc); } /* * einsum ijkl,ij->(s2)kl 8-fold symmetry for eri: i>=j,k>=l,ij>=kl input * address eri of the first element for pair ij=ic*(ic+1)/2+jc i.e. ~ * &eri_ao[ij*(ij+1)/2] dm can be non-Hermitian, output vk might not be * Hermitian * * NOTE all _s2kl (nrs8_, nrs4_, nrs2kl_) assumes the tril part of eri being * stored in C-order *contiguously*. 
so call CVHFunpack_nrblock2tril to * generate eris */ void CVHFics8_ij_s2kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { CVHFics8_ij_s2kl_o0(eri, dm, vj, nao, ic, jc); } //tri_dm:fold upper triangular dm to lower triangle, //tri_dm[i * (i + 1) / 2 + j] = dm[i * nao + j] + dm[j * nao + i] for i >j void CVHFics8_tridm_vj(double *eri, double *tri_dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double dm_ijc = tri_dm[ic * (ic + 1) / 2 + jc]; double vj_ij = 0; const int INC1 = 1; int i1; for (i = 0, ij = 0; i < ic; i++) { i1 = i + 1; vj_ij += ddot_(&i1, eri + ij, &INC1, tri_dm + ij, &INC1); daxpy_(&i1, &dm_ijc, eri + ij, &INC1, vj + i * nao, &INC1); ij += i1; } //i == ic for (j = 0; j < jc; j++, ij++) { vj_ij += eri[ij] * tri_dm[ij]; vj[i * nao + j] += eri[ij] * dm_ijc; } vj_ij += eri[ij] * dm_ijc; vj[ic * nao + jc] += vj_ij; } void CVHFics8_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics8_jk_s1il_o0(eri, dm, vk, nao, ic, jc); } /* * einsum ijkl,jk->(s2)il output vk should be Hermitian */ void CVHFics8_jk_s2il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics8_jk_s2il_o0(eri, dm, vk, nao, ic, jc); } /* * einsum ijkl,jk->il 4-fold symmetry for eri: i>=j,k>=l */ void CVHFics4_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc); } void CVHFics4_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc); } /* * output vk should be Hermitian */ void CVHFics4_jk_s2il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc); } void CVHFics4_il_s2jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc); } void CVHFics4_ij_s2kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { CVHFics4_ij_s2kl_o0(eri, dm, vj, nao, ic, jc); } void CVHFics4_kl_s2ij(double *eri, double *dm, double *vj, int nao, int ic, int jc) { if (ic >= jc) { CVHFics2kl_kl_s1ij_o0(eri, dm, vj, nao, ic, jc); } } void CVHFics1_ij_s1kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i; double dm_ij = dm[ic * nao + jc]; for (i = 0; i < nao * nao; i++) { vj[i] += eri[i] * dm_ij; } } void CVHFics1_kl_s1ij(double *eri, double *dm, double *vj, int nao, int ic, int jc) { const int INC1 = 1; int nn = nao * nao; vj[ic * nao + jc] += ddot_(&nn, eri, &INC1, dm, &INC1); } void CVHFics1_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; } } } void CVHFics1_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; } } } void CVHFics2ij_ij_s1kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i; double dm_ij; if (ic > jc) { dm_ij = dm[ic * nao + jc] + dm[jc * nao + ic]; } else if (ic == jc) { dm_ij = dm[ic * nao + ic]; } else { return; } for (i = 0; i < nao * nao; i++) { vj[i] += eri[i] * dm_ij; } } void CVHFics2ij_kl_s2ij(double *eri, double *dm, double *vj, int nao, int ic, int jc) { if (ic < jc) { return; } CVHFics1_kl_s1ij(eri, dm, vj, nao, ic, jc); } void CVHFics2ij_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k < nao; 
k++) { for (l = 0; l < nao; l++, kl++) { vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; } } } else if (ic == jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[ic * nao + k]; } } } } void CVHFics2ij_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; if (ic > jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; } } } else if (ic == jc) { for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < nao; l++, kl++) { vk[ic * nao + k] += eri[kl] * dm[ic * nao + l]; } } } } void CVHFics2kl_ij_s2kl(double *eri, double *dm, double *vj, int nao, int ic, int jc) { int i, j, ij; double dm_ij = dm[ic * nao + jc]; for (i = 0, ij = 0; i < nao; i++) { for (j = 0; j <= i; j++, ij++) { vj[i * nao + j] += eri[ij] * dm_ij; } } } void CVHFics2kl_kl_s1ij(double *eri, double *dm, double *vj, int nao, int ic, int jc) { CVHFics2kl_kl_s1ij_o0(eri, dm, vj, nao, ic, jc); } void CVHFics2kl_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < k; l++, kl++) { vk[ic * nao + l] += eri[kl] * dm[jc * nao + k]; vk[ic * nao + k] += eri[kl] * dm[jc * nao + l]; } vk[ic * nao + k] += eri[kl] * dm[jc * nao + k]; kl++; } } void CVHFics2kl_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc) { int k, l, kl; for (k = 0, kl = 0; k < nao; k++) { for (l = 0; l < k; l++, kl++) { vk[jc * nao + l] += eri[kl] * dm[ic * nao + k]; vk[jc * nao + k] += eri[kl] * dm[ic * nao + l]; } vk[jc * nao + k] += eri[kl] * dm[ic * nao + k]; kl++; } } /************************************************** * s8 8-fold symmetry: i>=j,k>=l,ij>=kl * s4 4-fold symmetry: i>=j,k>=l * s2ij 2-fold symmetry: i>=j * s2kl 2-fold symmetry: k>=l * s1 no permutation symmetry **************************************************/ typedef void (*FjkPtr) (double *eri, double *dm, double *vk, int nao, int ic, int jc); void CVHFnrs8_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk) ()) { #pragma omp parallel default(none) \ shared(eri, dms, vjk, n_dm, nao, fjk) { int i, j, ic; size_t ij, off; size_t npair = nao * (nao + 1) / 2; size_t nn = nao * nao; double *v_priv = calloc(nn * n_dm, sizeof(double)); FjkPtr pf; double *pv; #pragma omp for nowait schedule(dynamic, 4) for (ij = 0; ij < npair; ij++) { i = (int)(sqrt(2 * ij + .25) - .5 + 1e-7); j = ij - i * (i + 1) / 2; off = ij * (ij + 1) / 2; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic * nn; (*pf) (eri + off, dms[ic], pv, nao, i, j); } } #pragma omp critical { for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic * nn + i]; } } } free(v_priv); } } void CVHFnrs4_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk) ()) { #pragma omp parallel default(none) \ shared(eri, dms, vjk, n_dm, nao, fjk) { int i, j, ic; size_t ij, off; size_t npair = nao * (nao + 1) / 2; size_t nn = nao * nao; double *v_priv = calloc(nn * n_dm, sizeof(double)); FjkPtr pf; double *pv; #pragma omp for nowait schedule(dynamic, 4) for (ij = 0; ij < npair; ij++) { i = (int)(sqrt(2 * ij + .25) - .5 + 1e-7); j = ij - i * (i + 1) / 2; off = ij * npair; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic * nn; (*pf) (eri + off, dms[ic], pv, nao, i, j); } } #pragma omp critical { for (ic = 
0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic * nn + i]; } } } free(v_priv); } } void CVHFnrs2ij_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk) ()) { #pragma omp parallel default(none) \ shared(eri, dms, vjk, n_dm, nao, fjk) { int i, j, ic; size_t ij, off; size_t npair = nao * (nao + 1) / 2; size_t nn = nao * nao; double *v_priv = calloc(nn * n_dm, sizeof(double)); FjkPtr pf; double *pv; #pragma omp for nowait schedule(dynamic, 4) for (ij = 0; ij < npair; ij++) { i = (int)(sqrt(2 * ij + .25) - .5 + 1e-7); j = ij - i * (i + 1) / 2; off = ij * nn; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic * nn; (*pf) (eri + off, dms[ic], pv, nao, i, j); } } #pragma omp critical { for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic * nn + i]; } } } free(v_priv); } } void CVHFnrs2kl_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk) ()) { #pragma omp parallel default(none) \ shared(eri, dms, vjk, n_dm, nao, fjk) { int i, j, ic; size_t ij, off; size_t npair = nao * (nao + 1) / 2; size_t nn = nao * nao; double *v_priv = calloc(nn * n_dm, sizeof(double)); FjkPtr pf; double *pv; #pragma omp for nowait schedule(dynamic, 4) for (ij = 0; ij < nn; ij++) { i = ij / nao; j = ij - i * nao; off = ij * npair; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic * nn; (*pf) (eri + off, dms[ic], pv, nao, i, j); } } #pragma omp critical { for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic * nn + i]; } } } free(v_priv); } } void CVHFnrs1_incore_drv(double *eri, double **dms, double **vjk, int n_dm, int nao, void (**fjk) ()) { #pragma omp parallel default(none) \ shared(eri, dms, vjk, n_dm, nao, fjk) { int i, j, ic; size_t ij, off; size_t nn = nao * nao; double *v_priv = calloc(nn * n_dm, sizeof(double)); FjkPtr pf; double *pv; #pragma omp for nowait schedule(dynamic, 4) for (ij = 0; ij < nn; ij++) { i = ij / nao; j = ij - i * nao; off = ij * nn; for (ic = 0; ic < n_dm; ic++) { pf = fjk[ic]; pv = v_priv + ic * nn; (*pf) (eri + off, dms[ic], pv, nao, i, j); } } #pragma omp critical { for (ic = 0; ic < n_dm; ic++) { pv = vjk[ic]; for (i = 0; i < nn; i++) { pv[i] += v_priv[ic * nn + i]; } } } free(v_priv); } }
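/* The five _incore_drv routines above share one reduction pattern: each
 * thread contracts its share of pair indices into a calloc'ed private
 * buffer, and the buffers are merged under a single critical section, which
 * avoids atomics on every vjk element. A stripped-down sketch of just that
 * pattern (hypothetical names; OpenMP 3.0+ for the size_t loop variable): */
#include <stdlib.h>
void private_buffer_reduction(double *out, size_t nn, size_t ntasks)
{
#pragma omp parallel
{
        double *priv = calloc(nn, sizeof(double));
        size_t ij, i;
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < ntasks; ij++) {
                priv[ij % nn] += 1.0;   /* stand-in for the real contraction */
        }
#pragma omp critical
        {
                for (i = 0; i < nn; i++) {
                        out[i] += priv[i];
                }
        }
        free(priv);
}
}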
ep.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - EP This benchmark is an OpenMP C version of the NPB EP code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to [email protected] Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Author: P. O. Frederickson D. H. Bailey A. C. Woo OpenMP C version: S. Satoh --------------------------------------------------------------------*/ //#define DEBUG_ON_x //#define DEBUG_ON_t1 //#define DEBUG_ON_q #include "npb-C.h" #include "npbparams.h" /* parameters */ #define MK 12 #define MM (M - MK) #define NN (1 << MM) #define NK (1 << MK) #define NQ 10 //#define EPSILON 1.0e-8 //#define A 1220703125.0 //#define S 271828183.0 #define EPSILON 1.0e-6 #define A 390625.0f #define S 28183.0f //#define TIMERS_ENABLED FALSE #if defined(USE_POW) #define r23 pow(0.5f, 11.0F) #define r46 (r23*r23) #define t23 pow(2.0f, 11.0F) #define t46 (t23*t23) #else #define r23 (0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f) #define r46 (r23*r23) #define t23 (2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f) #define t46 (t23*t23) #endif #ifndef _UNROLLFAC_ #define _UNROLLFAC_ 1 #endif #ifdef _OPENARC_ #if _UNROLLFAC_ == 1 #pragma openarc #define _UNROLLFAC_ 1 #elif _UNROLLFAC_ == 6 #pragma openarc #define _UNROLLFAC_ 6 #elif _UNROLLFAC_ == 8 #pragma openarc #define _UNROLLFAC_ 8 #elif _UNROLLFAC_ == 32 #pragma openarc #define _UNROLLFAC_ 32 #elif _UNROLLFAC_ == 128 #pragma openarc #define _UNROLLFAC_ 128 #elif _UNROLLFAC_ == 1024 #pragma openarc #define _UNROLLFAC_ 1024 #endif #pragma openarc #define NK 4096 #endif /* global variables */ /* common /storage/ */ static float x[2*NK]; //#pragma omp threadprivate(x) static float q[NQ]; /*-------------------------------------------------------------------- program EMBAR c-------------------------------------------------------------------*/ /* c This is the serial version of the APP Benchmark 1, c the "embarrassingly parallel" benchmark. c c M is the Log_2 of the number of complex pairs of uniform (0, 1) random c numbers. MK is the Log_2 of the size of each batch of uniform random c numbers. MK can be set for convenience on a given system, since it does c not affect the results. 
*/ int main(int argc, char **argv) { float Mops, t1, t2, t3, t4, x1, x2, sx, sy, tm, an, tt, gc; float dum[3] = { 1.0F, 1.0F, 1.0F }; int np, ierr, node, no_nodes, i, ik, kk, l, k, nit, ierrcode, no_large_nodes, np_add, k_offset, j; int nthreads = 1; boolean verified; char size[13+1]; /* character*13 */ //float t1, t2, t3, t4, x1, x2; //int kk, i, ik, l; //float qq[NQ]; /* private copy of q[0:NQ-1] */ float qq0; float qq1; float qq2; float qq3; float qq4; float qq5; float qq6; float qq7; float qq8; float qq9; float t1_randlc,t2_randlc,t3_randlc,t4_randlc,a1_randlc,a2_randlc,x1_randlc,x2_randlc,z_randlc, a_randlc; int i_vranlc; float x_vranlc; float (*xx)[(NN/_UNROLLFAC_)]; int m; /* c Because the size of the problem is too large to store in a 32-bit c integer for some classes, we put it into a string (for printing). c Have to strip off the decimal point put in there by the floating c point print statement (internal file) */ printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - EP Benchmark\n"); sprintf(size, "%12.0f", pow(2.0F, M+1)); for (j = 13; j >= 1; j--) { if (size[j] == '.') size[j] = ' '; } printf(" Number of random numbers generated: %13s\n", size); verified = FALSE; /* c Compute the number of "batches" of random number pairs generated c per processor. Adjust if the number of processors does not evenly c divide the total number */ np = NN; /* c Call the random number generator functions and initialize c the x-array to reduce the effects of paging on the timings. c Also, call all mathematical functions that are used. Make c sure these initializations cannot be eliminated as dead code. */ vranlc(0, &(dum[0]), dum[1], &(dum[2])); dum[0] = randlc(&(dum[1]), dum[2]); for (i = 0; i < 2*NK; i++) x[i] = -1.0e38; Mops = log(sqrt(fabs(max(1.0F, 1.0F)))); timer_clear(1); timer_clear(2); timer_clear(3); timer_start(1); vranlc(0, &t1, A, x); /* Compute AN = A ^ (2 * NK) (mod 2^46). */ t1 = A; for ( i = 1; i <= MK+1; i++) { t2 = randlc(&t1, t1); } an = t1; tt = S; gc = 0.0F; sx = 0.0F; sy = 0.0F; for ( i = 0; i <= NQ - 1; i++) { q[i] = 0.0F; } qq0 = 0.0F; qq1 = 0.0F; qq2 = 0.0F; qq3 = 0.0F; qq4 = 0.0F; qq5 = 0.0F; qq6 = 0.0F; qq7 = 0.0F; qq8 = 0.0F; qq9 = 0.0F; /* c Each instance of this loop may be performed independently. We compute c the k offsets separately to take into account the fact that some nodes c have more numbers to generate than others */ k_offset = -1; xx = (float (*)[NN/_UNROLLFAC_])malloc((2*NK)*(NN/_UNROLLFAC_)*sizeof(float)); #pragma acc kernels loop gang, worker, \ copyin(x[0:2*NK]), create(xx[0:2*NK][0:(NN/_UNROLLFAC_)]), \ private(t1, t2, t3, t4, x1, x2, k, kk, i, ik, m), \ private(l, t1_randlc, t2_randlc, t3_randlc, t4_randlc, a1_randlc, a2_randlc), \ private(x1_randlc, x2_randlc, z_randlc, a_randlc, i_vranlc, x_vranlc) for (m = 0; m < (NN/_UNROLLFAC_); m++) { for (i = 0; i < 2*NK; i++) xx[i][m] = x[i]; for (k = 0; k <_UNROLLFAC_; k++) { //for (i = 0; i < NQ; i++) qq[i] = 0.0f; //#pragma omp for reduction(+:sx,sy) schedule(static) nowait kk = k_offset + (m+k*(NN/_UNROLLFAC_)) + 1; t1 = S; t2 = an; /* Find starting seed t1 for this kk. 
*/ for (i = 1; i <= 100; i++) { ik = kk / 2; if (2 * ik != kk) { //t3 = randlc(&t1, t2); a_randlc = t2; t1_randlc = r23 * a_randlc; a1_randlc = (int)t1_randlc; a2_randlc = a_randlc - t23 * a1_randlc; t1_randlc = r23 * t1; x1_randlc = (int)t1_randlc; x2_randlc = t1 - t23 * x1_randlc; t1_randlc = a1_randlc * x2_randlc + a2_randlc * x1_randlc; t2_randlc = (int)(r23 * t1_randlc); z_randlc = t1_randlc - t23 * t2_randlc; t3_randlc = t23 * z_randlc + a2_randlc * x2_randlc; t4_randlc = (int)(r46 * t3_randlc); t1 = t3_randlc - t46 * t4_randlc; t3 = (r46 * t1); } if (ik == 0) break; //t3 = randlc(&t2, t2); a_randlc = t2; t1_randlc = r23 * a_randlc; a1_randlc = (int)t1_randlc; a2_randlc = a_randlc - t23 * a1_randlc; t1_randlc = r23 * t2; x1_randlc = (int)t1_randlc; x2_randlc = t2 - t23 * x1_randlc; t1_randlc = a1_randlc * x2_randlc + a2_randlc * x1_randlc; t2_randlc = (int)(r23 * t1_randlc); z_randlc = t1_randlc - t23 * t2_randlc; t3_randlc = t23 * z_randlc + a2_randlc * x2_randlc; t4_randlc = (int)(r46 * t3_randlc); t2 = t3_randlc - t46 * t4_randlc; t3 = (r46 * t2); kk = ik; } #ifdef DEBUG_ON_t1 printf("k = %d: t1 = %f\n", k-1, t1); #endif /* Compute uniform pseudorandom numbers. */ #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) timer_start(3); #endif //vranlc(2*NK, &t1, A, x-1); t1_randlc = r23 * A; a1_randlc = (int)t1_randlc; a2_randlc = A - t23 * a1_randlc; x_vranlc = t1; for (i_vranlc = 1; i_vranlc <= 2*NK; i_vranlc++) { t1_randlc = r23 * x_vranlc; x1_randlc = (int)t1_randlc; x2_randlc = x_vranlc - t23 * x1_randlc; t1_randlc = a1_randlc * x2_randlc + a2_randlc * x1_randlc; t2_randlc = (int)(r23 * t1_randlc); z_randlc = t1_randlc - t23 * t2_randlc; t3_randlc = t23 * z_randlc + a2_randlc * x2_randlc; t4_randlc = (int)(r46 * t3_randlc); x_vranlc = t3_randlc - t46 * t4_randlc; xx[i_vranlc-1][m] = r46 * x_vranlc; } t1 = x_vranlc; #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) timer_stop(3); #endif #ifdef DEBUG_ON_x if( (3 <= k)&&(k <= 5) ) for (i = 30; i < 40; i++) printf("x[%d][%d] = %f\n",k-1,i,x[i]); #endif /* c Compute Gaussian deviates by acceptance-rejection method and c tally counts in concentric square annuli. This loop is not c vectorizable. 
*/ #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) timer_start(2); #endif for ( i = 0; i < NK; i++) { x1 = 2.0F * xx[2*i][m] - 1.0F; x2 = 2.0F * xx[2*i+1][m] - 1.0F; t1 = pow2(x1) + pow2(x2); if (t1 <= 1.0F) { t2 = sqrtf(-2.0F * logf(t1) / t1); t3 = (x1 * t2); /* Xi */ t4 = (x2 * t2); /* Yi */ l = max(fabsf(t3), fabsf(t4)); //qq[l] += 1.0F; /* counts */ if( l == 0 ) { qq0 += 1.0F; } else if( l == 1 ) { qq1 += 1.0F; } else if( l == 2 ) { qq2 += 1.0F; } else if( l == 3 ) { qq3 += 1.0F; } else if( l == 4 ) { qq4 += 1.0F; } else if( l == 5 ) { qq5 += 1.0F; } else if( l == 6 ) { qq6 += 1.0F; } else if( l == 7 ) { qq7 += 1.0F; } else if( l == 8 ) { qq8 += 1.0F; } else { qq9 += 1.0F; } sx = sx + t3; /* sum of Xi */ sy = sy + t4; /* sum of Yi */ } } #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) timer_stop(2); #endif #ifdef DEBUG_ON_q printf("k = %d\n", k); for (i = 0; i <= NQ - 1; i++) printf("qq[%d] = %f\n",i,qq[i]); #endif /* //#pragma omp critical { for (i = 0; i <= NQ - 1; i++) q[i] += qq[i]; } */ } } /* end of parallel region */ q[0] = qq0; q[1] = qq1; q[2] = qq2; q[3] = qq3; q[4] = qq4; q[5] = qq5; q[6] = qq6; q[7] = qq7; q[8] = qq8; q[9] = qq9; for (i = 0; i <= NQ-1; i++) { gc = gc + q[i]; } timer_stop(1); tm = timer_read(1); nit = 0; if (M == 24) { if((fabs((sx- (2.554318847656250e+02))/sx) <= EPSILON) && (fabs((sy- (-2.176109161376953e+02))/sy) <= EPSILON)) { verified = TRUE; } } else if (M == 25) { if ((fabs((sx- (5.110573425292969e+02))/sx) <= EPSILON) && (fabs((sy- (-4.353658142089844e+02))/sy) <= EPSILON)) { verified = TRUE; } } else if (M == 28) { if ((fabs((sx- (3.994430908203125e+03))/sx) <= EPSILON) && (fabs((sy- (-3.514263671875000e+03))/sy) <= EPSILON)) { verified = TRUE; } } else if (M == 30) { if ((fabs((sx- (1.699876171875000e+04))/sx) <= EPSILON) && (fabs((sy- (-1.385202929687500e+04))/sy) <= EPSILON)) { verified = TRUE; } } else if (M == 32) { if ((fabs((sx- (4.520392968750000e+04))/sx) <= EPSILON) && (fabs((sy- (-4.611721093750000e+04))/sy) <= EPSILON)) { verified = TRUE; } } Mops = pow(2.0F, M+1)/tm/1000000.0F; printf("EP Benchmark Results: \n" "Accelerator Elapsed Time = %10.4f\n" "N = 2^%5d\n" "No. Gaussian Pairs = %15.0f\n" "Sums = %25.15e %25.15e\n" "Counts:\n", tm, M, gc, sx, sy); for (i = 0; i <= NQ-1; i++) { printf("%3d %15.0f\n", i, q[i]); } c_print_results("EP", CLASS, M+1, 0, 0, nit, nthreads, tm, Mops, "Random numbers generated", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) { printf("Total time: %f", timer_read(1)); printf("Gaussian pairs: %f", timer_read(2)); printf("Random numbers: %f", timer_read(3)); } #endif return 0; }
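Note: the long runs of t1_randlc/a1_randlc arithmetic inlined throughout the k-loop above are the classic NPB randlc() generator, expanded by hand so the accelerator compiler sees straight-line code. For reference, a minimal sketch of the standard double-precision randlc (this EP variant instead substitutes single-precision constants and a shortened split via the redefined r23/t23 macros):

#include <math.h>

/* x' = a * x mod 2^46, kept exact in doubles by splitting both
 * operands into 23-bit halves (r23 = 2^-23, t23 = 2^23). */
double randlc(double *x, double a)
{
    const double r23 = pow(0.5, 23.0), r46 = r23 * r23;
    const double t23 = pow(2.0, 23.0), t46 = t23 * t23;
    double t1, t2, t3, t4, a1, a2, x1, x2, z;

    t1 = r23 * a;  a1 = (int)t1;  a2 = a  - t23 * a1;  /* a = a1*2^23 + a2 */
    t1 = r23 * *x; x1 = (int)t1;  x2 = *x - t23 * x1;  /* x = x1*2^23 + x2 */

    t1 = a1 * x2 + a2 * x1;        /* middle partial products          */
    t2 = (int)(r23 * t1);
    z  = t1 - t23 * t2;            /* ... reduced mod 2^23             */
    t3 = t23 * z + a2 * x2;        /* combine with low partial product */
    t4 = (int)(r46 * t3);
    *x = t3 - t46 * t4;            /* reduce mod 2^46                  */
    return r46 * *x;               /* uniform deviate in (0, 1)        */
}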
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - EP This benchmark is an OpenMP C version of the NPB EP code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to [email protected] Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Author: P. O. Frederickson D. H. Bailey A. C. Woo OpenMP C version: S. Satoh --------------------------------------------------------------------*/ // #define DEBUG_ON_x // #define DEBUG_ON_t1 // #define DEBUG_ON_q #include "npb-C.h" #include "npbparams.h" /* parameters */ #define MK 12 #define MM (M - MK) #define NN (1 << MM) #define NK (1 << MK) #define NQ 10 // #define EPSILON 1.0e-8 // #define A 1220703125.0 // #define S 271828183.0 #define EPSILON 1.0e-6 #define A 390625.0f #define S 28183.0f // #define TIMERS_ENABLED FALSE #if defined(USE_POW) #define r23 pow(0.5f, 11.0F) #define r46 (r23*r23) #define t23 pow(2.0f, 11.0F) #define t46 (t23*t23) #else #define r23 (0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f) #define r46 (r23*r23) #define t23 (2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f) #define t46 (t23*t23) #endif #ifndef _UNROLLFAC_ #define _UNROLLFAC_ 1 #endif #ifdef _OPENARC_ #if _UNROLLFAC_ == 1 #pragma openarc #define _UNROLLFAC_ 1 #elif _UNROLLFAC_ == 6 #pragma openarc #define _UNROLLFAC_ 6 #elif _UNROLLFAC_ == 8 #pragma openarc #define _UNROLLFAC_ 8 #elif _UNROLLFAC_ == 32 #pragma openarc #define _UNROLLFAC_ 32 #elif _UNROLLFAC_ == 128 #pragma openarc #define _UNROLLFAC_ 128 #elif _UNROLLFAC_ == 1024 #pragma openarc #define _UNROLLFAC_ 1024 #endif #pragma openarc #define NK 4096 #endif /* global variables */ /* common /storage/ */ static float x[2 * NK]; // static float q[NQ]; /*-------------------------------------------------------------------- program EMBAR c-------------------------------------------------------------------*/ /* * c This is the serial version of the APP Benchmark 1, c the * "embarassingly parallel" benchmark. c c M is the Log_2 of the number of * complex pairs of uniform (0, 1) random c numbers. MK is the Log_2 of * the size of each batch of uniform random c numbers. MK can be set for * convenience on a given system, since it does c not affect the results. 
*/ int main(int argc, char **argv) { float Mops, t1, t2, t3, t4, x1, x2, sx, sy, tm, an, tt, gc; float dum[3] = {1.0 F, 1.0 F, 1.0 F}; int np, ierr, node, no_nodes, i, ik, kk, l, k, nit, ierrcode, no_large_nodes, np_add, k_offset, j; int nthreads = 1; boolean verified; char size[13 + 1]; /* character*13 */ //float t1, t2, t3, t4, x1, x2; //int kk, i, ik, l; //float qq[NQ]; /* private copy of q[0:NQ-1] */ float qq0; float qq1; float qq2; float qq3; float qq4; float qq5; float qq6; float qq7; float qq8; float qq9; float t1_randlc, t2_randlc, t3_randlc, t4_randlc, a1_randlc, a2_randlc, x1_randlc, x2_randlc, z_randlc, a_randlc; int i_vranlc; float x_vranlc; float (*xx)[(NN / _UNROLLFAC_)]; int m; /* * c Because the size of the problem is too large to store in a 32-bit * c integer for some classes, we put it into a string (for printing). * c Have to strip off the decimal point put in there by the floating c * point print statement (internal file) */ printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - EP Benchmark\n"); sprintf(size, "%12.0f", pow(2.0 F, M + 1)); for (j = 13; j >= 1; j--) { if (size[j] == '.') size[j] = ' '; } printf(" Number of random numbers generated: %13s\n", size); verified = FALSE; /* * c Compute the number of "batches" of random number pairs generated c * per processor. Adjust if the number of processors does not evenly c * divide the total number */ np = NN; /* * c Call the random number generator functions and initialize c the * x-array to reduce the effects of paging on the timings. c Also, call * all mathematical functions that are used. Make c sure these * initializations cannot be eliminated as dead code. */ vranlc(0, &(dum[0]), dum[1], &(dum[2])); dum[0] = randlc(&(dum[1]), dum[2]); for (i = 0; i < 2 * NK; i++) x[i] = -1.0e38; Mops = log(sqrt(fabs(max(1.0 F, 1.0 F)))); timer_clear(1); timer_clear(2); timer_clear(3); timer_start(1); vranlc(0, &t1, A, x); /* Compute AN = A ^ (2 * NK) (mod 2^46). */ t1 = A; for (i = 1; i <= MK + 1; i++) { t2 = randlc(&t1, t1); } an = t1; tt = S; gc = 0.0 F; sx = 0.0 F; sy = 0.0 F; for (i = 0; i <= NQ - 1; i++) { q[i] = 0.0 F; } qq0 = 0.0 F; qq1 = 0.0 F; qq2 = 0.0 F; qq3 = 0.0 F; qq4 = 0.0 F; qq5 = 0.0 F; qq6 = 0.0 F; qq7 = 0.0 F; qq8 = 0.0 F; qq9 = 0.0 F; /* * c Each instance of this loop may be performed independently. We * compute c the k offsets separately to take into account the fact * that some nodes c have more numbers to generate than others */ k_offset = -1; xx = (float (*)[NN / _UNROLLFAC_])malloc((2 * NK) * (NN / _UNROLLFAC_) * sizeof(float)); #pragma acc kernels loop gang, worker, \ copyin(x[0:2*NK]), create(xx[0:2*NK][0:(NN/_UNROLLFAC_)]), \ private(t1, t2, t3, t4, x1, x2, k, kk, i, ik, m), \ private(l, t1_randlc, t2_randlc, t3_randlc, t4_randlc, a1_randlc, a2_randlc), \ private(x1_randlc, x2_randlc, z_randlc, a_randlc, i_vranlc, x_vranlc) for (m = 0; m < (NN / _UNROLLFAC_); m++) { for (i = 0; i < 2 * NK; i++) xx[i][m] = x[i]; for (k = 0; k < _UNROLLFAC_; k++) { //for (i = 0; i < NQ; i++) qq[i] = 0.0 f; // kk = k_offset + (m + k * (NN / _UNROLLFAC_)) + 1; t1 = S; t2 = an; /* Find starting seed t1 for this kk. 
*/ for (i = 1; i <= 100; i++) { ik = kk / 2; if (2 * ik != kk) { //t3 = randlc(&t1, t2); a_randlc = t2; t1_randlc = r23 * a_randlc; a1_randlc = (int)t1_randlc; a2_randlc = a_randlc - t23 * a1_randlc; t1_randlc = r23 * t1; x1_randlc = (int)t1_randlc; x2_randlc = t1 - t23 * x1_randlc; t1_randlc = a1_randlc * x2_randlc + a2_randlc * x1_randlc; t2_randlc = (int)(r23 * t1_randlc); z_randlc = t1_randlc - t23 * t2_randlc; t3_randlc = t23 * z_randlc + a2_randlc * x2_randlc; t4_randlc = (int)(r46 * t3_randlc); t1 = t3_randlc - t46 * t4_randlc; t3 = (r46 * t1); } if (ik == 0) break; //t3 = randlc(&t2, t2); a_randlc = t2; t1_randlc = r23 * a_randlc; a1_randlc = (int)t1_randlc; a2_randlc = a_randlc - t23 * a1_randlc; t1_randlc = r23 * t2; x1_randlc = (int)t1_randlc; x2_randlc = t2 - t23 * x1_randlc; t1_randlc = a1_randlc * x2_randlc + a2_randlc * x1_randlc; t2_randlc = (int)(r23 * t1_randlc); z_randlc = t1_randlc - t23 * t2_randlc; t3_randlc = t23 * z_randlc + a2_randlc * x2_randlc; t4_randlc = (int)(r46 * t3_randlc); t2 = t3_randlc - t46 * t4_randlc; t3 = (r46 * t2); kk = ik; } #ifdef DEBUG_ON_t1 printf("k = %d: t1 = %f\n", k - 1, t1); #endif /* Compute uniform pseudorandom numbers. */ #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) timer_start(3); #endif //vranlc(2 * NK, &t1, A, x - 1); t1_randlc = r23 * A; a1_randlc = (int)t1_randlc; a2_randlc = A - t23 * a1_randlc; x_vranlc = t1; for (i_vranlc = 1; i_vranlc <= 2 * NK; i_vranlc++) { t1_randlc = r23 * x_vranlc; x1_randlc = (int)t1_randlc; x2_randlc = x_vranlc - t23 * x1_randlc; t1_randlc = a1_randlc * x2_randlc + a2_randlc * x1_randlc; t2_randlc = (int)(r23 * t1_randlc); z_randlc = t1_randlc - t23 * t2_randlc; t3_randlc = t23 * z_randlc + a2_randlc * x2_randlc; t4_randlc = (int)(r46 * t3_randlc); x_vranlc = t3_randlc - t46 * t4_randlc; xx[i_vranlc - 1][m] = r46 * x_vranlc; } t1 = x_vranlc; #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) timer_stop(3); #endif #ifdef DEBUG_ON_x if ((3 <= k) && (k <= 5)) for (i = 30; i < 40; i++) printf("x[%d][%d] = %f\n", k - 1, i, x[i]); #endif /* * c Compute Gaussian deviates by acceptance-rejection * method and c tally counts in concentric square annuli. * This loop is not c vectorizable. 
*/ #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) timer_start(2); #endif for (i = 0; i < NK; i++) { x1 = 2.0 F * xx[2 * i][m] - 1.0 F; x2 = 2.0 F * xx[2 * i + 1][m] - 1.0 F; t1 = pow2(x1) + pow2(x2); if (t1 <= 1.0 F) { t2 = sqrtf(-2.0 F * logf(t1) / t1); t3 = (x1 * t2); /* Xi */ t4 = (x2 * t2); /* Yi */ l = max(fabsf(t3), fabsf(t4)); //qq[l] += 1.0 F; /* counts */ if (l == 0) { qq0 += 1.0 F; } else if (l == 1) { qq1 += 1.0 F; } else if (l == 2) { qq2 += 1.0 F; } else if (l == 3) { qq3 += 1.0 F; } else if (l == 4) { qq4 += 1.0 F; } else if (l == 5) { qq5 += 1.0 F; } else if (l == 6) { qq6 += 1.0 F; } else if (l == 7) { qq7 += 1.0 F; } else if (l == 8) { qq8 += 1.0 F; } else { qq9 += 1.0 F; } sx = sx + t3; /* sum of Xi */ sy = sy + t4; /* sum of Yi */ } } #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) timer_stop(2); #endif #ifdef DEBUG_ON_q printf("k = %d\n", k); for (i = 0; i <= NQ - 1; i++) printf("qq[%d] = %f\n", i, qq[i]); #endif /* * // for (i = 0; i <= NQ - 1; i++) q[i] += qq[i]; * */ } } /* end of parallel region */ q[0] = qq0; q[1] = qq1; q[2] = qq2; q[3] = qq3; q[4] = qq4; q[5] = qq5; q[6] = qq6; q[7] = qq7; q[8] = qq8; q[9] = qq9; for (i = 0; i <= NQ - 1; i++) { gc = gc + q[i]; } timer_stop(1); tm = timer_read(1); nit = 0; if (M == 24) { if ((fabs((sx - (2.554318847656250e+02)) / sx) <= EPSILON) && (fabs((sy - (-2.176109161376953e+02)) / sy) <= EPSILON)) { verified = TRUE; } } else if (M == 25) { if ((fabs((sx - (5.110573425292969e+02)) / sx) <= EPSILON) && (fabs((sy - (-4.353658142089844e+02)) / sy) <= EPSILON)) { verified = TRUE; } } else if (M == 28) { if ((fabs((sx - (3.994430908203125e+03)) / sx) <= EPSILON) && (fabs((sy - (-3.514263671875000e+03)) / sy) <= EPSILON)) { verified = TRUE; } } else if (M == 30) { if ((fabs((sx - (1.699876171875000e+04)) / sx) <= EPSILON) && (fabs((sy - (-1.385202929687500e+04)) / sy) <= EPSILON)) { verified = TRUE; } } else if (M == 32) { if ((fabs((sx - (4.520392968750000e+04)) / sx) <= EPSILON) && (fabs((sy - (-4.611721093750000e+04)) / sy) <= EPSILON)) { verified = TRUE; } } Mops = pow(2.0 F, M + 1) / tm / 1000000.0 F; printf("EP Benchmark Results: \n" "Accelerator Elapsed Time = %10.4f\n" "N = 2^%5d\n" "No. Gaussian Pairs = %15.0f\n" "Sums = %25.15e %25.15e\n" "Counts:\n", tm, M, gc, sx, sy); for (i = 0; i <= NQ - 1; i++) { printf("%3d %15.0f\n", i, q[i]); } c_print_results("EP", CLASS, M + 1, 0, 0, nit, nthreads, tm, Mops, "Random numbers generated", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) { printf("Total time: %f", timer_read(1)); printf("Gaussian pairs: %f", timer_read(2)); printf("Random numbers: %f", timer_read(3)); } #endif return 0; }
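The bounded for (i = 1; i <= 100; i++) loop above is a square-and-multiply seed skip: batch kk starts from S advanced by kk strides, where one stride is an = A^(2*NK) mod 2^46, so the loop computes t1 = S * an^kk mod 2^46 in O(log kk) generator calls. An equivalent hypothetical helper (find_seed is not in the source; it assumes the randlc sketched earlier):

double randlc(double *x, double a);   /* as sketched above */

/* Return seed s advanced by kk generator strides of size an. */
double find_seed(double s, double an, long kk)
{
    double t1 = s;    /* running seed         */
    double t2 = an;   /* current stride power */
    while (kk != 0) {
        if (kk & 1)
            (void) randlc(&t1, t2);   /* multiply: t1 = t1*t2 mod 2^46 */
        kk >>= 1;
        if (kk != 0)
            (void) randlc(&t2, t2);   /* square:   t2 = t2*t2 mod 2^46 */
    }
    return t1;
}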
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - EP This benchmark is an OpenMP C version of the NPB EP code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to [email protected] Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Author: P. O. Frederickson D. H. Bailey A. C. Woo OpenMP C version: S. Satoh --------------------------------------------------------------------*/ // #define DEBUG_ON_x // #define DEBUG_ON_t1 // #define DEBUG_ON_q #include "npb-C.h" #include "npbparams.h" /* parameters */ #define MK 12 #define MM (M - MK) #define NN (1 << MM) #define NK (1 << MK) #define NQ 10 // #define EPSILON 1.0e-8 // #define A 1220703125.0 // #define S 271828183.0 #define EPSILON 1.0e-6 #define A 390625.0f #define S 28183.0f // #define TIMERS_ENABLED FALSE #if defined(USE_POW) #define r23 pow(0.5f, 11.0F) #define r46 (r23*r23) #define t23 pow(2.0f, 11.0F) #define t46 (t23*t23) #else #define r23 (0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f*0.5f) #define r46 (r23*r23) #define t23 (2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f*2.0f) #define t46 (t23*t23) #endif #ifndef _UNROLLFAC_ #define _UNROLLFAC_ 1 #endif #ifdef _OPENARC_ #if _UNROLLFAC_ == 1 #pragma openarc #define _UNROLLFAC_ 1 #elif _UNROLLFAC_ == 6 #pragma openarc #define _UNROLLFAC_ 6 #elif _UNROLLFAC_ == 8 #pragma openarc #define _UNROLLFAC_ 8 #elif _UNROLLFAC_ == 32 #pragma openarc #define _UNROLLFAC_ 32 #elif _UNROLLFAC_ == 128 #pragma openarc #define _UNROLLFAC_ 128 #elif _UNROLLFAC_ == 1024 #pragma openarc #define _UNROLLFAC_ 1024 #endif #pragma openarc #define NK 4096 #endif /* global variables */ /* common /storage/ */ static float x[2 * NK]; // #pragma omp threadprivate(x) static float q[NQ]; /*-------------------------------------------------------------------- program EMBAR c-------------------------------------------------------------------*/ /* * c This is the serial version of the APP Benchmark 1, c the * "embarassingly parallel" benchmark. c c M is the Log_2 of the number of * complex pairs of uniform (0, 1) random c numbers. MK is the Log_2 of * the size of each batch of uniform random c numbers. MK can be set for * convenience on a given system, since it does c not affect the results. 
*/ int main(int argc, char **argv) { float Mops, t1, t2, t3, t4, x1, x2, sx, sy, tm, an, tt, gc; float dum[3] = {1.0 F, 1.0 F, 1.0 F}; int np, ierr, node, no_nodes, i, ik, kk, l, k, nit, ierrcode, no_large_nodes, np_add, k_offset, j; int nthreads = 1; boolean verified; char size[13 + 1]; /* character*13 */ //float t1, t2, t3, t4, x1, x2; //int kk, i, ik, l; //float qq[NQ]; /* private copy of q[0:NQ-1] */ float qq0; float qq1; float qq2; float qq3; float qq4; float qq5; float qq6; float qq7; float qq8; float qq9; float t1_randlc, t2_randlc, t3_randlc, t4_randlc, a1_randlc, a2_randlc, x1_randlc, x2_randlc, z_randlc, a_randlc; int i_vranlc; float x_vranlc; float (*xx)[(NN / _UNROLLFAC_)]; int m; /* * c Because the size of the problem is too large to store in a 32-bit * c integer for some classes, we put it into a string (for printing). * c Have to strip off the decimal point put in there by the floating c * point print statement (internal file) */ printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - EP Benchmark\n"); sprintf(size, "%12.0f", pow(2.0 F, M + 1)); for (j = 13; j >= 1; j--) { if (size[j] == '.') size[j] = ' '; } printf(" Number of random numbers generated: %13s\n", size); verified = FALSE; /* * c Compute the number of "batches" of random number pairs generated c * per processor. Adjust if the number of processors does not evenly c * divide the total number */ np = NN; /* * c Call the random number generator functions and initialize c the * x-array to reduce the effects of paging on the timings. c Also, call * all mathematical functions that are used. Make c sure these * initializations cannot be eliminated as dead code. */ vranlc(0, &(dum[0]), dum[1], &(dum[2])); dum[0] = randlc(&(dum[1]), dum[2]); for (i = 0; i < 2 * NK; i++) x[i] = -1.0e38; Mops = log(sqrt(fabs(max(1.0 F, 1.0 F)))); timer_clear(1); timer_clear(2); timer_clear(3); timer_start(1); vranlc(0, &t1, A, x); /* Compute AN = A ^ (2 * NK) (mod 2^46). */ t1 = A; for (i = 1; i <= MK + 1; i++) { t2 = randlc(&t1, t1); } an = t1; tt = S; gc = 0.0 F; sx = 0.0 F; sy = 0.0 F; for (i = 0; i <= NQ - 1; i++) { q[i] = 0.0 F; } qq0 = 0.0 F; qq1 = 0.0 F; qq2 = 0.0 F; qq3 = 0.0 F; qq4 = 0.0 F; qq5 = 0.0 F; qq6 = 0.0 F; qq7 = 0.0 F; qq8 = 0.0 F; qq9 = 0.0 F; /* * c Each instance of this loop may be performed independently. We * compute c the k offsets separately to take into account the fact * that some nodes c have more numbers to generate than others */ k_offset = -1; xx = (float (*)[NN / _UNROLLFAC_])malloc((2 * NK) * (NN / _UNROLLFAC_) * sizeof(float)); #pragma acc kernels loop gang, worker, \ copyin(x[0:2*NK]), create(xx[0:2*NK][0:(NN/_UNROLLFAC_)]), \ private(t1, t2, t3, t4, x1, x2, k, kk, i, ik, m), \ private(l, t1_randlc, t2_randlc, t3_randlc, t4_randlc, a1_randlc, a2_randlc), \ private(x1_randlc, x2_randlc, z_randlc, a_randlc, i_vranlc, x_vranlc) for (m = 0; m < (NN / _UNROLLFAC_); m++) { for (i = 0; i < 2 * NK; i++) xx[i][m] = x[i]; for (k = 0; k < _UNROLLFAC_; k++) { //for (i = 0; i < NQ; i++) qq[i] = 0.0 f; // #pragma omp for reduction(+:sx,sy) schedule(static) nowait kk = k_offset + (m + k * (NN / _UNROLLFAC_)) + 1; t1 = S; t2 = an; /* Find starting seed t1 for this kk. 
*/ for (i = 1; i <= 100; i++) { ik = kk / 2; if (2 * ik != kk) { //t3 = randlc(&t1, t2); a_randlc = t2; t1_randlc = r23 * a_randlc; a1_randlc = (int)t1_randlc; a2_randlc = a_randlc - t23 * a1_randlc; t1_randlc = r23 * t1; x1_randlc = (int)t1_randlc; x2_randlc = t1 - t23 * x1_randlc; t1_randlc = a1_randlc * x2_randlc + a2_randlc * x1_randlc; t2_randlc = (int)(r23 * t1_randlc); z_randlc = t1_randlc - t23 * t2_randlc; t3_randlc = t23 * z_randlc + a2_randlc * x2_randlc; t4_randlc = (int)(r46 * t3_randlc); t1 = t3_randlc - t46 * t4_randlc; t3 = (r46 * t1); } if (ik == 0) break; //t3 = randlc(&t2, t2); a_randlc = t2; t1_randlc = r23 * a_randlc; a1_randlc = (int)t1_randlc; a2_randlc = a_randlc - t23 * a1_randlc; t1_randlc = r23 * t2; x1_randlc = (int)t1_randlc; x2_randlc = t2 - t23 * x1_randlc; t1_randlc = a1_randlc * x2_randlc + a2_randlc * x1_randlc; t2_randlc = (int)(r23 * t1_randlc); z_randlc = t1_randlc - t23 * t2_randlc; t3_randlc = t23 * z_randlc + a2_randlc * x2_randlc; t4_randlc = (int)(r46 * t3_randlc); t2 = t3_randlc - t46 * t4_randlc; t3 = (r46 * t2); kk = ik; } #ifdef DEBUG_ON_t1 printf("k = %d: t1 = %f\n", k - 1, t1); #endif /* Compute uniform pseudorandom numbers. */ #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) timer_start(3); #endif //vranlc(2 * NK, &t1, A, x - 1); t1_randlc = r23 * A; a1_randlc = (int)t1_randlc; a2_randlc = A - t23 * a1_randlc; x_vranlc = t1; for (i_vranlc = 1; i_vranlc <= 2 * NK; i_vranlc++) { t1_randlc = r23 * x_vranlc; x1_randlc = (int)t1_randlc; x2_randlc = x_vranlc - t23 * x1_randlc; t1_randlc = a1_randlc * x2_randlc + a2_randlc * x1_randlc; t2_randlc = (int)(r23 * t1_randlc); z_randlc = t1_randlc - t23 * t2_randlc; t3_randlc = t23 * z_randlc + a2_randlc * x2_randlc; t4_randlc = (int)(r46 * t3_randlc); x_vranlc = t3_randlc - t46 * t4_randlc; xx[i_vranlc - 1][m] = r46 * x_vranlc; } t1 = x_vranlc; #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) timer_stop(3); #endif #ifdef DEBUG_ON_x if ((3 <= k) && (k <= 5)) for (i = 30; i < 40; i++) printf("x[%d][%d] = %f\n", k - 1, i, x[i]); #endif /* * c Compute Gaussian deviates by acceptance-rejection * method and c tally counts in concentric square annuli. * This loop is not c vectorizable. 
*/ #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) timer_start(2); #endif for (i = 0; i < NK; i++) { x1 = 2.0 F * xx[2 * i][m] - 1.0 F; x2 = 2.0 F * xx[2 * i + 1][m] - 1.0 F; t1 = pow2(x1) + pow2(x2); if (t1 <= 1.0 F) { t2 = sqrtf(-2.0 F * logf(t1) / t1); t3 = (x1 * t2); /* Xi */ t4 = (x2 * t2); /* Yi */ l = max(fabsf(t3), fabsf(t4)); //qq[l] += 1.0 F; /* counts */ if (l == 0) { qq0 += 1.0 F; } else if (l == 1) { qq1 += 1.0 F; } else if (l == 2) { qq2 += 1.0 F; } else if (l == 3) { qq3 += 1.0 F; } else if (l == 4) { qq4 += 1.0 F; } else if (l == 5) { qq5 += 1.0 F; } else if (l == 6) { qq6 += 1.0 F; } else if (l == 7) { qq7 += 1.0 F; } else if (l == 8) { qq8 += 1.0 F; } else { qq9 += 1.0 F; } sx = sx + t3; /* sum of Xi */ sy = sy + t4; /* sum of Yi */ } } #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) timer_stop(2); #endif #ifdef DEBUG_ON_q printf("k = %d\n", k); for (i = 0; i <= NQ - 1; i++) printf("qq[%d] = %f\n", i, qq[i]); #endif /* * //#pragma omp critical { for (i = 0; i <= NQ - 1; i++) q[i] += * qq[i]; } */ } } /* end of parallel region */ q[0] = qq0; q[1] = qq1; q[2] = qq2; q[3] = qq3; q[4] = qq4; q[5] = qq5; q[6] = qq6; q[7] = qq7; q[8] = qq8; q[9] = qq9; for (i = 0; i <= NQ - 1; i++) { gc = gc + q[i]; } timer_stop(1); tm = timer_read(1); nit = 0; if (M == 24) { if ((fabs((sx - (2.554318847656250e+02)) / sx) <= EPSILON) && (fabs((sy - (-2.176109161376953e+02)) / sy) <= EPSILON)) { verified = TRUE; } } else if (M == 25) { if ((fabs((sx - (5.110573425292969e+02)) / sx) <= EPSILON) && (fabs((sy - (-4.353658142089844e+02)) / sy) <= EPSILON)) { verified = TRUE; } } else if (M == 28) { if ((fabs((sx - (3.994430908203125e+03)) / sx) <= EPSILON) && (fabs((sy - (-3.514263671875000e+03)) / sy) <= EPSILON)) { verified = TRUE; } } else if (M == 30) { if ((fabs((sx - (1.699876171875000e+04)) / sx) <= EPSILON) && (fabs((sy - (-1.385202929687500e+04)) / sy) <= EPSILON)) { verified = TRUE; } } else if (M == 32) { if ((fabs((sx - (4.520392968750000e+04)) / sx) <= EPSILON) && (fabs((sy - (-4.611721093750000e+04)) / sy) <= EPSILON)) { verified = TRUE; } } Mops = pow(2.0 F, M + 1) / tm / 1000000.0 F; printf("EP Benchmark Results: \n" "Accelerator Elapsed Time = %10.4f\n" "N = 2^%5d\n" "No. Gaussian Pairs = %15.0f\n" "Sums = %25.15e %25.15e\n" "Counts:\n", tm, M, gc, sx, sy); for (i = 0; i <= NQ - 1; i++) { printf("%3d %15.0f\n", i, q[i]); } c_print_results("EP", CLASS, M + 1, 0, 0, nit, nthreads, tm, Mops, "Random numbers generated", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); #ifdef TIMERS_ENABLED if (TIMERS_ENABLED == TRUE) { printf("Total time: %f", timer_read(1)); printf("Gaussian pairs: %f", timer_read(2)); printf("Random numbers: %f", timer_read(3)); } #endif return 0; }
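This OpenMP-annotated variant keeps the reduction pragma and the q[] merge only as comments (the live path is the acc kernels loop, with the ten qq scalars standing in for a private array). Below is a compilable structural sketch of what the commented-out OpenMP path would look like; the loop body is a stand-in for the real batch work, not the benchmark's math:

#include <omp.h>
#define NQ 10

void tally(int np, double *q_out, double *sx_out, double *sy_out)
{
    double sx = 0.0, sy = 0.0, q[NQ] = {0.0};
#pragma omp parallel
    {
        double qq[NQ] = {0.0};               /* private per-thread bins */
#pragma omp for reduction(+:sx,sy) schedule(static) nowait
        for (int k = 1; k <= np; k++) {
            sx += 0.0; sy += 0.0;            /* stand-in: batch k's Xi/Yi sums */
            qq[0] += 1.0;                    /* stand-in: annulus counts       */
        }
#pragma omp critical
        for (int i = 0; i < NQ; i++) q[i] += qq[i];   /* merge bins */
    }
    for (int i = 0; i < NQ; i++) q_out[i] = q[i];
    *sx_out = sx; *sy_out = sy;
}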
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! * \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return omp_get_max_threads(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! * \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } /* \brief Compute flattened index given coordinates and shape. 
*/ template<int ndim> MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) { int ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i]; } return ret; } /* Compute coordinates from flattened index given shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) { Shape<ndim> ret; #pragma unroll for (int i = ndim-1, j = idx; i >=0; --i) { int tmp = j / shape[i]; ret[i] = j - tmp*shape[i]; j = tmp; } return ret; } /* Compute dot product of two vector */ template<int ndim> MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) { int ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) ret += coord[i] * stride[i]; return ret; } /* Combining unravel and dot */ template<int ndim> MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape, const Shape<ndim>& stride) { int ret = 0; #pragma unroll for (int i = ndim-1, j = idx; i >=0; --i) { int tmp = j / shape[i]; ret += (j - tmp*shape[i])*stride[i]; j = tmp; } return ret; } /* Calculate stride of each dim from shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) { Shape<ndim> stride; index_t cumprod = 1; #pragma unroll for (int i = ndim - 1; i >= 0; --i) { stride[i] = (shape[i] > 1) ? cumprod : 0; cumprod *= shape[i]; } return stride; } /*! * \brief Simple copy data from one blob to another * \param to Destination blob * \param from Source blob */ template <typename xpu> MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) { CHECK_EQ(from.Size(), to.Size()); CHECK_EQ(from.dev_mask(), to.dev_mask()); MSHADOW_TYPE_SWITCH(to.type_flag_, DType, { if (to.type_flag_ == from.type_flag_) { mshadow::Copy(to.FlatTo1D<xpu, DType>(), from.FlatTo1D<xpu, DType>(), s); } else { MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, { to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s)); }) } }) } /*! \brief Binary op backward gradient OP wrapper */ template<typename GRAD_OP> struct backward_grad { /* \brief Backward calc with grad * \param a - output grad * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies) * \return input grad */ template<typename DType, typename ...Args> MSHADOW_XINLINE static DType Map(DType a, Args... args) { return DType(a * GRAD_OP::Map(args...)); } }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief No inputs (ie fill to constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! 
\brief input is single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (ie backward grad with binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } }; /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<int val> struct set_to_int { // mxnet_op version (when used directly with Kernel<>::Launch()) */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType* out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! \brief Special-case kernel shortcut for setting to zero */ using set_zero = set_to_int<0>; template<typename OP, typename xpu> struct Kernel; template<typename OP> struct Kernel<OP, cpu> { template<typename ...Args> inline static void Launch(mshadow::Stream<cpu> *s, const int N, Args... args) { #ifdef _OPENMP const int omp_cores = Engine::Get()->num_omp_threads_per_worker(); if (omp_cores <= 1) { // Zero means not to use OMP, but don't interfere with external OMP behavior for (int i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_cores) for (int i = 0; i < N; ++i) { OP::Map(i, args...); } } #else for (int i = 0; i < N; ++i) { OP::Map(i, args...); } #endif } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP> struct Kernel<OP, gpu> { template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) { using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); } }; #endif // __CUDACC__ } // namespace mxnet_op } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_MXNET_OP_H_
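Usage sketch for the header above: any struct exposing a static Map(int i, ...) becomes an elementwise kernel, and the same Launch call site serves cpu (OpenMP) and gpu (CUDA grid-stride) builds. plus_one here is a hypothetical op, not part of the header:

struct plus_one {
  template <typename DType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const DType* in) {
    out[i] = in[i] + DType(1);  // one element per index i
  }
};

// Inside an operator's FCompute, with s = ctx.get_stream<xpu>():
//   mxnet::op::mxnet_op::Kernel<plus_one, xpu>::Launch(
//       s, out.Size(), out.dptr<float>(), in.dptr<float>());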
/*! * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! * \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return omp_get_max_threads(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! * \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } /* \brief Compute flattened index given coordinates and shape. 
*/ template<int ndim> MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) { int ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i]; } return ret; } /* Compute coordinates from flattened index given shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) { Shape<ndim> ret; #pragma unroll for (int i = ndim-1, j = idx; i >=0; --i) { int tmp = j / shape[i]; ret[i] = j - tmp*shape[i]; j = tmp; } return ret; } /* Compute dot product of two vector */ template<int ndim> MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) { int ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) ret += coord[i] * stride[i]; return ret; } /* Combining unravel and dot */ template<int ndim> MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape, const Shape<ndim>& stride) { int ret = 0; #pragma unroll for (int i = ndim-1, j = idx; i >=0; --i) { int tmp = j / shape[i]; ret += (j - tmp*shape[i])*stride[i]; j = tmp; } return ret; } /* Calculate stride of each dim from shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) { Shape<ndim> stride; index_t cumprod = 1; #pragma unroll for (int i = ndim - 1; i >= 0; --i) { stride[i] = (shape[i] > 1) ? cumprod : 0; cumprod *= shape[i]; } return stride; } /*! * \brief Simple copy data from one blob to another * \param to Destination blob * \param from Source blob */ template <typename xpu> MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) { CHECK_EQ(from.Size(), to.Size()); CHECK_EQ(from.dev_mask(), to.dev_mask()); MSHADOW_TYPE_SWITCH(to.type_flag_, DType, { if (to.type_flag_ == from.type_flag_) { mshadow::Copy(to.FlatTo1D<xpu, DType>(), from.FlatTo1D<xpu, DType>(), s); } else { MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, { to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s)); }) } }) } /*! \brief Binary op backward gradient OP wrapper */ template<typename GRAD_OP> struct backward_grad { /* \brief Backward calc with grad * \param a - output grad * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies) * \return input grad */ template<typename DType, typename ...Args> MSHADOW_XINLINE static DType Map(DType a, Args... args) { return DType(a * GRAD_OP::Map(args...)); } }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief No inputs (ie fill to constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! 
\brief input is single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (ie backward grad with binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } }; /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<int val> struct set_to_int { // mxnet_op version (when used directly with Kernel<>::Launch()) */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType* out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! \brief Special-case kernel shortcut for setting to zero */ using set_zero = set_to_int<0>; template<typename OP, typename xpu> struct Kernel; template<typename OP> struct Kernel<OP, cpu> { template<typename ...Args> inline static void Launch(mshadow::Stream<cpu> *s, const int N, Args... args) { } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP> struct Kernel<OP, gpu> { template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) { using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); } }; #endif // __CUDACC__ } // namespace mxnet_op } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_MXNET_OP_H_
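In this OpenMP-stripped variant the cpu Kernel::Launch body above is empty; the stripping apparently removed the entire #ifdef _OPENMP region, serial fallback included. The behavior-preserving serial body would presumably be just the plain loop:

template <typename... Args>
inline static void Launch(mshadow::Stream<cpu>* s, const int N, Args... args) {
  // Same elementwise dispatch as the OpenMP variant, single-threaded.
  for (int i = 0; i < N; ++i) {
    OP::Map(i, args...);
  }
}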
/*! * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! * \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return omp_get_max_threads(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! * \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } /* \brief Compute flattened index given coordinates and shape. 
*/ template<int ndim> MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) { int ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i]; } return ret; } /* Compute coordinates from flattened index given shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) { Shape<ndim> ret; #pragma unroll for (int i = ndim-1, j = idx; i >=0; --i) { int tmp = j / shape[i]; ret[i] = j - tmp*shape[i]; j = tmp; } return ret; } /* Compute dot product of two vector */ template<int ndim> MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) { int ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) ret += coord[i] * stride[i]; return ret; } /* Combining unravel and dot */ template<int ndim> MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape, const Shape<ndim>& stride) { int ret = 0; #pragma unroll for (int i = ndim-1, j = idx; i >=0; --i) { int tmp = j / shape[i]; ret += (j - tmp*shape[i])*stride[i]; j = tmp; } return ret; } /* Calculate stride of each dim from shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) { Shape<ndim> stride; index_t cumprod = 1; #pragma unroll for (int i = ndim - 1; i >= 0; --i) { stride[i] = (shape[i] > 1) ? cumprod : 0; cumprod *= shape[i]; } return stride; } /*! * \brief Simple copy data from one blob to another * \param to Destination blob * \param from Source blob */ template <typename xpu> MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) { CHECK_EQ(from.Size(), to.Size()); CHECK_EQ(from.dev_mask(), to.dev_mask()); MSHADOW_TYPE_SWITCH(to.type_flag_, DType, { if (to.type_flag_ == from.type_flag_) { mshadow::Copy(to.FlatTo1D<xpu, DType>(), from.FlatTo1D<xpu, DType>(), s); } else { MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, { to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s)); }) } }) } /*! \brief Binary op backward gradient OP wrapper */ template<typename GRAD_OP> struct backward_grad { /* \brief Backward calc with grad * \param a - output grad * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies) * \return input grad */ template<typename DType, typename ...Args> MSHADOW_XINLINE static DType Map(DType a, Args... args) { return DType(a * GRAD_OP::Map(args...)); } }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief No inputs (ie fill to constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! 
\brief input is single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (ie backward grad with binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } }; /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<int val> struct set_to_int { // mxnet_op version (when used directly with Kernel<>::Launch()) */ template<typename DType> MSHADOW_XINLINE static void Map(int i, DType* out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! \brief Special-case kernel shortcut for setting to zero */ using set_zero = set_to_int<0>; template<typename OP, typename xpu> struct Kernel; template<typename OP> struct Kernel<OP, cpu> { template<typename ...Args> inline static void Launch(mshadow::Stream<cpu> *s, const int N, Args... args) { #ifdef _OPENMP const int omp_cores = Engine::Get()->num_omp_threads_per_worker(); if (omp_cores <= 1) { // Zero means not to use OMP, but don't interfere with external OMP behavior for (int i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_cores) for (int i = 0; i < N; ++i) { OP::Map(i, args...); } } #else for (int i = 0; i < N; ++i) { OP::Map(i, args...); } #endif } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP> struct Kernel<OP, gpu> { template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) { using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); } }; #endif // __CUDACC__ } // namespace mxnet_op } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_MXNET_OP_H_
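The pieces above compose as follows: MXNET_ASSIGN_REQ_SWITCH lifts the runtime OpReqType to a compile-time template argument, so the KERNEL_ASSIGN switch inside op_with_req resolves statically in the hot loop. A sketch under the assumption that mxnet's mshadow_op.h (providing mshadow_op::plus) is included alongside this header; BinaryPlus is illustrative, not from the header:

template <typename xpu>
void BinaryPlus(mshadow::Stream<xpu>* s, const mxnet::OpReqType req,
                const int size, float* out, const float* lhs,
                const float* rhs) {
  using namespace mxnet;       // kWriteTo/kAddTo inside the switch macro
  using namespace mxnet::op;
  MXNET_ASSIGN_REQ_SWITCH(req, ReqType, {
    mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::plus, ReqType>, xpu>
        ::Launch(s, size, out, lhs, rhs);
  });
}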
fourier.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF OOO U U RRRR IIIII EEEEE RRRR % % F O O U U R R I E R R % % FFF O O U U RRRR I EEE RRRR % % F O O U U R R I E R R % % F OOO UUU R R IIIII EEEEE R R % % % % % % MagickCore Discrete Fourier Transform Methods % % % % Software Design % % Sean Burke % % Fred Weinhaus % % Cristy % % July 2009 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/cache.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/fourier.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #if defined(MAGICKCORE_FFTW_DELEGATE) #if defined(MAGICKCORE_HAVE_COMPLEX_H) #include <complex.h> #endif #include <fftw3.h> #if !defined(MAGICKCORE_HAVE_CABS) #define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1])) #endif #if !defined(MAGICKCORE_HAVE_CARG) #define carg(z) (atan2(cimag(z),creal(z))) #endif #if !defined(MAGICKCORE_HAVE_CIMAG) #define cimag(z) (z[1]) #endif #if !defined(MAGICKCORE_HAVE_CREAL) #define creal(z) (z[0]) #endif #endif /* Typedef declarations. */ typedef struct _FourierInfo { ChannelType channel; MagickBooleanType modulus; size_t width, height; ssize_t center; } FourierInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p l e x I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ComplexImages() performs complex mathematics on an image sequence. % % The format of the ComplexImages method is: % % MagickBooleanType ComplexImages(Image *images,const ComplexOperator op, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o op: A complex operator. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op, ExceptionInfo *exception) { #define ComplexImageTag "Complex/Image" CacheView *Ai_view, *Ar_view, *Bi_view, *Br_view, *Ci_view, *Cr_view; const char *artifact; const Image *Ai_image, *Ar_image, *Bi_image, *Br_image; double snr; Image *Ci_image, *complex_images, *Cr_image, *image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if (images->next == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",images->filename); return((Image *) NULL); } image=CloneImage(images,images->columns,images->rows,MagickTrue,exception); if (image == (Image *) NULL) return((Image *) NULL); image->storage_class=DirectClass; image->depth=32UL; complex_images=NewImageList(); AppendImageToList(&complex_images,image); image=CloneImage(images,images->columns,images->rows,MagickTrue,exception); if (image == (Image *) NULL) { complex_images=DestroyImageList(complex_images); return(complex_images); } AppendImageToList(&complex_images,image); complex_images->storage_class=DirectClass; complex_images->depth=32UL; /* Apply complex mathematics to image pixels. */ artifact=GetImageArtifact(image,"complex:snr"); snr=0.0; if (artifact != (const char *) NULL) snr=StringToDouble(artifact,(char **) NULL); Ar_image=images; Ai_image=images->next; Br_image=images; Bi_image=images->next; if ((images->next->next != (Image *) NULL) && (images->next->next->next != (Image *) NULL)) { Br_image=images->next->next; Bi_image=images->next->next->next; } Cr_image=complex_images; Ci_image=complex_images->next; Ar_view=AcquireVirtualCacheView(Ar_image,exception); Ai_view=AcquireVirtualCacheView(Ai_image,exception); Br_view=AcquireVirtualCacheView(Br_image,exception); Bi_view=AcquireVirtualCacheView(Bi_image,exception); Cr_view=AcquireAuthenticCacheView(Cr_image,exception); Ci_view=AcquireAuthenticCacheView(Ci_image,exception); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(images,complex_images,images->rows,1) #endif for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *magick_restrict Ai, *magick_restrict Ar, *magick_restrict Bi, *magick_restrict Br; register PixelPacket *magick_restrict Ci, *magick_restrict Cr; register ssize_t x; if (status == MagickFalse) continue; Ar=GetCacheViewVirtualPixels(Ar_view,0,y,Ar_image->columns,1,exception); Ai=GetCacheViewVirtualPixels(Ai_view,0,y,Ai_image->columns,1,exception); Br=GetCacheViewVirtualPixels(Br_view,0,y,Br_image->columns,1,exception); Bi=GetCacheViewVirtualPixels(Bi_view,0,y,Bi_image->columns,1,exception); Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception); Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception); if ((Ar == (const PixelPacket *) NULL) || (Ai == (const PixelPacket *) NULL) || (Br == (const PixelPacket *) NULL) || (Bi == (const PixelPacket *) NULL) || (Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) images->columns; x++) { switch (op) { case AddComplexOperator: { Cr->red=Ar->red+Br->red; 
Ci->red=Ai->red+Bi->red; Cr->green=Ar->green+Br->green; Ci->green=Ai->green+Bi->green; Cr->blue=Ar->blue+Br->blue; Ci->blue=Ai->blue+Bi->blue; if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity+Br->opacity; Ci->opacity=Ai->opacity+Bi->opacity; } break; } case ConjugateComplexOperator: default: { Cr->red=Ar->red; Ci->red=(-Bi->red); Cr->green=Ar->green; Ci->green=(-Bi->green); Cr->blue=Ar->blue; Ci->blue=(-Bi->blue); if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity; Ci->opacity=(-Bi->opacity); } break; } case DivideComplexOperator: { double gamma; gamma=PerceptibleReciprocal(Br->red*Br->red+Bi->red*Bi->red+snr); Cr->red=gamma*(Ar->red*Br->red+Ai->red*Bi->red); Ci->red=gamma*(Ai->red*Br->red-Ar->red*Bi->red); gamma=PerceptibleReciprocal(Br->green*Br->green+Bi->green*Bi->green+ snr); Cr->green=gamma*(Ar->green*Br->green+Ai->green*Bi->green); Ci->green=gamma*(Ai->green*Br->green-Ar->green*Bi->green); gamma=PerceptibleReciprocal(Br->blue*Br->blue+Bi->blue*Bi->blue+snr); Cr->blue=gamma*(Ar->blue*Br->blue+Ai->blue*Bi->blue); Ci->blue=gamma*(Ai->blue*Br->blue-Ar->blue*Bi->blue); if (images->matte != MagickFalse) { gamma=PerceptibleReciprocal(Br->opacity*Br->opacity+Bi->opacity* Bi->opacity+snr); Cr->opacity=gamma*(Ar->opacity*Br->opacity+Ai->opacity* Bi->opacity); Ci->opacity=gamma*(Ai->opacity*Br->opacity-Ar->opacity* Bi->opacity); } break; } case MagnitudePhaseComplexOperator: { Cr->red=sqrt(Ar->red*Ar->red+Ai->red*Ai->red); Ci->red=atan2(Ai->red,Ar->red)/(2.0*MagickPI)+0.5; Cr->green=sqrt(Ar->green*Ar->green+Ai->green*Ai->green); Ci->green=atan2(Ai->green,Ar->green)/(2.0*MagickPI)+0.5; Cr->blue=sqrt(Ar->blue*Ar->blue+Ai->blue*Ai->blue); Ci->blue=atan2(Ai->blue,Ar->blue)/(2.0*MagickPI)+0.5; if (images->matte != MagickFalse) { Cr->opacity=sqrt(Ar->opacity*Ar->opacity+Ai->opacity*Ai->opacity); Ci->opacity=atan2(Ai->opacity,Ar->opacity)/(2.0*MagickPI)+0.5; } break; } case MultiplyComplexOperator: { Cr->red=QuantumScale*(Ar->red*Br->red-Ai->red*Bi->red); Ci->red=QuantumScale*(Ai->red*Br->red+Ar->red*Bi->red); Cr->green=QuantumScale*(Ar->green*Br->green-Ai->green*Bi->green); Ci->green=QuantumScale*(Ai->green*Br->green+Ar->green*Bi->green); Cr->blue=QuantumScale*(Ar->blue*Br->blue-Ai->blue*Bi->blue); Ci->blue=QuantumScale*(Ai->blue*Br->blue+Ar->blue*Bi->blue); if (images->matte != MagickFalse) { Cr->opacity=QuantumScale*(Ar->opacity*Br->opacity-Ai->opacity* Bi->opacity); Ci->opacity=QuantumScale*(Ai->opacity*Br->opacity+Ar->opacity* Bi->opacity); } break; } case RealImaginaryComplexOperator: { Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5)); Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5)); Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5)); Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5)); Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5)); Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5)); if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5)); Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5)); } break; } case SubtractComplexOperator: { Cr->red=Ar->red-Br->red; Ci->red=Ai->red-Bi->red; Cr->green=Ar->green-Br->green; Ci->green=Ai->green-Bi->green; Cr->blue=Ar->blue-Br->blue; Ci->blue=Ai->blue-Bi->blue; if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity-Br->opacity; Ci->opacity=Ai->opacity-Bi->opacity; } break; } } Ar++; Ai++; Br++; Bi++; Cr++; Ci++; } if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse) status=MagickFalse; if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse) 
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ComplexImages)
#endif
        proceed=SetImageProgress(images,ComplexImageTag,progress++,
          images->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  Cr_view=DestroyCacheView(Cr_view);
  Ci_view=DestroyCacheView(Ci_view);
  Br_view=DestroyCacheView(Br_view);
  Bi_view=DestroyCacheView(Bi_view);
  Ar_view=DestroyCacheView(Ar_view);
  Ai_view=DestroyCacheView(Ai_view);
  if (status == MagickFalse)
    complex_images=DestroyImageList(complex_images);
  return(complex_images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F o r w a r d F o u r i e r T r a n s f o r m I m a g e                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ForwardFourierTransformImage() implements the discrete Fourier transform
%  (DFT) of the image either as a magnitude / phase or real / imaginary image
%  pair.
%
%  The format of the ForwardFourierTransformImage method is:
%
%      Image *ForwardFourierTransformImage(const Image *image,
%        const MagickBooleanType modulus,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulus: if true, return the transform as a magnitude / phase pair
%      otherwise a real / imaginary image pair.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(MAGICKCORE_FFTW_DELEGATE)

static MagickBooleanType RollFourier(const size_t width,const size_t height,
  const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
  double
    *source_pixels;

  MemoryInfo
    *source_info;

  register ssize_t
    i,
    x;

  ssize_t
    u,
    v,
    y;

  /*
    Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
  */
  source_info=AcquireVirtualMemory(height,width*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    return(MagickFalse);
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  i=0L;
  for (y=0L; y < (ssize_t) height; y++)
  {
    if (y_offset < 0L)
      v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
    else
      v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
        y+y_offset;
    for (x=0L; x < (ssize_t) width; x++)
    {
      if (x_offset < 0L)
        u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
      else
        u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
          x+x_offset;
      source_pixels[v*width+u]=roll_pixels[i++];
    }
  }
  (void) CopyMagickMemory(roll_pixels,source_pixels,height*width*
    sizeof(*source_pixels));
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}

static MagickBooleanType ForwardQuadrantSwap(const size_t width,
  const size_t height,double *source_pixels,double *forward_pixels)
{
  MagickBooleanType
    status;

  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
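    FFTW's real-to-complex transform keeps only the non-redundant half
    plane, width/2+1 complex values per row; the loops below mirror that
    half plane into a full-width plane with the zero-frequency term
    centered.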
*/ center=(ssize_t) floor((double) width/2L)+1L; status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L, source_pixels); if (status == MagickFalse) return(MagickFalse); for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L-1L); x++) forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x]; for (y=1; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L-1L); x++) forward_pixels[(height-y)*width+width/2L-x-1L]= source_pixels[y*center+x+1L]; for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[-x+width/2L-1L]=forward_pixels[x+width/2L+1L]; return(MagickTrue); } static void CorrectPhaseLHS(const size_t width,const size_t height, double *fourier_pixels) { register ssize_t x; ssize_t y; for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) fourier_pixels[y*width+x]*=(-1.0); } static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info, Image *image,double *magnitude,double *phase,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *magnitude_pixels, *phase_pixels; Image *magnitude_image, *phase_image; MagickBooleanType status; MemoryInfo *magnitude_info, *phase_info; register IndexPacket *indexes; register PixelPacket *q; register ssize_t x; ssize_t i, y; magnitude_image=GetFirstImageInList(image); phase_image=GetNextImageInList(image); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",image->filename); return(MagickFalse); } /* Create "Fourier Transform" image from constituent arrays. */ magnitude_info=AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (magnitude_info != (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); (void) ResetMagickMemory(magnitude_pixels,0,fourier_info->height* fourier_info->width*sizeof(*magnitude_pixels)); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); (void) ResetMagickMemory(phase_pixels,0,fourier_info->height* fourier_info->width*sizeof(*phase_pixels)); status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height, magnitude,magnitude_pixels); if (status != MagickFalse) status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase, phase_pixels); CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels); if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i]/=(2.0*MagickPI); phase_pixels[i]+=0.5; i++; } } magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception); i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->height,1UL, exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(magnitude_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { 
SetPixelRed(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange* magnitude_pixels[i])); break; } case GrayChannels: { SetPixelGray(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } } i++; q++; } status=SyncCacheViewAuthenticPixels(magnitude_view,exception); if (status == MagickFalse) break; } magnitude_view=DestroyCacheView(magnitude_view); i=0L; phase_view=AcquireAuthenticCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->height,1UL, exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(phase_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case GrayChannels: { SetPixelGray(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } } i++; q++; } status=SyncCacheViewAuthenticPixels(phase_view,exception); if (status == MagickFalse) break; } phase_view=DestroyCacheView(phase_view); phase_info=RelinquishVirtualMemory(phase_info); magnitude_info=RelinquishVirtualMemory(magnitude_info); return(status); } static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info, const Image *image,double *magnitude_pixels,double *phase_pixels, ExceptionInfo *exception) { CacheView *image_view; const char *value; double *source_pixels; fftw_complex *forward_pixels; fftw_plan fftw_r2c_plan; MemoryInfo *forward_info, *source_info; register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t i, x; ssize_t y; /* Generate the forward Fourier transform. 
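    The channel is unpacked into a normalized double array, transformed by
    an FFTW real-to-complex plan, and, unless the "fourier:normalize"
    artifact says otherwise, scaled by 1/(width*height) in this forward
    direction.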
*/ source_info=AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } source_pixels=(double *) GetVirtualMemoryBlob(source_info); ResetMagickMemory(source_pixels,0,fourier_info->height*fourier_info->width* sizeof(*source_pixels)); i=0L; image_view=AcquireVirtualCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { source_pixels[i]=QuantumScale*GetPixelRed(p); break; } case GreenChannel: { source_pixels[i]=QuantumScale*GetPixelGreen(p); break; } case BlueChannel: { source_pixels[i]=QuantumScale*GetPixelBlue(p); break; } case OpacityChannel: { source_pixels[i]=QuantumScale*GetPixelOpacity(p); break; } case IndexChannel: { source_pixels[i]=QuantumScale*GetPixelIndex(indexes+x); break; } case GrayChannels: { source_pixels[i]=QuantumScale*GetPixelGray(p); break; } } i++; p++; } } image_view=DestroyCacheView(image_view); forward_info=AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->center*sizeof(*forward_pixels)); if (forward_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); return(MagickFalse); } forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ForwardFourierTransform) #endif fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height, source_pixels,forward_pixels,FFTW_ESTIMATE); fftw_execute(fftw_r2c_plan); fftw_destroy_plan(fftw_r2c_plan); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); value=GetImageArtifact(image,"fourier:normalize"); if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0)) { double gamma; /* Normalize inverse transform. */ i=0L; gamma=PerceptibleReciprocal((double) fourier_info->width* fourier_info->height); for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) forward_pixels[i]*=gamma; #else forward_pixels[i][0]*=gamma; forward_pixels[i][1]*=gamma; #endif i++; } } /* Generate magnitude and phase (or real and imaginary). 
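    With complex.h available, cabs()/carg() yield the polar form and
    creal()/cimag() the rectangular form; otherwise the fallback macros
    defined at the top of this file read fftw_complex as a double[2].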
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
        i++;
      }
  forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
  return(MagickTrue);
}

static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude_pixels,
    *phase_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  size_t
    extent;

  fourier_info.width=image->columns;
  fourier_info.height=image->rows;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0))
    {
      extent=image->columns < image->rows ? image->rows : image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) floor((double) fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
    phase_pixels,exception);
  if (status != MagickFalse)
    status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
      phase_pixels,exception);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
#endif

MagickExport Image *ForwardFourierTransformImage(const Image *image,
  const MagickBooleanType modulus,ExceptionInfo *exception)
{
  Image
    *fourier_image;

  fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    image->filename);
#else
  {
    Image
      *magnitude_image;

    size_t
      extent,
      height,
      width;

    width=image->columns;
    height=image->rows;
    if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
        ((image->rows % 2) != 0))
      {
        extent=image->columns < image->rows ? image->rows : image->columns;
        width=(extent & 0x01) == 1 ?
extent+1UL : extent; } height=width; magnitude_image=CloneImage(image,width,height,MagickTrue,exception); if (magnitude_image != (Image *) NULL) { Image *phase_image; magnitude_image->storage_class=DirectClass; magnitude_image->depth=32UL; phase_image=CloneImage(image,width,height,MagickTrue,exception); if (phase_image == (Image *) NULL) magnitude_image=DestroyImage(magnitude_image); else { MagickBooleanType is_gray, status; phase_image->storage_class=DirectClass; phase_image->depth=32UL; AppendImageToList(&fourier_image,magnitude_image); AppendImageToList(&fourier_image,phase_image); status=MagickTrue; is_gray=IsGrayImage(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=ForwardFourierTransformChannel(image, GrayChannels,modulus,fourier_image,exception); else thread_status=ForwardFourierTransformChannel(image,RedChannel, modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, GreenChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, BlueChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->matte != MagickFalse) thread_status=ForwardFourierTransformChannel(image, OpacityChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->colorspace == CMYKColorspace) thread_status=ForwardFourierTransformChannel(image, IndexChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImageList(fourier_image); fftw_cleanup(); } } } #endif return(fourier_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v e r s e F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InverseFourierTransformImage() implements the inverse discrete Fourier % transform (DFT) of the image either as a magnitude / phase or real / % imaginary image pair. % % The format of the InverseFourierTransformImage method is: % % Image *InverseFourierTransformImage(const Image *magnitude_image, % const Image *phase_image,const MagickBooleanType modulus, % ExceptionInfo *exception) % % A description of each parameter follows: % % o magnitude_image: the magnitude or real image. % % o phase_image: the phase or imaginary image. % % o modulus: if true, return transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. 
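%
%  A minimal round-trip sketch (hypothetical: fourier_pair is assumed to be
%  the two-image list returned by ForwardFourierTransformImage() with the
%  same modulus setting):
%
%      Image
%        *reconstructed;
%
%      reconstructed=InverseFourierTransformImage(fourier_pair,
%        fourier_pair->next,MagickTrue,exception);
%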
%
*/

#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType InverseQuadrantSwap(const size_t width,
  const size_t height,const double *source,double *destination)
{
  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) floor((double) width/2L)+1L;
  for (y=1L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L+1L); x++)
      destination[(height-y)*center-x+width/2L]=source[y*width+x];
  for (y=0L; y < (ssize_t) height; y++)
    destination[y*center]=source[y*width+width/2L];
  for (x=0L; x < center; x++)
    destination[x]=source[center-x-1L];
  return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}

static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
  const Image *magnitude_image,const Image *phase_image,
  fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *inverse_pixels,
    *magnitude_pixels,
    *phase_pixels;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info,
    *magnitude_info,
    *phase_info;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Inverse Fourier - read image and break down into a double array.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*phase_pixels));
  inverse_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->center*sizeof(*inverse_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL) ||
      (inverse_info == (MemoryInfo *) NULL))
    {
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (inverse_info != (MemoryInfo *) NULL)
        inverse_info=RelinquishVirtualMemory(inverse_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
  i=0L;
  magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude_pixels,inverse_pixels);
  (void) CopyMagickMemory(magnitude_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*magnitude_pixels));
  i=0L;
  phase_view=AcquireVirtualCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t)
    fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          phase_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          phase_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  if (fourier_info->modulus != MagickFalse)
    {
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]-=0.5;
          phase_pixels[i]*=(2.0*MagickPI);
          i++;
        }
    }
  phase_view=DestroyCacheView(phase_view);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (status != MagickFalse)
    status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
      phase_pixels,inverse_pixels);
  (void) CopyMagickMemory(phase_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*phase_pixels));
  inverse_info=RelinquishVirtualMemory(inverse_info);
  /*
    Merge two sets.
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
          magnitude_pixels[i]*sin(phase_pixels[i]);
#else
        fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
        fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
        fourier_pixels[i][0]=magnitude_pixels[i];
        fourier_pixels[i][1]=phase_pixels[i];
#endif
        i++;
      }
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  phase_info=RelinquishVirtualMemory(phase_info);
  return(status);
}

static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
  fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    *source_pixels;

  const char
    *value;

  fftw_plan
    fftw_c2r_plan;

  MemoryInfo
    *source_info;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  source_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  value=GetImageArtifact(image,"fourier:normalize");
  if ((value != (const char *) NULL) && (LocaleCompare(value,"inverse") == 0))
    {
      double
        gamma;

      /*
        Normalize Fourier transform.
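        FFTW transforms are unnormalized: exactly one direction must apply
        the 1/(width*height) factor, and it is applied here only when the
        "fourier:normalize" artifact selects the inverse direction.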
*/ i=0L; gamma=PerceptibleReciprocal((double) fourier_info->width* fourier_info->height); for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]*=gamma; #else fourier_pixels[i][0]*=gamma; fourier_pixels[i][1]*=gamma; #endif i++; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_InverseFourierTransform) #endif { fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height, fourier_pixels,source_pixels,FFTW_ESTIMATE); fftw_execute(fftw_c2r_plan); fftw_destroy_plan(fftw_c2r_plan); } i=0L; image_view=AcquireAuthenticCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { if (y >= (ssize_t) image->rows) break; q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width > image->columns ? image->columns : fourier_info->width,1UL,exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { if (x < (ssize_t) image->columns) switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q,ClampToQuantum(QuantumRange*source_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange*source_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange*source_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange*source_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange* source_pixels[i])); break; } case GrayChannels: { SetPixelGray(q,ClampToQuantum(QuantumRange*source_pixels[i])); break; } } i++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } image_view=DestroyCacheView(image_view); source_info=RelinquishVirtualMemory(source_info); return(MagickTrue); } static MagickBooleanType InverseFourierTransformChannel( const Image *magnitude_image,const Image *phase_image, const ChannelType channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { fftw_complex *inverse_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo *inverse_info; size_t extent; fourier_info.width=magnitude_image->columns; fourier_info.height=magnitude_image->rows; if ((magnitude_image->columns != magnitude_image->rows) || ((magnitude_image->columns % 2) != 0) || ((magnitude_image->rows % 2) != 0)) { extent=magnitude_image->columns < magnitude_image->rows ? magnitude_image->rows : magnitude_image->columns; fourier_info.width=(extent & 0x01) == 1 ? 
extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) floor((double) fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; inverse_info=AcquireVirtualMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*inverse_pixels)); if (inverse_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info); status=InverseFourier(&fourier_info,magnitude_image,phase_image, inverse_pixels,exception); if (status != MagickFalse) status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image, exception); inverse_info=RelinquishVirtualMemory(inverse_info); return(status); } #endif MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image, const Image *phase_image,const MagickBooleanType modulus, ExceptionInfo *exception) { Image *fourier_image; assert(magnitude_image != (Image *) NULL); assert(magnitude_image->signature == MagickSignature); if (magnitude_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", magnitude_image->filename); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",magnitude_image->filename); return((Image *) NULL); } #if !defined(MAGICKCORE_FFTW_DELEGATE) fourier_image=(Image *) NULL; (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", magnitude_image->filename); #else { fourier_image=CloneImage(magnitude_image,magnitude_image->columns, magnitude_image->rows,MagickTrue,exception); if (fourier_image != (Image *) NULL) { MagickBooleanType is_gray, status; status=MagickTrue; is_gray=IsGrayImage(magnitude_image,exception); if (is_gray != MagickFalse) is_gray=IsGrayImage(phase_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GrayChannels,modulus,fourier_image,exception); else thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,RedChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GreenChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BlueChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->matte != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,OpacityChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) 
status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->colorspace == CMYKColorspace) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,IndexChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImage(fourier_image); } fftw_cleanup(); } #endif return(fourier_image); }
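A self-contained driver sketch for the two public entry points above (not
part of MagickCore; it assumes an ImageMagick 6 build with the FFTW delegate
and a hypothetical input file named sample.png):

#include <magick/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *fourier_pair,
    *image,
    *reconstructed;

  ImageInfo
    *image_info;

  (void) argc;
  MagickCoreGenesis(*argv,MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  /* sample.png is a hypothetical input file. */
  (void) CopyMagickString(image_info->filename,"sample.png",MaxTextExtent);
  image=ReadImage(image_info,exception);
  if (image == (Image *) NULL)
    {
      CatchException(exception);
      return(1);
    }
  /*
    Forward DFT as a magnitude / phase pair, then invert it again.
  */
  fourier_pair=ForwardFourierTransformImage(image,MagickTrue,exception);
  if (fourier_pair != (Image *) NULL)
    {
      reconstructed=InverseFourierTransformImage(fourier_pair,
        fourier_pair->next,MagickTrue,exception);
      if (reconstructed != (Image *) NULL)
        {
          (void) WriteImages(image_info,reconstructed,"roundtrip.png",
            exception);
          reconstructed=DestroyImage(reconstructed);
        }
      fourier_pair=DestroyImageList(fourier_pair);
    }
  image=DestroyImage(image);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}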
/* * Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/cache.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/fourier.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #if defined(MAGICKCORE_FFTW_DELEGATE) #if defined(MAGICKCORE_HAVE_COMPLEX_H) #include <complex.h> #endif #include <fftw3.h> #if !defined(MAGICKCORE_HAVE_CABS) #define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1])) #endif #if !defined(MAGICKCORE_HAVE_CARG) #define carg(z) (atan2(cimag(z),creal(z))) #endif #if !defined(MAGICKCORE_HAVE_CIMAG) #define cimag(z) (z[1]) #endif #if !defined(MAGICKCORE_HAVE_CREAL) #define creal(z) (z[0]) #endif #endif /* * Typedef declarations. */ typedef struct _FourierInfo { ChannelType channel; MagickBooleanType modulus; size_t width, height; ssize_t center; } FourierInfo; /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o m p l e x I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ComplexImages() performs complex mathematics on an image * sequence. % % The format of the ComplexImages method is: % % * MagickBooleanType ComplexImages(Image *images,const ComplexOperator op, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o op: A complex operator. % % o * exception: return any errors or warnings in this structure. % */ MagickExport Image * ComplexImages(const Image * images, const ComplexOperator op, ExceptionInfo * exception) { #define ComplexImageTag "Complex/Image" CacheView * Ai_view, *Ar_view, *Bi_view, *Br_view, *Ci_view, *Cr_view; const char *artifact; const Image * Ai_image, *Ar_image, *Bi_image, *Br_image; double snr; Image * Ci_image, *complex_images, *Cr_image, *image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if (images->next == (Image *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ImageError, "ImageSequenceRequired", "`%s'", images->filename); return ((Image *) NULL); } image = CloneImage(images, images->columns, images->rows, MagickTrue, exception); if (image == (Image *) NULL) return ((Image *) NULL); image->storage_class = DirectClass; image->depth = 32UL; complex_images = NewImageList(); AppendImageToList(&complex_images, image); image = CloneImage(images, images->columns, images->rows, MagickTrue, exception); if (image == (Image *) NULL) { complex_images = DestroyImageList(complex_images); return (complex_images); } AppendImageToList(&complex_images, image); complex_images->storage_class = DirectClass; complex_images->depth = 32UL; /* * Apply complex mathematics to image pixels. 
*/ artifact = GetImageArtifact(image, "complex:snr"); snr = 0.0; if (artifact != (const char *)NULL) snr = StringToDouble(artifact, (char **)NULL); Ar_image = images; Ai_image = images->next; Br_image = images; Bi_image = images->next; if ((images->next->next != (Image *) NULL) && (images->next->next->next != (Image *) NULL)) { Br_image = images->next->next; Bi_image = images->next->next->next; } Cr_image = complex_images; Ci_image = complex_images->next; Ar_view = AcquireVirtualCacheView(Ar_image, exception); Ai_view = AcquireVirtualCacheView(Ai_image, exception); Br_view = AcquireVirtualCacheView(Br_image, exception); Bi_view = AcquireVirtualCacheView(Bi_image, exception); Cr_view = AcquireAuthenticCacheView(Cr_image, exception); Ci_view = AcquireAuthenticCacheView(Ci_image, exception); status = MagickTrue; progress = 0; for (y = 0; y < (ssize_t) images->rows; y++) { register const PixelPacket * magick_restrict Ai, *magick_restrict Ar, *magick_restrict Bi, *magick_restrict Br; register PixelPacket * magick_restrict Ci, *magick_restrict Cr; register ssize_t x; if (status == MagickFalse) continue; Ar = GetCacheViewVirtualPixels(Ar_view, 0, y, Ar_image->columns, 1, exception); Ai = GetCacheViewVirtualPixels(Ai_view, 0, y, Ai_image->columns, 1, exception); Br = GetCacheViewVirtualPixels(Br_view, 0, y, Br_image->columns, 1, exception); Bi = GetCacheViewVirtualPixels(Bi_view, 0, y, Bi_image->columns, 1, exception); Cr = QueueCacheViewAuthenticPixels(Cr_view, 0, y, Cr_image->columns, 1, exception); Ci = QueueCacheViewAuthenticPixels(Ci_view, 0, y, Ci_image->columns, 1, exception); if ((Ar == (const PixelPacket *)NULL) || (Ai == (const PixelPacket *)NULL) || (Br == (const PixelPacket *)NULL) || (Bi == (const PixelPacket *)NULL) || (Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL)) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) images->columns; x++) { switch (op) { case AddComplexOperator: { Cr->red = Ar->red + Br->red; Ci->red = Ai->red + Bi->red; Cr->green = Ar->green + Br->green; Ci->green = Ai->green + Bi->green; Cr->blue = Ar->blue + Br->blue; Ci->blue = Ai->blue + Bi->blue; if (images->matte != MagickFalse) { Cr->opacity = Ar->opacity + Br->opacity; Ci->opacity = Ai->opacity + Bi->opacity; } break; } case ConjugateComplexOperator: default: { Cr->red = Ar->red; Ci->red = (-Bi->red); Cr->green = Ar->green; Ci->green = (-Bi->green); Cr->blue = Ar->blue; Ci->blue = (-Bi->blue); if (images->matte != MagickFalse) { Cr->opacity = Ar->opacity; Ci->opacity = (-Bi->opacity); } break; } case DivideComplexOperator: { double gamma; gamma = PerceptibleReciprocal(Br->red * Br->red + Bi->red * Bi->red + snr); Cr->red = gamma * (Ar->red * Br->red + Ai->red * Bi->red); Ci->red = gamma * (Ai->red * Br->red - Ar->red * Bi->red); gamma = PerceptibleReciprocal(Br->green * Br->green + Bi->green * Bi->green + snr); Cr->green = gamma * (Ar->green * Br->green + Ai->green * Bi->green); Ci->green = gamma * (Ai->green * Br->green - Ar->green * Bi->green); gamma = PerceptibleReciprocal(Br->blue * Br->blue + Bi->blue * Bi->blue + snr); Cr->blue = gamma * (Ar->blue * Br->blue + Ai->blue * Bi->blue); Ci->blue = gamma * (Ai->blue * Br->blue - Ar->blue * Bi->blue); if (images->matte != MagickFalse) { gamma = PerceptibleReciprocal(Br->opacity * Br->opacity + Bi->opacity * Bi->opacity + snr); Cr->opacity = gamma * (Ar->opacity * Br->opacity + Ai->opacity * Bi->opacity); Ci->opacity = gamma * (Ai->opacity * Br->opacity - Ar->opacity * Bi->opacity); } break; } case MagnitudePhaseComplexOperator: { 
Cr->red = sqrt(Ar->red * Ar->red + Ai->red * Ai->red); Ci->red = atan2(Ai->red, Ar->red) / (2.0 * MagickPI) + 0.5; Cr->green = sqrt(Ar->green * Ar->green + Ai->green * Ai->green); Ci->green = atan2(Ai->green, Ar->green) / (2.0 * MagickPI) + 0.5; Cr->blue = sqrt(Ar->blue * Ar->blue + Ai->blue * Ai->blue); Ci->blue = atan2(Ai->blue, Ar->blue) / (2.0 * MagickPI) + 0.5; if (images->matte != MagickFalse) { Cr->opacity = sqrt(Ar->opacity * Ar->opacity + Ai->opacity * Ai->opacity); Ci->opacity = atan2(Ai->opacity, Ar->opacity) / (2.0 * MagickPI) + 0.5; } break; } case MultiplyComplexOperator: { Cr->red = QuantumScale * (Ar->red * Br->red - Ai->red * Bi->red); Ci->red = QuantumScale * (Ai->red * Br->red + Ar->red * Bi->red); Cr->green = QuantumScale * (Ar->green * Br->green - Ai->green * Bi->green); Ci->green = QuantumScale * (Ai->green * Br->green + Ar->green * Bi->green); Cr->blue = QuantumScale * (Ar->blue * Br->blue - Ai->blue * Bi->blue); Ci->blue = QuantumScale * (Ai->blue * Br->blue + Ar->blue * Bi->blue); if (images->matte != MagickFalse) { Cr->opacity = QuantumScale * (Ar->opacity * Br->opacity - Ai->opacity * Bi->opacity); Ci->opacity = QuantumScale * (Ai->opacity * Br->opacity + Ar->opacity * Bi->opacity); } break; } case RealImaginaryComplexOperator: { Cr->red = Ar->red * cos(2.0 * MagickPI * (Ai->red - 0.5)); Ci->red = Ar->red * sin(2.0 * MagickPI * (Ai->red - 0.5)); Cr->green = Ar->green * cos(2.0 * MagickPI * (Ai->green - 0.5)); Ci->green = Ar->green * sin(2.0 * MagickPI * (Ai->green - 0.5)); Cr->blue = Ar->blue * cos(2.0 * MagickPI * (Ai->blue - 0.5)); Ci->blue = Ar->blue * sin(2.0 * MagickPI * (Ai->blue - 0.5)); if (images->matte != MagickFalse) { Cr->opacity = Ar->opacity * cos(2.0 * MagickPI * (Ai->opacity - 0.5)); Ci->opacity = Ar->opacity * sin(2.0 * MagickPI * (Ai->opacity - 0.5)); } break; } case SubtractComplexOperator: { Cr->red = Ar->red - Br->red; Ci->red = Ai->red - Bi->red; Cr->green = Ar->green - Br->green; Ci->green = Ai->green - Bi->green; Cr->blue = Ar->blue - Br->blue; Ci->blue = Ai->blue - Bi->blue; if (images->matte != MagickFalse) { Cr->opacity = Ar->opacity - Br->opacity; Ci->opacity = Ai->opacity - Bi->opacity; } break; } } Ar++; Ai++; Br++; Bi++; Cr++; Ci++; } if (SyncCacheViewAuthenticPixels(Ci_view, exception) == MagickFalse) status = MagickFalse; if (SyncCacheViewAuthenticPixels(Cr_view, exception) == MagickFalse) status = MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(images, ComplexImageTag, progress++, images->rows); if (proceed == MagickFalse) status = MagickFalse; } } Cr_view = DestroyCacheView(Cr_view); Ci_view = DestroyCacheView(Ci_view); Br_view = DestroyCacheView(Br_view); Bi_view = DestroyCacheView(Bi_view); Ar_view = DestroyCacheView(Ar_view); Ai_view = DestroyCacheView(Ai_view); if (status == MagickFalse) complex_images = DestroyImageList(complex_images); return (complex_images); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % F o r w a r d F o u r i e r T r a n s f o r m I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ForwardFourierTransformImage() implements the discrete Fourier * transform % (DFT) of the image either as a magnitude / phase or real / * imaginary image % pair. 
% % The format of the
 * ForwardFourierTransformImage method is: % % Image
 * *ForwardFourierTransformImage(const Image *image, % const
 * MagickBooleanType modulus,ExceptionInfo *exception) % % A description of
 * each parameter follows: % % o image: the image. % % o modulus: if
 * true, return the transform as a magnitude / phase pair % otherwise a
 * real / imaginary image pair. % % o exception: return any errors or
 * warnings in this structure. % */
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType
RollFourier(const size_t width, const size_t height,
    const ssize_t x_offset, const ssize_t y_offset, double *roll_pixels)
{
  double *source_pixels;

  MemoryInfo *source_info;

  register ssize_t i, x;

  ssize_t u, v, y;

  /*
   * Move zero frequency (DC, average color) from (0,0) to
   * (width/2,height/2).
   */
  source_info = AcquireVirtualMemory(height, width * sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    return (MagickFalse);
  source_pixels = (double *) GetVirtualMemoryBlob(source_info);
  i = 0L;
  for (y = 0L; y < (ssize_t) height; y++) {
    if (y_offset < 0L)
      v = ((y + y_offset) < 0L) ? y + y_offset + (ssize_t) height :
        y + y_offset;
    else
      v = ((y + y_offset) > ((ssize_t) height - 1L)) ?
        y + y_offset - (ssize_t) height : y + y_offset;
    for (x = 0L; x < (ssize_t) width; x++) {
      if (x_offset < 0L)
        u = ((x + x_offset) < 0L) ? x + x_offset + (ssize_t) width :
          x + x_offset;
      else
        u = ((x + x_offset) > ((ssize_t) width - 1L)) ?
          x + x_offset - (ssize_t) width : x + x_offset;
      source_pixels[v * width + u] = roll_pixels[i++];
    }
  }
  (void) CopyMagickMemory(roll_pixels, source_pixels, height * width *
    sizeof(*source_pixels));
  source_info = RelinquishVirtualMemory(source_info);
  return (MagickTrue);
}

static MagickBooleanType
ForwardQuadrantSwap(const size_t width, const size_t height,
    double *source_pixels, double *forward_pixels)
{
  MagickBooleanType status;

  register ssize_t x;

  ssize_t center, y;

  /*
   * Swap quadrants.
*/ center = (ssize_t) floor((double)width / 2L) + 1L; status = RollFourier((size_t) center, height, 0L, (ssize_t) height / 2L, source_pixels); if (status == MagickFalse) return (MagickFalse); for (y = 0L; y < (ssize_t) height; y++) for (x = 0L; x < (ssize_t) (width / 2L - 1L); x++) forward_pixels[y * width + x + width / 2L] = source_pixels[y * center + x]; for (y = 1; y < (ssize_t) height; y++) for (x = 0L; x < (ssize_t) (width / 2L - 1L); x++) forward_pixels[(height - y) * width + width / 2L - x - 1L] = source_pixels[y * center + x + 1L]; for (x = 0L; x < (ssize_t) (width / 2L); x++) forward_pixels[-x + width / 2L - 1L] = forward_pixels[x + width / 2L + 1L]; return (MagickTrue); } static void CorrectPhaseLHS(const size_t width, const size_t height, double *fourier_pixels) { register ssize_t x; ssize_t y; for (y = 0L; y < (ssize_t) height; y++) for (x = 0L; x < (ssize_t) (width / 2L); x++) fourier_pixels[y * width + x] *= (-1.0); } static MagickBooleanType ForwardFourier(const FourierInfo * fourier_info, Image * image, double *magnitude, double *phase, ExceptionInfo * exception) { CacheView * magnitude_view, *phase_view; double *magnitude_pixels, *phase_pixels; Image * magnitude_image, *phase_image; MagickBooleanType status; MemoryInfo * magnitude_info, *phase_info; register IndexPacket * indexes; register PixelPacket * q; register ssize_t x; ssize_t i, y; magnitude_image = GetFirstImageInList(image); phase_image = GetNextImageInList(image); if (phase_image == (Image *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ImageError, "ImageSequenceRequired", "`%s'", image->filename); return (MagickFalse); } /* * Create "Fourier Transform" image from constituent arrays. */ magnitude_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width * sizeof(*magnitude_pixels)); phase_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width * sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info = RelinquishVirtualMemory(phase_info); if (magnitude_info != (MemoryInfo *) NULL) magnitude_info = RelinquishVirtualMemory(magnitude_info); (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); return (MagickFalse); } magnitude_pixels = (double *)GetVirtualMemoryBlob(magnitude_info); (void)ResetMagickMemory(magnitude_pixels, 0, fourier_info->height * fourier_info->width * sizeof(*magnitude_pixels)); phase_pixels = (double *)GetVirtualMemoryBlob(phase_info); (void)ResetMagickMemory(phase_pixels, 0, fourier_info->height * fourier_info->width * sizeof(*phase_pixels)); status = ForwardQuadrantSwap(fourier_info->width, fourier_info->height, magnitude, magnitude_pixels); if (status != MagickFalse) status = ForwardQuadrantSwap(fourier_info->width, fourier_info->height, phase, phase_pixels); CorrectPhaseLHS(fourier_info->width, fourier_info->height, phase_pixels); if (fourier_info->modulus != MagickFalse) { i = 0L; for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i] /= (2.0 * MagickPI); phase_pixels[i] += 0.5; i++; } } magnitude_view = AcquireAuthenticCacheView(magnitude_image, exception); i = 0L; for (y = 0L; y < (ssize_t) fourier_info->height; y++) { q = GetCacheViewAuthenticPixels(magnitude_view, 0L, y, fourier_info->height, 1UL, exception); if (q == (PixelPacket *) NULL) break; indexes = 
GetCacheViewAuthenticIndexQueue(magnitude_view); for (x = 0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q, ClampToQuantum(QuantumRange * magnitude_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q, ClampToQuantum(QuantumRange * magnitude_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q, ClampToQuantum(QuantumRange * magnitude_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q, ClampToQuantum(QuantumRange * magnitude_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes + x, ClampToQuantum(QuantumRange * magnitude_pixels[i])); break; } case GrayChannels: { SetPixelGray(q, ClampToQuantum(QuantumRange * magnitude_pixels[i])); break; } } i++; q++; } status = SyncCacheViewAuthenticPixels(magnitude_view, exception); if (status == MagickFalse) break; } magnitude_view = DestroyCacheView(magnitude_view); i = 0L; phase_view = AcquireAuthenticCacheView(phase_image, exception); for (y = 0L; y < (ssize_t) fourier_info->height; y++) { q = GetCacheViewAuthenticPixels(phase_view, 0L, y, fourier_info->height, 1UL, exception); if (q == (PixelPacket *) NULL) break; indexes = GetCacheViewAuthenticIndexQueue(phase_view); for (x = 0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q, ClampToQuantum(QuantumRange * phase_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q, ClampToQuantum(QuantumRange * phase_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q, ClampToQuantum(QuantumRange * phase_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q, ClampToQuantum(QuantumRange * phase_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes + x, ClampToQuantum(QuantumRange * phase_pixels[i])); break; } case GrayChannels: { SetPixelGray(q, ClampToQuantum(QuantumRange * phase_pixels[i])); break; } } i++; q++; } status = SyncCacheViewAuthenticPixels(phase_view, exception); if (status == MagickFalse) break; } phase_view = DestroyCacheView(phase_view); phase_info = RelinquishVirtualMemory(phase_info); magnitude_info = RelinquishVirtualMemory(magnitude_info); return (status); } static MagickBooleanType ForwardFourierTransform(FourierInfo * fourier_info, const Image * image, double *magnitude_pixels, double *phase_pixels, ExceptionInfo * exception) { CacheView * image_view; const char *value; double *source_pixels; fftw_complex * forward_pixels; fftw_plan fftw_r2c_plan; MemoryInfo * forward_info, *source_info; register const IndexPacket * indexes; register const PixelPacket * p; register ssize_t i, x; ssize_t y; /* * Generate the forward Fourier transform. 
*/ source_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width * sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); return (MagickFalse); } source_pixels = (double *)GetVirtualMemoryBlob(source_info); ResetMagickMemory(source_pixels, 0, fourier_info->height * fourier_info->width * sizeof(*source_pixels)); i = 0L; image_view = AcquireVirtualCacheView(image, exception); for (y = 0L; y < (ssize_t) fourier_info->height; y++) { p = GetCacheViewVirtualPixels(image_view, 0L, y, fourier_info->width, 1UL, exception); if (p == (const PixelPacket *)NULL) break; indexes = GetCacheViewVirtualIndexQueue(image_view); for (x = 0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { source_pixels[i] = QuantumScale * GetPixelRed(p); break; } case GreenChannel: { source_pixels[i] = QuantumScale * GetPixelGreen(p); break; } case BlueChannel: { source_pixels[i] = QuantumScale * GetPixelBlue(p); break; } case OpacityChannel: { source_pixels[i] = QuantumScale * GetPixelOpacity(p); break; } case IndexChannel: { source_pixels[i] = QuantumScale * GetPixelIndex(indexes + x); break; } case GrayChannels: { source_pixels[i] = QuantumScale * GetPixelGray(p); break; } } i++; p++; } } image_view = DestroyCacheView(image_view); forward_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->center * sizeof(*forward_pixels)); if (forward_info == (MemoryInfo *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); source_info = (MemoryInfo *) RelinquishVirtualMemory(source_info); return (MagickFalse); } forward_pixels = (fftw_complex *) GetVirtualMemoryBlob(forward_info); fftw_r2c_plan = fftw_plan_dft_r2c_2d(fourier_info->width, fourier_info->height, source_pixels, forward_pixels, FFTW_ESTIMATE); fftw_execute(fftw_r2c_plan); fftw_destroy_plan(fftw_r2c_plan); source_info = (MemoryInfo *) RelinquishVirtualMemory(source_info); value = GetImageArtifact(image, "fourier:normalize"); if ((value == (const char *)NULL) || (LocaleCompare(value, "forward") == 0)) { double gamma; /* * Normalize inverse transform. */ i = 0L; gamma = PerceptibleReciprocal((double)fourier_info->width * fourier_info->height); for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) forward_pixels[i] *= gamma; #else forward_pixels[i][0] *= gamma; forward_pixels[i][1] *= gamma; #endif i++; } } /* * Generate magnitude and phase (or real and imaginary). 
   */
  i = 0L;
  if (fourier_info->modulus != MagickFalse)
    for (y = 0L; y < (ssize_t) fourier_info->height; y++)
      for (x = 0L; x < (ssize_t) fourier_info->center; x++) {
        magnitude_pixels[i] = cabs(forward_pixels[i]);
        phase_pixels[i] = carg(forward_pixels[i]);
        i++;
      }
  else
    for (y = 0L; y < (ssize_t) fourier_info->height; y++)
      for (x = 0L; x < (ssize_t) fourier_info->center; x++) {
        magnitude_pixels[i] = creal(forward_pixels[i]);
        phase_pixels[i] = cimag(forward_pixels[i]);
        i++;
      }
  forward_info = (MemoryInfo *) RelinquishVirtualMemory(forward_info);
  return (MagickTrue);
}

static MagickBooleanType
ForwardFourierTransformChannel(const Image * image,
    const ChannelType channel, const MagickBooleanType modulus,
    Image * fourier_image, ExceptionInfo * exception)
{
  double *magnitude_pixels, *phase_pixels;

  FourierInfo fourier_info;

  MagickBooleanType status;

  MemoryInfo *magnitude_info, *phase_info;

  size_t extent;

  fourier_info.width = image->columns;
  fourier_info.height = image->rows;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0)) {
    extent = image->columns < image->rows ? image->rows : image->columns;
    fourier_info.width = (extent & 0x01) == 1 ? extent + 1UL : extent;
  }
  fourier_info.height = fourier_info.width;
  fourier_info.center = (ssize_t) floor((double) fourier_info.width / 2L) + 1L;
  fourier_info.channel = channel;
  fourier_info.modulus = modulus;
  magnitude_info = AcquireVirtualMemory((size_t) fourier_info.height,
    fourier_info.center * sizeof(*magnitude_pixels));
  phase_info = AcquireVirtualMemory((size_t) fourier_info.height,
    fourier_info.center * sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL)) {
    if (phase_info != (MemoryInfo *) NULL)
      phase_info = RelinquishVirtualMemory(phase_info);
    if (magnitude_info != (MemoryInfo *) NULL)
      magnitude_info = RelinquishVirtualMemory(magnitude_info);
    (void) ThrowMagickException(exception, GetMagickModule(),
      ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename);
    return (MagickFalse);
  }
  magnitude_pixels = (double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels = (double *) GetVirtualMemoryBlob(phase_info);
  status = ForwardFourierTransform(&fourier_info, image, magnitude_pixels,
    phase_pixels, exception);
  if (status != MagickFalse)
    status = ForwardFourier(&fourier_info, fourier_image, magnitude_pixels,
      phase_pixels, exception);
  phase_info = RelinquishVirtualMemory(phase_info);
  magnitude_info = RelinquishVirtualMemory(magnitude_info);
  return (status);
}
#endif

MagickExport Image *
ForwardFourierTransformImage(const Image * image,
    const MagickBooleanType modulus, ExceptionInfo * exception)
{
  Image *fourier_image;

  fourier_image = NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  (void) modulus;
  (void) ThrowMagickException(exception, GetMagickModule(),
    MissingDelegateWarning, "DelegateLibrarySupportNotBuiltIn", "`%s' (FFTW)",
    image->filename);
#else
  {
    Image *magnitude_image;

    size_t extent, height, width;

    width = image->columns;
    height = image->rows;
    if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
        ((image->rows % 2) != 0)) {
      extent = image->columns < image->rows ? image->rows : image->columns;
      width = (extent & 0x01) == 1 ?
extent + 1UL : extent; } height = width; magnitude_image = CloneImage(image, width, height, MagickTrue, exception); if (magnitude_image != (Image *) NULL) { Image * phase_image; magnitude_image->storage_class = DirectClass; magnitude_image->depth = 32UL; phase_image = CloneImage(image, width, height, MagickTrue, exception); if (phase_image == (Image *) NULL) magnitude_image = DestroyImage(magnitude_image); else { MagickBooleanType is_gray, status; phase_image->storage_class = DirectClass; phase_image->depth = 32UL; AppendImageToList(&fourier_image, magnitude_image); AppendImageToList(&fourier_image, phase_image); status = MagickTrue; is_gray = IsGrayImage(image, exception); { { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status = ForwardFourierTransformChannel(image, GrayChannels, modulus, fourier_image, exception); else thread_status = ForwardFourierTransformChannel(image, RedChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } { MagickBooleanType thread_status; thread_status = MagickTrue; if (is_gray == MagickFalse) thread_status = ForwardFourierTransformChannel(image, GreenChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } { MagickBooleanType thread_status; thread_status = MagickTrue; if (is_gray == MagickFalse) thread_status = ForwardFourierTransformChannel(image, BlueChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } { MagickBooleanType thread_status; thread_status = MagickTrue; if (image->matte != MagickFalse) thread_status = ForwardFourierTransformChannel(image, OpacityChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } { MagickBooleanType thread_status; thread_status = MagickTrue; if (image->colorspace == CMYKColorspace) thread_status = ForwardFourierTransformChannel(image, IndexChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } } if (status == MagickFalse) fourier_image = DestroyImageList(fourier_image); fftw_cleanup(); } } } #endif return (fourier_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % I n v e r s e F o u r i e r T r a n s f o r m I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % InverseFourierTransformImage() implements the inverse discrete * Fourier % transform (DFT) of the image either as a magnitude / phase or * real / % imaginary image pair. % % The format of the * InverseFourierTransformImage method is: % % Image * *InverseFourierTransformImage(const Image *magnitude_image, % const * Image *phase_image,const MagickBooleanType modulus, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o * magnitude_image: the magnitude or real image. % % o phase_image: the * phase or imaginary image. % % o modulus: if true, return transform as a * magnitude / phase pair % otherwise a real / imaginary image pair. % % * o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType InverseQuadrantSwap(const size_t width, const size_t height, const double *source, double *destination) { register ssize_t x; ssize_t center, y; /* * Swap quadrants. 
*/ center = (ssize_t) floor((double)width / 2L) + 1L; for (y = 1L; y < (ssize_t) height; y++) for (x = 0L; x < (ssize_t) (width / 2L + 1L); x++) destination[(height - y) * center - x + width / 2L] = source[y * width + x]; for (y = 0L; y < (ssize_t) height; y++) destination[y * center] = source[y * width + width / 2L]; for (x = 0L; x < center; x++) destination[x] = source[center - x - 1L]; return (RollFourier(center, height, 0L, (ssize_t) height / -2L, destination)); } static MagickBooleanType InverseFourier(FourierInfo * fourier_info, const Image * magnitude_image, const Image * phase_image, fftw_complex * fourier_pixels, ExceptionInfo * exception) { CacheView * magnitude_view, *phase_view; double *inverse_pixels, *magnitude_pixels, *phase_pixels; MagickBooleanType status; MemoryInfo * inverse_info, *magnitude_info, *phase_info; register const IndexPacket * indexes; register const PixelPacket * p; register ssize_t i, x; ssize_t y; /* * Inverse fourier - read image and break down into a double array. */ magnitude_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width * sizeof(*magnitude_pixels)); phase_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width * sizeof(*phase_pixels)); inverse_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->center * sizeof(*inverse_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL) || (inverse_info == (MemoryInfo *) NULL)) { if (magnitude_info != (MemoryInfo *) NULL) magnitude_info = RelinquishVirtualMemory(magnitude_info); if (phase_info != (MemoryInfo *) NULL) phase_info = RelinquishVirtualMemory(phase_info); if (inverse_info != (MemoryInfo *) NULL) inverse_info = RelinquishVirtualMemory(inverse_info); (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", magnitude_image->filename); return (MagickFalse); } magnitude_pixels = (double *)GetVirtualMemoryBlob(magnitude_info); phase_pixels = (double *)GetVirtualMemoryBlob(phase_info); inverse_pixels = (double *)GetVirtualMemoryBlob(inverse_info); i = 0L; magnitude_view = AcquireVirtualCacheView(magnitude_image, exception); for (y = 0L; y < (ssize_t) fourier_info->height; y++) { p = GetCacheViewVirtualPixels(magnitude_view, 0L, y, fourier_info->width, 1UL, exception); if (p == (const PixelPacket *)NULL) break; indexes = GetCacheViewAuthenticIndexQueue(magnitude_view); for (x = 0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { magnitude_pixels[i] = QuantumScale * GetPixelRed(p); break; } case GreenChannel: { magnitude_pixels[i] = QuantumScale * GetPixelGreen(p); break; } case BlueChannel: { magnitude_pixels[i] = QuantumScale * GetPixelBlue(p); break; } case OpacityChannel: { magnitude_pixels[i] = QuantumScale * GetPixelOpacity(p); break; } case IndexChannel: { magnitude_pixels[i] = QuantumScale * GetPixelIndex(indexes + x); break; } case GrayChannels: { magnitude_pixels[i] = QuantumScale * GetPixelGray(p); break; } } i++; p++; } } magnitude_view = DestroyCacheView(magnitude_view); status = InverseQuadrantSwap(fourier_info->width, fourier_info->height, magnitude_pixels, inverse_pixels); (void)CopyMagickMemory(magnitude_pixels, inverse_pixels, fourier_info->height * fourier_info->center * sizeof(*magnitude_pixels)); i = 0L; phase_view = AcquireVirtualCacheView(phase_image, exception); for (y = 0L; y < (ssize_t) fourier_info->height; y++) { p = GetCacheViewVirtualPixels(phase_view, 0, y, 
fourier_info->width, 1, exception); if (p == (const PixelPacket *)NULL) break; indexes = GetCacheViewAuthenticIndexQueue(phase_view); for (x = 0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { phase_pixels[i] = QuantumScale * GetPixelRed(p); break; } case GreenChannel: { phase_pixels[i] = QuantumScale * GetPixelGreen(p); break; } case BlueChannel: { phase_pixels[i] = QuantumScale * GetPixelBlue(p); break; } case OpacityChannel: { phase_pixels[i] = QuantumScale * GetPixelOpacity(p); break; } case IndexChannel: { phase_pixels[i] = QuantumScale * GetPixelIndex(indexes + x); break; } case GrayChannels: { phase_pixels[i] = QuantumScale * GetPixelGray(p); break; } } i++; p++; } } if (fourier_info->modulus != MagickFalse) { i = 0L; for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i] -= 0.5; phase_pixels[i] *= (2.0 * MagickPI); i++; } } phase_view = DestroyCacheView(phase_view); CorrectPhaseLHS(fourier_info->width, fourier_info->height, phase_pixels); if (status != MagickFalse) status = InverseQuadrantSwap(fourier_info->width, fourier_info->height, phase_pixels, inverse_pixels); (void)CopyMagickMemory(phase_pixels, inverse_pixels, fourier_info->height * fourier_info->center * sizeof(*phase_pixels)); inverse_info = RelinquishVirtualMemory(inverse_info); /* * Merge two sets. */ i = 0L; if (fourier_info->modulus != MagickFalse) for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i] = magnitude_pixels[i] * cos(phase_pixels[i]) + I * magnitude_pixels[i] * sin(phase_pixels[i]); #else fourier_pixels[i][0] = magnitude_pixels[i] * cos(phase_pixels[i]); fourier_pixels[i][1] = magnitude_pixels[i] * sin(phase_pixels[i]); #endif i++; } else for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i] = magnitude_pixels[i] + I * phase_pixels[i]; #else fourier_pixels[i][0] = magnitude_pixels[i]; fourier_pixels[i][1] = phase_pixels[i]; #endif i++; } magnitude_info = RelinquishVirtualMemory(magnitude_info); phase_info = RelinquishVirtualMemory(phase_info); return (status); } static MagickBooleanType InverseFourierTransform(FourierInfo * fourier_info, fftw_complex * fourier_pixels, Image * image, ExceptionInfo * exception) { CacheView * image_view; double *source_pixels; const char *value; fftw_plan fftw_c2r_plan; MemoryInfo * source_info; register IndexPacket * indexes; register PixelPacket * q; register ssize_t i, x; ssize_t y; source_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width * sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); return (MagickFalse); } source_pixels = (double *)GetVirtualMemoryBlob(source_info); value = GetImageArtifact(image, "fourier:normalize"); if (LocaleCompare(value, "inverse") == 0) { double gamma; /* * Normalize Fourier transform. 
*/ i = 0L; gamma = PerceptibleReciprocal((double)fourier_info->width * fourier_info->height); for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i] *= gamma; #else fourier_pixels[i][0] *= gamma; fourier_pixels[i][1] *= gamma; #endif i++; } } { fftw_c2r_plan = fftw_plan_dft_c2r_2d(fourier_info->width, fourier_info->height, fourier_pixels, source_pixels, FFTW_ESTIMATE); fftw_execute(fftw_c2r_plan); fftw_destroy_plan(fftw_c2r_plan); } i = 0L; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0L; y < (ssize_t) fourier_info->height; y++) { if (y >= (ssize_t) image->rows) break; q = GetCacheViewAuthenticPixels(image_view, 0L, y, fourier_info->width > image->columns ? image->columns : fourier_info->width, 1UL, exception); if (q == (PixelPacket *) NULL) break; indexes = GetCacheViewAuthenticIndexQueue(image_view); for (x = 0L; x < (ssize_t) fourier_info->width; x++) { if (x < (ssize_t) image->columns) switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q, ClampToQuantum(QuantumRange * source_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q, ClampToQuantum(QuantumRange * source_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q, ClampToQuantum(QuantumRange * source_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q, ClampToQuantum(QuantumRange * source_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes + x, ClampToQuantum(QuantumRange * source_pixels[i])); break; } case GrayChannels: { SetPixelGray(q, ClampToQuantum(QuantumRange * source_pixels[i])); break; } } i++; q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) break; } image_view = DestroyCacheView(image_view); source_info = RelinquishVirtualMemory(source_info); return (MagickTrue); } static MagickBooleanType InverseFourierTransformChannel( const Image * magnitude_image, const Image * phase_image, const ChannelType channel, const MagickBooleanType modulus, Image * fourier_image, ExceptionInfo * exception) { fftw_complex * inverse_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo * inverse_info; size_t extent; fourier_info.width = magnitude_image->columns; fourier_info.height = magnitude_image->rows; if ((magnitude_image->columns != magnitude_image->rows) || ((magnitude_image->columns % 2) != 0) || ((magnitude_image->rows % 2) != 0)) { extent = magnitude_image->columns < magnitude_image->rows ? magnitude_image->rows : magnitude_image->columns; fourier_info.width = (extent & 0x01) == 1 ? 
extent + 1UL : extent; } fourier_info.height = fourier_info.width; fourier_info.center = (ssize_t) floor((double)fourier_info.width / 2L) + 1L; fourier_info.channel = channel; fourier_info.modulus = modulus; inverse_info = AcquireVirtualMemory((size_t) fourier_info.height, fourier_info.center * sizeof(*inverse_pixels)); if (inverse_info == (MemoryInfo *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", magnitude_image->filename); return (MagickFalse); } inverse_pixels = (fftw_complex *) GetVirtualMemoryBlob(inverse_info); status = InverseFourier(&fourier_info, magnitude_image, phase_image, inverse_pixels, exception); if (status != MagickFalse) status = InverseFourierTransform(&fourier_info, inverse_pixels, fourier_image, exception); inverse_info = RelinquishVirtualMemory(inverse_info); return (status); } #endif MagickExport Image * InverseFourierTransformImage(const Image * magnitude_image, const Image * phase_image, const MagickBooleanType modulus, ExceptionInfo * exception) { Image * fourier_image; assert(magnitude_image != (Image *) NULL); assert(magnitude_image->signature == MagickSignature); if (magnitude_image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", magnitude_image->filename); if (phase_image == (Image *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ImageError, "ImageSequenceRequired", "`%s'", magnitude_image->filename); return ((Image *) NULL); } #if !defined(MAGICKCORE_FFTW_DELEGATE) fourier_image = (Image *) NULL; (void)modulus; (void)ThrowMagickException(exception, GetMagickModule(), MissingDelegateWarning, "DelegateLibrarySupportNotBuiltIn", "`%s' (FFTW)", magnitude_image->filename); #else { fourier_image = CloneImage(magnitude_image, magnitude_image->columns, magnitude_image->rows, MagickTrue, exception); if (fourier_image != (Image *) NULL) { MagickBooleanType is_gray, status; status = MagickTrue; is_gray = IsGrayImage(magnitude_image, exception); if (is_gray != MagickFalse) is_gray = IsGrayImage(phase_image, exception); { { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status = InverseFourierTransformChannel(magnitude_image, phase_image, GrayChannels, modulus, fourier_image, exception); else thread_status = InverseFourierTransformChannel(magnitude_image, phase_image, RedChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } { MagickBooleanType thread_status; thread_status = MagickTrue; if (is_gray == MagickFalse) thread_status = InverseFourierTransformChannel(magnitude_image, phase_image, GreenChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } { MagickBooleanType thread_status; thread_status = MagickTrue; if (is_gray == MagickFalse) thread_status = InverseFourierTransformChannel(magnitude_image, phase_image, BlueChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } { MagickBooleanType thread_status; thread_status = MagickTrue; if (magnitude_image->matte != MagickFalse) thread_status = InverseFourierTransformChannel(magnitude_image, phase_image, OpacityChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } { MagickBooleanType thread_status; thread_status = MagickTrue; if (magnitude_image->colorspace == CMYKColorspace) thread_status = InverseFourierTransformChannel(magnitude_image, phase_image, IndexChannel, modulus, 
fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } } if (status == MagickFalse) fourier_image = DestroyImage(fourier_image); } fftw_cleanup(); } #endif return (fourier_image); }
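/*
 * Usage sketch (not part of fourier.c): a minimal, hedged example of how the
 * forward/inverse DFT pair defined above might be driven from an application.
 * Assumes an ImageMagick 6 MagickCore build with the FFTW delegate; the
 * helper below and its (trimmed) error handling are illustrative only.
 */
#include <magick/MagickCore.h>

static Image *FourierRoundTrip(Image *image, ExceptionInfo *exception)
{
  Image *fourier, *reconstructed;

  /* Forward DFT: yields a two-image list, magnitude first, phase second. */
  fourier = ForwardFourierTransformImage(image, MagickTrue, exception);
  if (fourier == (Image *) NULL)
    return ((Image *) NULL);
  /* Inverse DFT: consumes the magnitude / phase pair produced above. */
  reconstructed = InverseFourierTransformImage(fourier,
    GetNextImageInList(fourier), MagickTrue, exception);
  fourier = DestroyImageList(fourier);
  return (reconstructed);
}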
/* * Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/cache.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/fourier.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #if defined(MAGICKCORE_FFTW_DELEGATE) #if defined(MAGICKCORE_HAVE_COMPLEX_H) #include <complex.h> #endif #include <fftw3.h> #if !defined(MAGICKCORE_HAVE_CABS) #define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1])) #endif #if !defined(MAGICKCORE_HAVE_CARG) #define carg(z) (atan2(cimag(z),creal(z))) #endif #if !defined(MAGICKCORE_HAVE_CIMAG) #define cimag(z) (z[1]) #endif #if !defined(MAGICKCORE_HAVE_CREAL) #define creal(z) (z[0]) #endif #endif /* * Typedef declarations. */ typedef struct _FourierInfo { ChannelType channel; MagickBooleanType modulus; size_t width, height; ssize_t center; } FourierInfo; /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o m p l e x I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ComplexImages() performs complex mathematics on an image * sequence. % % The format of the ComplexImages method is: % % * MagickBooleanType ComplexImages(Image *images,const ComplexOperator op, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o op: A complex operator. % % o * exception: return any errors or warnings in this structure. % */ MagickExport Image * ComplexImages(const Image * images, const ComplexOperator op, ExceptionInfo * exception) { #define ComplexImageTag "Complex/Image" CacheView * Ai_view, *Ar_view, *Bi_view, *Br_view, *Ci_view, *Cr_view; const char *artifact; const Image * Ai_image, *Ar_image, *Bi_image, *Br_image; double snr; Image * Ci_image, *complex_images, *Cr_image, *image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if (images->next == (Image *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ImageError, "ImageSequenceRequired", "`%s'", images->filename); return ((Image *) NULL); } image = CloneImage(images, images->columns, images->rows, MagickTrue, exception); if (image == (Image *) NULL) return ((Image *) NULL); image->storage_class = DirectClass; image->depth = 32UL; complex_images = NewImageList(); AppendImageToList(&complex_images, image); image = CloneImage(images, images->columns, images->rows, MagickTrue, exception); if (image == (Image *) NULL) { complex_images = DestroyImageList(complex_images); return (complex_images); } AppendImageToList(&complex_images, image); complex_images->storage_class = DirectClass; complex_images->depth = 32UL; /* * Apply complex mathematics to image pixels. 
*/ artifact = GetImageArtifact(image, "complex:snr"); snr = 0.0; if (artifact != (const char *)NULL) snr = StringToDouble(artifact, (char **)NULL); Ar_image = images; Ai_image = images->next; Br_image = images; Bi_image = images->next; if ((images->next->next != (Image *) NULL) && (images->next->next->next != (Image *) NULL)) { Br_image = images->next->next; Bi_image = images->next->next->next; } Cr_image = complex_images; Ci_image = complex_images->next; Ar_view = AcquireVirtualCacheView(Ar_image, exception); Ai_view = AcquireVirtualCacheView(Ai_image, exception); Br_view = AcquireVirtualCacheView(Br_image, exception); Bi_view = AcquireVirtualCacheView(Bi_image, exception); Cr_view = AcquireAuthenticCacheView(Cr_image, exception); Ci_view = AcquireAuthenticCacheView(Ci_image, exception); status = MagickTrue; progress = 0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(images,complex_images,images->rows,1) #endif for (y = 0; y < (ssize_t) images->rows; y++) { register const PixelPacket * magick_restrict Ai, *magick_restrict Ar, *magick_restrict Bi, *magick_restrict Br; register PixelPacket * magick_restrict Ci, *magick_restrict Cr; register ssize_t x; if (status == MagickFalse) continue; Ar = GetCacheViewVirtualPixels(Ar_view, 0, y, Ar_image->columns, 1, exception); Ai = GetCacheViewVirtualPixels(Ai_view, 0, y, Ai_image->columns, 1, exception); Br = GetCacheViewVirtualPixels(Br_view, 0, y, Br_image->columns, 1, exception); Bi = GetCacheViewVirtualPixels(Bi_view, 0, y, Bi_image->columns, 1, exception); Cr = QueueCacheViewAuthenticPixels(Cr_view, 0, y, Cr_image->columns, 1, exception); Ci = QueueCacheViewAuthenticPixels(Ci_view, 0, y, Ci_image->columns, 1, exception); if ((Ar == (const PixelPacket *)NULL) || (Ai == (const PixelPacket *)NULL) || (Br == (const PixelPacket *)NULL) || (Bi == (const PixelPacket *)NULL) || (Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL)) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) images->columns; x++) { switch (op) { case AddComplexOperator: { Cr->red = Ar->red + Br->red; Ci->red = Ai->red + Bi->red; Cr->green = Ar->green + Br->green; Ci->green = Ai->green + Bi->green; Cr->blue = Ar->blue + Br->blue; Ci->blue = Ai->blue + Bi->blue; if (images->matte != MagickFalse) { Cr->opacity = Ar->opacity + Br->opacity; Ci->opacity = Ai->opacity + Bi->opacity; } break; } case ConjugateComplexOperator: default: { Cr->red = Ar->red; Ci->red = (-Bi->red); Cr->green = Ar->green; Ci->green = (-Bi->green); Cr->blue = Ar->blue; Ci->blue = (-Bi->blue); if (images->matte != MagickFalse) { Cr->opacity = Ar->opacity; Ci->opacity = (-Bi->opacity); } break; } case DivideComplexOperator: { double gamma; gamma = PerceptibleReciprocal(Br->red * Br->red + Bi->red * Bi->red + snr); Cr->red = gamma * (Ar->red * Br->red + Ai->red * Bi->red); Ci->red = gamma * (Ai->red * Br->red - Ar->red * Bi->red); gamma = PerceptibleReciprocal(Br->green * Br->green + Bi->green * Bi->green + snr); Cr->green = gamma * (Ar->green * Br->green + Ai->green * Bi->green); Ci->green = gamma * (Ai->green * Br->green - Ar->green * Bi->green); gamma = PerceptibleReciprocal(Br->blue * Br->blue + Bi->blue * Bi->blue + snr); Cr->blue = gamma * (Ar->blue * Br->blue + Ai->blue * Bi->blue); Ci->blue = gamma * (Ai->blue * Br->blue - Ar->blue * Bi->blue); if (images->matte != MagickFalse) { gamma = PerceptibleReciprocal(Br->opacity * Br->opacity + Bi->opacity * Bi->opacity + snr); Cr->opacity = gamma * (Ar->opacity * 
Br->opacity + Ai->opacity * Bi->opacity); Ci->opacity = gamma * (Ai->opacity * Br->opacity - Ar->opacity * Bi->opacity); } break; } case MagnitudePhaseComplexOperator: { Cr->red = sqrt(Ar->red * Ar->red + Ai->red * Ai->red); Ci->red = atan2(Ai->red, Ar->red) / (2.0 * MagickPI) + 0.5; Cr->green = sqrt(Ar->green * Ar->green + Ai->green * Ai->green); Ci->green = atan2(Ai->green, Ar->green) / (2.0 * MagickPI) + 0.5; Cr->blue = sqrt(Ar->blue * Ar->blue + Ai->blue * Ai->blue); Ci->blue = atan2(Ai->blue, Ar->blue) / (2.0 * MagickPI) + 0.5; if (images->matte != MagickFalse) { Cr->opacity = sqrt(Ar->opacity * Ar->opacity + Ai->opacity * Ai->opacity); Ci->opacity = atan2(Ai->opacity, Ar->opacity) / (2.0 * MagickPI) + 0.5; } break; } case MultiplyComplexOperator: { Cr->red = QuantumScale * (Ar->red * Br->red - Ai->red * Bi->red); Ci->red = QuantumScale * (Ai->red * Br->red + Ar->red * Bi->red); Cr->green = QuantumScale * (Ar->green * Br->green - Ai->green * Bi->green); Ci->green = QuantumScale * (Ai->green * Br->green + Ar->green * Bi->green); Cr->blue = QuantumScale * (Ar->blue * Br->blue - Ai->blue * Bi->blue); Ci->blue = QuantumScale * (Ai->blue * Br->blue + Ar->blue * Bi->blue); if (images->matte != MagickFalse) { Cr->opacity = QuantumScale * (Ar->opacity * Br->opacity - Ai->opacity * Bi->opacity); Ci->opacity = QuantumScale * (Ai->opacity * Br->opacity + Ar->opacity * Bi->opacity); } break; } case RealImaginaryComplexOperator: { Cr->red = Ar->red * cos(2.0 * MagickPI * (Ai->red - 0.5)); Ci->red = Ar->red * sin(2.0 * MagickPI * (Ai->red - 0.5)); Cr->green = Ar->green * cos(2.0 * MagickPI * (Ai->green - 0.5)); Ci->green = Ar->green * sin(2.0 * MagickPI * (Ai->green - 0.5)); Cr->blue = Ar->blue * cos(2.0 * MagickPI * (Ai->blue - 0.5)); Ci->blue = Ar->blue * sin(2.0 * MagickPI * (Ai->blue - 0.5)); if (images->matte != MagickFalse) { Cr->opacity = Ar->opacity * cos(2.0 * MagickPI * (Ai->opacity - 0.5)); Ci->opacity = Ar->opacity * sin(2.0 * MagickPI * (Ai->opacity - 0.5)); } break; } case SubtractComplexOperator: { Cr->red = Ar->red - Br->red; Ci->red = Ai->red - Bi->red; Cr->green = Ar->green - Br->green; Ci->green = Ai->green - Bi->green; Cr->blue = Ar->blue - Br->blue; Ci->blue = Ai->blue - Bi->blue; if (images->matte != MagickFalse) { Cr->opacity = Ar->opacity - Br->opacity; Ci->opacity = Ai->opacity - Bi->opacity; } break; } } Ar++; Ai++; Br++; Bi++; Cr++; Ci++; } if (SyncCacheViewAuthenticPixels(Ci_view, exception) == MagickFalse) status = MagickFalse; if (SyncCacheViewAuthenticPixels(Cr_view, exception) == MagickFalse) status = MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ComplexImages) #endif proceed = SetImageProgress(images, ComplexImageTag, progress++, images->rows); if (proceed == MagickFalse) status = MagickFalse; } } Cr_view = DestroyCacheView(Cr_view); Ci_view = DestroyCacheView(Ci_view); Br_view = DestroyCacheView(Br_view); Bi_view = DestroyCacheView(Bi_view); Ar_view = DestroyCacheView(Ar_view); Ai_view = DestroyCacheView(Ai_view); if (status == MagickFalse) complex_images = DestroyImageList(complex_images); return (complex_images); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % F o r w a r d F o u r i e r T r a n s f o r m I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ForwardFourierTransformImage() implements the 
discrete Fourier * transform % (DFT) of the image either as a magnitude / phase or real / * imaginary image % pair. % % The format of the * ForwardFourierTransformImage method is: % % Image * *ForwardFourierTransformImage(const Image *image, % const * MagickBooleanType modulus,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o modulus: if * true, return the transform as a magnitude / phase pair % otherwise a * real / imaginary image pair. % % o exception: return any errors or * warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType RollFourier(const size_t width, const size_t height, const ssize_t x_offset, const ssize_t y_offset, double *roll_pixels) { double *source_pixels; MemoryInfo * source_info; register ssize_t i, x; ssize_t u, v, y; /* * Move zero frequency (DC, average color) from (0,0) to * (width/2,height/2). */ source_info = AcquireVirtualMemory(height, width * sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) return (MagickFalse); source_pixels = (double *)GetVirtualMemoryBlob(source_info); i = 0L; for (y = 0L; y < (ssize_t) height; y++) { if (y_offset < 0L) v = ((y + y_offset) < 0L) ? y + y_offset + (ssize_t) height : y + y_offset; else v = ((y + y_offset) > ((ssize_t) height - 1L)) ? y + y_offset - (ssize_t) height : y + y_offset; for (x = 0L; x < (ssize_t) width; x++) { if (x_offset < 0L) u = ((x + x_offset) < 0L) ? x + x_offset + (ssize_t) width : x + x_offset; else u = ((x + x_offset) > ((ssize_t) width - 1L)) ? x + x_offset - (ssize_t) width : x + x_offset; source_pixels[v * width + u] = roll_pixels[i++]; } } (void)CopyMagickMemory(roll_pixels, source_pixels, height * width * sizeof(*source_pixels)); source_info = RelinquishVirtualMemory(source_info); return (MagickTrue); } static MagickBooleanType ForwardQuadrantSwap(const size_t width, const size_t height, double *source_pixels, double *forward_pixels) { MagickBooleanType status; register ssize_t x; ssize_t center, y; /* * Swap quadrants. 
*/ center = (ssize_t) floor((double)width / 2L) + 1L; status = RollFourier((size_t) center, height, 0L, (ssize_t) height / 2L, source_pixels); if (status == MagickFalse) return (MagickFalse); for (y = 0L; y < (ssize_t) height; y++) for (x = 0L; x < (ssize_t) (width / 2L - 1L); x++) forward_pixels[y * width + x + width / 2L] = source_pixels[y * center + x]; for (y = 1; y < (ssize_t) height; y++) for (x = 0L; x < (ssize_t) (width / 2L - 1L); x++) forward_pixels[(height - y) * width + width / 2L - x - 1L] = source_pixels[y * center + x + 1L]; for (x = 0L; x < (ssize_t) (width / 2L); x++) forward_pixels[-x + width / 2L - 1L] = forward_pixels[x + width / 2L + 1L]; return (MagickTrue); } static void CorrectPhaseLHS(const size_t width, const size_t height, double *fourier_pixels) { register ssize_t x; ssize_t y; for (y = 0L; y < (ssize_t) height; y++) for (x = 0L; x < (ssize_t) (width / 2L); x++) fourier_pixels[y * width + x] *= (-1.0); } static MagickBooleanType ForwardFourier(const FourierInfo * fourier_info, Image * image, double *magnitude, double *phase, ExceptionInfo * exception) { CacheView * magnitude_view, *phase_view; double *magnitude_pixels, *phase_pixels; Image * magnitude_image, *phase_image; MagickBooleanType status; MemoryInfo * magnitude_info, *phase_info; register IndexPacket * indexes; register PixelPacket * q; register ssize_t x; ssize_t i, y; magnitude_image = GetFirstImageInList(image); phase_image = GetNextImageInList(image); if (phase_image == (Image *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ImageError, "ImageSequenceRequired", "`%s'", image->filename); return (MagickFalse); } /* * Create "Fourier Transform" image from constituent arrays. */ magnitude_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width * sizeof(*magnitude_pixels)); phase_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width * sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info = RelinquishVirtualMemory(phase_info); if (magnitude_info != (MemoryInfo *) NULL) magnitude_info = RelinquishVirtualMemory(magnitude_info); (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); return (MagickFalse); } magnitude_pixels = (double *)GetVirtualMemoryBlob(magnitude_info); (void)ResetMagickMemory(magnitude_pixels, 0, fourier_info->height * fourier_info->width * sizeof(*magnitude_pixels)); phase_pixels = (double *)GetVirtualMemoryBlob(phase_info); (void)ResetMagickMemory(phase_pixels, 0, fourier_info->height * fourier_info->width * sizeof(*phase_pixels)); status = ForwardQuadrantSwap(fourier_info->width, fourier_info->height, magnitude, magnitude_pixels); if (status != MagickFalse) status = ForwardQuadrantSwap(fourier_info->width, fourier_info->height, phase, phase_pixels); CorrectPhaseLHS(fourier_info->width, fourier_info->height, phase_pixels); if (fourier_info->modulus != MagickFalse) { i = 0L; for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i] /= (2.0 * MagickPI); phase_pixels[i] += 0.5; i++; } } magnitude_view = AcquireAuthenticCacheView(magnitude_image, exception); i = 0L; for (y = 0L; y < (ssize_t) fourier_info->height; y++) { q = GetCacheViewAuthenticPixels(magnitude_view, 0L, y, fourier_info->height, 1UL, exception); if (q == (PixelPacket *) NULL) break; indexes = 
GetCacheViewAuthenticIndexQueue(magnitude_view); for (x = 0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q, ClampToQuantum(QuantumRange * magnitude_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q, ClampToQuantum(QuantumRange * magnitude_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q, ClampToQuantum(QuantumRange * magnitude_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q, ClampToQuantum(QuantumRange * magnitude_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes + x, ClampToQuantum(QuantumRange * magnitude_pixels[i])); break; } case GrayChannels: { SetPixelGray(q, ClampToQuantum(QuantumRange * magnitude_pixels[i])); break; } } i++; q++; } status = SyncCacheViewAuthenticPixels(magnitude_view, exception); if (status == MagickFalse) break; } magnitude_view = DestroyCacheView(magnitude_view); i = 0L; phase_view = AcquireAuthenticCacheView(phase_image, exception); for (y = 0L; y < (ssize_t) fourier_info->height; y++) { q = GetCacheViewAuthenticPixels(phase_view, 0L, y, fourier_info->height, 1UL, exception); if (q == (PixelPacket *) NULL) break; indexes = GetCacheViewAuthenticIndexQueue(phase_view); for (x = 0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q, ClampToQuantum(QuantumRange * phase_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q, ClampToQuantum(QuantumRange * phase_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q, ClampToQuantum(QuantumRange * phase_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q, ClampToQuantum(QuantumRange * phase_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes + x, ClampToQuantum(QuantumRange * phase_pixels[i])); break; } case GrayChannels: { SetPixelGray(q, ClampToQuantum(QuantumRange * phase_pixels[i])); break; } } i++; q++; } status = SyncCacheViewAuthenticPixels(phase_view, exception); if (status == MagickFalse) break; } phase_view = DestroyCacheView(phase_view); phase_info = RelinquishVirtualMemory(phase_info); magnitude_info = RelinquishVirtualMemory(magnitude_info); return (status); } static MagickBooleanType ForwardFourierTransform(FourierInfo * fourier_info, const Image * image, double *magnitude_pixels, double *phase_pixels, ExceptionInfo * exception) { CacheView * image_view; const char *value; double *source_pixels; fftw_complex * forward_pixels; fftw_plan fftw_r2c_plan; MemoryInfo * forward_info, *source_info; register const IndexPacket * indexes; register const PixelPacket * p; register ssize_t i, x; ssize_t y; /* * Generate the forward Fourier transform. 
*/ source_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width * sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); return (MagickFalse); } source_pixels = (double *)GetVirtualMemoryBlob(source_info); ResetMagickMemory(source_pixels, 0, fourier_info->height * fourier_info->width * sizeof(*source_pixels)); i = 0L; image_view = AcquireVirtualCacheView(image, exception); for (y = 0L; y < (ssize_t) fourier_info->height; y++) { p = GetCacheViewVirtualPixels(image_view, 0L, y, fourier_info->width, 1UL, exception); if (p == (const PixelPacket *)NULL) break; indexes = GetCacheViewVirtualIndexQueue(image_view); for (x = 0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { source_pixels[i] = QuantumScale * GetPixelRed(p); break; } case GreenChannel: { source_pixels[i] = QuantumScale * GetPixelGreen(p); break; } case BlueChannel: { source_pixels[i] = QuantumScale * GetPixelBlue(p); break; } case OpacityChannel: { source_pixels[i] = QuantumScale * GetPixelOpacity(p); break; } case IndexChannel: { source_pixels[i] = QuantumScale * GetPixelIndex(indexes + x); break; } case GrayChannels: { source_pixels[i] = QuantumScale * GetPixelGray(p); break; } } i++; p++; } } image_view = DestroyCacheView(image_view); forward_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->center * sizeof(*forward_pixels)); if (forward_info == (MemoryInfo *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); source_info = (MemoryInfo *) RelinquishVirtualMemory(source_info); return (MagickFalse); } forward_pixels = (fftw_complex *) GetVirtualMemoryBlob(forward_info); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ForwardFourierTransform) #endif fftw_r2c_plan = fftw_plan_dft_r2c_2d(fourier_info->width, fourier_info->height, source_pixels, forward_pixels, FFTW_ESTIMATE); fftw_execute(fftw_r2c_plan); fftw_destroy_plan(fftw_r2c_plan); source_info = (MemoryInfo *) RelinquishVirtualMemory(source_info); value = GetImageArtifact(image, "fourier:normalize"); if ((value == (const char *)NULL) || (LocaleCompare(value, "forward") == 0)) { double gamma; /* * Normalize inverse transform. */ i = 0L; gamma = PerceptibleReciprocal((double)fourier_info->width * fourier_info->height); for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) forward_pixels[i] *= gamma; #else forward_pixels[i][0] *= gamma; forward_pixels[i][1] *= gamma; #endif i++; } } /* * Generate magnitude and phase (or real and imaginary). 
*/ i = 0L; if (fourier_info->modulus != MagickFalse) for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->center; x++) { magnitude_pixels[i] = cabs(forward_pixels[i]); phase_pixels[i] = carg(forward_pixels[i]); i++; } else for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->center; x++) { magnitude_pixels[i] = creal(forward_pixels[i]); phase_pixels[i] = cimag(forward_pixels[i]); i++; } forward_info = (MemoryInfo *) RelinquishVirtualMemory(forward_info); return (MagickTrue); } static MagickBooleanType ForwardFourierTransformChannel(const Image * image, const ChannelType channel, const MagickBooleanType modulus, Image * fourier_image, ExceptionInfo * exception) { double *magnitude_pixels, *phase_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo * magnitude_info, *phase_info; size_t extent; fourier_info.width = image->columns; fourier_info.height = image->rows; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || ((image->rows % 2) != 0)) { extent = image->columns < image->rows ? image->rows : image->columns; fourier_info.width = (extent & 0x01) == 1 ? extent + 1UL : extent; } fourier_info.height = fourier_info.width; fourier_info.center = (ssize_t) floor((double)fourier_info.width / 2L) + 1L; fourier_info.channel = channel; fourier_info.modulus = modulus; magnitude_info = AcquireVirtualMemory((size_t) fourier_info.height, fourier_info.center * sizeof(*magnitude_pixels)); phase_info = AcquireVirtualMemory((size_t) fourier_info.height, fourier_info.center * sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info = RelinquishVirtualMemory(phase_info); if (magnitude_info != (MemoryInfo *) NULL) magnitude_info = RelinquishVirtualMemory(magnitude_info); (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); return (MagickFalse); } magnitude_pixels = (double *)GetVirtualMemoryBlob(magnitude_info); phase_pixels = (double *)GetVirtualMemoryBlob(phase_info); status = ForwardFourierTransform(&fourier_info, image, magnitude_pixels, phase_pixels, exception); if (status != MagickFalse) status = ForwardFourier(&fourier_info, fourier_image, magnitude_pixels, phase_pixels, exception); phase_info = RelinquishVirtualMemory(phase_info); magnitude_info = RelinquishVirtualMemory(magnitude_info); return (status); } #endif MagickExport Image * ForwardFourierTransformImage(const Image * image, const MagickBooleanType modulus, ExceptionInfo * exception) { Image * fourier_image; fourier_image = NewImageList(); #if !defined(MAGICKCORE_FFTW_DELEGATE) (void)modulus; (void)ThrowMagickException(exception, GetMagickModule(), MissingDelegateWarning, "DelegateLibrarySupportNotBuiltIn", "`%s' (FFTW)", image->filename); #else { Image * magnitude_image; size_t extent, height, width; width = image->columns; height = image->rows; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || ((image->rows % 2) != 0)) { extent = image->columns < image->rows ? image->rows : image->columns; width = (extent & 0x01) == 1 ? 
extent + 1UL : extent; } height = width; magnitude_image = CloneImage(image, width, height, MagickTrue, exception); if (magnitude_image != (Image *) NULL) { Image * phase_image; magnitude_image->storage_class = DirectClass; magnitude_image->depth = 32UL; phase_image = CloneImage(image, width, height, MagickTrue, exception); if (phase_image == (Image *) NULL) magnitude_image = DestroyImage(magnitude_image); else { MagickBooleanType is_gray, status; phase_image->storage_class = DirectClass; phase_image->depth = 32UL; AppendImageToList(&fourier_image, magnitude_image); AppendImageToList(&fourier_image, phase_image); status = MagickTrue; is_gray = IsGrayImage(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status = ForwardFourierTransformChannel(image, GrayChannels, modulus, fourier_image, exception); else thread_status = ForwardFourierTransformChannel(image, RedChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status = MagickTrue; if (is_gray == MagickFalse) thread_status = ForwardFourierTransformChannel(image, GreenChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status = MagickTrue; if (is_gray == MagickFalse) thread_status = ForwardFourierTransformChannel(image, BlueChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status = MagickTrue; if (image->matte != MagickFalse) thread_status = ForwardFourierTransformChannel(image, OpacityChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status = MagickTrue; if (image->colorspace == CMYKColorspace) thread_status = ForwardFourierTransformChannel(image, IndexChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } } if (status == MagickFalse) fourier_image = DestroyImageList(fourier_image); fftw_cleanup(); } } } #endif return (fourier_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % I n v e r s e F o u r i e r T r a n s f o r m I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % InverseFourierTransformImage() implements the inverse discrete * Fourier % transform (DFT) of the image either as a magnitude / phase or * real / % imaginary image pair. % % The format of the * InverseFourierTransformImage method is: % % Image * *InverseFourierTransformImage(const Image *magnitude_image, % const * Image *phase_image,const MagickBooleanType modulus, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o * magnitude_image: the magnitude or real image. % % o phase_image: the * phase or imaginary image. % % o modulus: if true, return transform as a * magnitude / phase pair % otherwise a real / imaginary image pair. 
% % * o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType InverseQuadrantSwap(const size_t width, const size_t height, const double *source, double *destination) { register ssize_t x; ssize_t center, y; /* * Swap quadrants. */ center = (ssize_t) floor((double)width / 2L) + 1L; for (y = 1L; y < (ssize_t) height; y++) for (x = 0L; x < (ssize_t) (width / 2L + 1L); x++) destination[(height - y) * center - x + width / 2L] = source[y * width + x]; for (y = 0L; y < (ssize_t) height; y++) destination[y * center] = source[y * width + width / 2L]; for (x = 0L; x < center; x++) destination[x] = source[center - x - 1L]; return (RollFourier(center, height, 0L, (ssize_t) height / -2L, destination)); } static MagickBooleanType InverseFourier(FourierInfo * fourier_info, const Image * magnitude_image, const Image * phase_image, fftw_complex * fourier_pixels, ExceptionInfo * exception) { CacheView * magnitude_view, *phase_view; double *inverse_pixels, *magnitude_pixels, *phase_pixels; MagickBooleanType status; MemoryInfo * inverse_info, *magnitude_info, *phase_info; register const IndexPacket * indexes; register const PixelPacket * p; register ssize_t i, x; ssize_t y; /* * Inverse fourier - read image and break down into a double array. */ magnitude_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width * sizeof(*magnitude_pixels)); phase_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width * sizeof(*phase_pixels)); inverse_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->center * sizeof(*inverse_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL) || (inverse_info == (MemoryInfo *) NULL)) { if (magnitude_info != (MemoryInfo *) NULL) magnitude_info = RelinquishVirtualMemory(magnitude_info); if (phase_info != (MemoryInfo *) NULL) phase_info = RelinquishVirtualMemory(phase_info); if (inverse_info != (MemoryInfo *) NULL) inverse_info = RelinquishVirtualMemory(inverse_info); (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", magnitude_image->filename); return (MagickFalse); } magnitude_pixels = (double *)GetVirtualMemoryBlob(magnitude_info); phase_pixels = (double *)GetVirtualMemoryBlob(phase_info); inverse_pixels = (double *)GetVirtualMemoryBlob(inverse_info); i = 0L; magnitude_view = AcquireVirtualCacheView(magnitude_image, exception); for (y = 0L; y < (ssize_t) fourier_info->height; y++) { p = GetCacheViewVirtualPixels(magnitude_view, 0L, y, fourier_info->width, 1UL, exception); if (p == (const PixelPacket *)NULL) break; indexes = GetCacheViewAuthenticIndexQueue(magnitude_view); for (x = 0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { magnitude_pixels[i] = QuantumScale * GetPixelRed(p); break; } case GreenChannel: { magnitude_pixels[i] = QuantumScale * GetPixelGreen(p); break; } case BlueChannel: { magnitude_pixels[i] = QuantumScale * GetPixelBlue(p); break; } case OpacityChannel: { magnitude_pixels[i] = QuantumScale * GetPixelOpacity(p); break; } case IndexChannel: { magnitude_pixels[i] = QuantumScale * GetPixelIndex(indexes + x); break; } case GrayChannels: { magnitude_pixels[i] = QuantumScale * GetPixelGray(p); break; } } i++; p++; } } magnitude_view = DestroyCacheView(magnitude_view); status = InverseQuadrantSwap(fourier_info->width, fourier_info->height, magnitude_pixels, inverse_pixels); 
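/*
 * InverseQuadrantSwap() above re-packs the full-width magnitude plane into
 * the half-width "center" layout that FFTW's complex-to-real transform
 * expects, undoing the quadrant roll applied on the forward pass; the
 * swapped values are copied back into magnitude_pixels below so that
 * inverse_pixels can be reused as scratch space for the phase plane.
 */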
(void)CopyMagickMemory(magnitude_pixels, inverse_pixels, fourier_info->height * fourier_info->center * sizeof(*magnitude_pixels)); i = 0L; phase_view = AcquireVirtualCacheView(phase_image, exception); for (y = 0L; y < (ssize_t) fourier_info->height; y++) { p = GetCacheViewVirtualPixels(phase_view, 0, y, fourier_info->width, 1, exception); if (p == (const PixelPacket *)NULL) break; indexes = GetCacheViewAuthenticIndexQueue(phase_view); for (x = 0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { phase_pixels[i] = QuantumScale * GetPixelRed(p); break; } case GreenChannel: { phase_pixels[i] = QuantumScale * GetPixelGreen(p); break; } case BlueChannel: { phase_pixels[i] = QuantumScale * GetPixelBlue(p); break; } case OpacityChannel: { phase_pixels[i] = QuantumScale * GetPixelOpacity(p); break; } case IndexChannel: { phase_pixels[i] = QuantumScale * GetPixelIndex(indexes + x); break; } case GrayChannels: { phase_pixels[i] = QuantumScale * GetPixelGray(p); break; } } i++; p++; } } if (fourier_info->modulus != MagickFalse) { i = 0L; for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i] -= 0.5; phase_pixels[i] *= (2.0 * MagickPI); i++; } } phase_view = DestroyCacheView(phase_view); CorrectPhaseLHS(fourier_info->width, fourier_info->height, phase_pixels); if (status != MagickFalse) status = InverseQuadrantSwap(fourier_info->width, fourier_info->height, phase_pixels, inverse_pixels); (void)CopyMagickMemory(phase_pixels, inverse_pixels, fourier_info->height * fourier_info->center * sizeof(*phase_pixels)); inverse_info = RelinquishVirtualMemory(inverse_info); /* * Merge two sets. */ i = 0L; if (fourier_info->modulus != MagickFalse) for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i] = magnitude_pixels[i] * cos(phase_pixels[i]) + I * magnitude_pixels[i] * sin(phase_pixels[i]); #else fourier_pixels[i][0] = magnitude_pixels[i] * cos(phase_pixels[i]); fourier_pixels[i][1] = magnitude_pixels[i] * sin(phase_pixels[i]); #endif i++; } else for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i] = magnitude_pixels[i] + I * phase_pixels[i]; #else fourier_pixels[i][0] = magnitude_pixels[i]; fourier_pixels[i][1] = phase_pixels[i]; #endif i++; } magnitude_info = RelinquishVirtualMemory(magnitude_info); phase_info = RelinquishVirtualMemory(phase_info); return (status); } static MagickBooleanType InverseFourierTransform(FourierInfo * fourier_info, fftw_complex * fourier_pixels, Image * image, ExceptionInfo * exception) { CacheView * image_view; double *source_pixels; const char *value; fftw_plan fftw_c2r_plan; MemoryInfo * source_info; register IndexPacket * indexes; register PixelPacket * q; register ssize_t i, x; ssize_t y; source_info = AcquireVirtualMemory((size_t) fourier_info->height, fourier_info->width * sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); return (MagickFalse); } source_pixels = (double *)GetVirtualMemoryBlob(source_info); value = GetImageArtifact(image, "fourier:normalize"); if (LocaleCompare(value, "inverse") == 0) { double gamma; /* * Normalize Fourier transform. 
*/ i = 0L; gamma = PerceptibleReciprocal((double)fourier_info->width * fourier_info->height); for (y = 0L; y < (ssize_t) fourier_info->height; y++) for (x = 0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i] *= gamma; #else fourier_pixels[i][0] *= gamma; fourier_pixels[i][1] *= gamma; #endif i++; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_InverseFourierTransform) #endif { fftw_c2r_plan = fftw_plan_dft_c2r_2d(fourier_info->width, fourier_info->height, fourier_pixels, source_pixels, FFTW_ESTIMATE); fftw_execute(fftw_c2r_plan); fftw_destroy_plan(fftw_c2r_plan); } i = 0L; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0L; y < (ssize_t) fourier_info->height; y++) { if (y >= (ssize_t) image->rows) break; q = GetCacheViewAuthenticPixels(image_view, 0L, y, fourier_info->width > image->columns ? image->columns : fourier_info->width, 1UL, exception); if (q == (PixelPacket *) NULL) break; indexes = GetCacheViewAuthenticIndexQueue(image_view); for (x = 0L; x < (ssize_t) fourier_info->width; x++) { if (x < (ssize_t) image->columns) switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q, ClampToQuantum(QuantumRange * source_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q, ClampToQuantum(QuantumRange * source_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q, ClampToQuantum(QuantumRange * source_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q, ClampToQuantum(QuantumRange * source_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes + x, ClampToQuantum(QuantumRange * source_pixels[i])); break; } case GrayChannels: { SetPixelGray(q, ClampToQuantum(QuantumRange * source_pixels[i])); break; } } i++; q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) break; } image_view = DestroyCacheView(image_view); source_info = RelinquishVirtualMemory(source_info); return (MagickTrue); } static MagickBooleanType InverseFourierTransformChannel( const Image * magnitude_image, const Image * phase_image, const ChannelType channel, const MagickBooleanType modulus, Image * fourier_image, ExceptionInfo * exception) { fftw_complex * inverse_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo * inverse_info; size_t extent; fourier_info.width = magnitude_image->columns; fourier_info.height = magnitude_image->rows; if ((magnitude_image->columns != magnitude_image->rows) || ((magnitude_image->columns % 2) != 0) || ((magnitude_image->rows % 2) != 0)) { extent = magnitude_image->columns < magnitude_image->rows ? magnitude_image->rows : magnitude_image->columns; fourier_info.width = (extent & 0x01) == 1 ? 
extent + 1UL : extent; } fourier_info.height = fourier_info.width; fourier_info.center = (ssize_t) floor((double)fourier_info.width / 2L) + 1L; fourier_info.channel = channel; fourier_info.modulus = modulus; inverse_info = AcquireVirtualMemory((size_t) fourier_info.height, fourier_info.center * sizeof(*inverse_pixels)); if (inverse_info == (MemoryInfo *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", magnitude_image->filename); return (MagickFalse); } inverse_pixels = (fftw_complex *) GetVirtualMemoryBlob(inverse_info); status = InverseFourier(&fourier_info, magnitude_image, phase_image, inverse_pixels, exception); if (status != MagickFalse) status = InverseFourierTransform(&fourier_info, inverse_pixels, fourier_image, exception); inverse_info = RelinquishVirtualMemory(inverse_info); return (status); } #endif MagickExport Image * InverseFourierTransformImage(const Image * magnitude_image, const Image * phase_image, const MagickBooleanType modulus, ExceptionInfo * exception) { Image * fourier_image; assert(magnitude_image != (Image *) NULL); assert(magnitude_image->signature == MagickSignature); if (magnitude_image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", magnitude_image->filename); if (phase_image == (Image *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ImageError, "ImageSequenceRequired", "`%s'", magnitude_image->filename); return ((Image *) NULL); } #if !defined(MAGICKCORE_FFTW_DELEGATE) fourier_image = (Image *) NULL; (void)modulus; (void)ThrowMagickException(exception, GetMagickModule(), MissingDelegateWarning, "DelegateLibrarySupportNotBuiltIn", "`%s' (FFTW)", magnitude_image->filename); #else { fourier_image = CloneImage(magnitude_image, magnitude_image->columns, magnitude_image->rows, MagickTrue, exception); if (fourier_image != (Image *) NULL) { MagickBooleanType is_gray, status; status = MagickTrue; is_gray = IsGrayImage(magnitude_image, exception); if (is_gray != MagickFalse) is_gray = IsGrayImage(phase_image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status = InverseFourierTransformChannel(magnitude_image, phase_image, GrayChannels, modulus, fourier_image, exception); else thread_status = InverseFourierTransformChannel(magnitude_image, phase_image, RedChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status = MagickTrue; if (is_gray == MagickFalse) thread_status = InverseFourierTransformChannel(magnitude_image, phase_image, GreenChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status = MagickTrue; if (is_gray == MagickFalse) thread_status = InverseFourierTransformChannel(magnitude_image, phase_image, BlueChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status = MagickTrue; if (magnitude_image->matte != MagickFalse) thread_status = InverseFourierTransformChannel(magnitude_image, 
phase_image, OpacityChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status = MagickTrue; if (magnitude_image->colorspace == CMYKColorspace) thread_status = InverseFourierTransformChannel(magnitude_image, phase_image, IndexChannel, modulus, fourier_image, exception); if (thread_status == MagickFalse) status = thread_status; } } if (status == MagickFalse) fourier_image = DestroyImage(fourier_image); } fftw_cleanup(); } #endif return (fourier_image); }
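A note on the gamma scaling in InverseFourierTransform above: FFTW's r2c/c2r transforms are unnormalized, so a forward transform followed by a backward one returns the input multiplied by the element count, and the code compensates by scaling each coefficient by 1/(width*height) before executing the c2r plan. A minimal standalone round-trip sketch of that convention (not ImageMagick code; the 4x4 size is an arbitrary assumption):

/* FFTW round trip: forward r2c then backward c2r scales the data by
 * n0*n1, so dividing by the element count restores the original
 * values -- the same role the gamma factor plays above. */
#include <fftw3.h>
#include <stdio.h>

int main(void)
{
  const int n0 = 4, n1 = 4;   /* hypothetical image size */
  double *in = fftw_alloc_real(n0 * n1);
  fftw_complex *freq = fftw_alloc_complex(n0 * (n1 / 2 + 1));
  fftw_plan fwd = fftw_plan_dft_r2c_2d(n0, n1, in, freq, FFTW_ESTIMATE);
  fftw_plan bwd = fftw_plan_dft_c2r_2d(n0, n1, freq, in, FFTW_ESTIMATE);
  for (int i = 0; i < n0 * n1; i++)
    in[i] = (double) i;
  fftw_execute(fwd);
  fftw_execute(bwd);          /* in[] is now the input scaled by n0*n1 */
  for (int i = 0; i < n0 * n1; i++)
    in[i] /= (double) (n0 * n1);
  printf("in[5] = %g (expect 5)\n", in[5]);
  fftw_destroy_plan(fwd);
  fftw_destroy_plan(bwd);
  fftw_free(in);
  fftw_free(freq);
  return 0;
}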
blackscholes_routine.c
/* * Copyright (c) 2017, NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <math.h> #include <stdlib.h> #include <stdio.h> #include <openacc.h> #include "timer.h" #ifdef FP64 typedef double real; #define SQRT(x) sqrt((x)) #define EXP(x) exp((x)) #define FABS(x) fabs((x)) #define LOG(x) log((x)) #else typedef float real; #define SQRT(x) sqrtf((x)) #define EXP(x) expf((x)) #define FABS(x) fabsf((x)) #define LOG(x) logf((x)) #endif const float RISKFREE = 0.02f; const float VOLATILITY = 0.30f; /////////////////////////////////////////////////////////////////////////////// // Polynomial approximation of cumulative normal distribution function /////////////////////////////////////////////////////////////////////////////// #pragma acc routine seq real CND(real d) { const real A1 = (real)0.31938153; const real A2 = (real)-0.356563782; const real A3 = (real)1.781477937; const real A4 = (real)-1.821255978; const real A5 = (real)1.330274429; const real RSQRT2PI = (real)0.39894228040143267793994605993438; real K = (real)1.0 / ((real)1.0 + (real)0.2316419 * FABS(d)); real cnd = RSQRT2PI * EXP(- (real)0.5 * d * d) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))); if(d > 0) cnd = (real)1.0 - cnd; return cnd; } //////////////////////////////////////////////////////////////////////////////// // Process an array of optN options //////////////////////////////////////////////////////////////////////////////// void BlackScholes( real * restrict callResult, real * restrict putResult, real * restrict stockPrice, real * restrict optionStrike, real * restrict optionYears, real Riskfree, real Volatility, int optN, int accelerate) { #pragma omp parallel for if (accelerate) #pragma acc parallel loop if (accelerate) for(int opt = 0; opt < optN; opt++) { real S = stockPrice[opt]; real X = optionStrike[opt]; real T = optionYears[opt]; real R = Riskfree, V = Volatility; real sqrtT = SQRT(T); real d1 = (LOG(S / X) + (R + (real)0.5 * V * V) * T) / (V * sqrtT); real d2 = d1 - V * sqrtT; real CNDD1 = CND(d1); real CNDD2 = CND(d2); //Calculate Call and Put simultaneously real expRT = EXP(- R * T); callResult[opt] = (real)(S * CNDD1 - X * expRT * CNDD2); putResult[opt] = (real)(X * expRT * ((real)1.0 - CNDD2) - S * ((real)1.0 - CNDD1)); } } float RandFloat(float low, float high){ float t = (float)rand() / (float)RAND_MAX; return (1.0f - t) * low + t * high; } int main(int argc, char **argv) { int OPT_N = 4000000; int OPT_SZ = OPT_N * sizeof(float); int iterations = 10; if (argc >= 2) iterations = atoi(argv[1]); real //Results calculated by CPU for reference *callResultCPU, *putResultCPU, //GPU results *callResultGPU, *putResultGPU, //CPU instance of input data *stockPrice, *optionStrike, *optionYears; real delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime; printf("Initializing data...\n"); callResultCPU = (real *)malloc(OPT_SZ); putResultCPU = (real *)malloc(OPT_SZ); callResultGPU = (real *)malloc(OPT_SZ); putResultGPU = (real *)malloc(OPT_SZ); stockPrice = (real *)malloc(OPT_SZ); optionStrike = (real *)malloc(OPT_SZ); optionYears = (real *)malloc(OPT_SZ); srand(5347); //Generate options set for(int i = 0; i < OPT_N; i++){ callResultCPU[i] = (real)0.0; putResultCPU[i] = (real)-1.0; 
callResultGPU[i] = (real)0.0; putResultGPU[i] = (real)-1.0; stockPrice[i] = (real)RandFloat(5.0f, 30.0f); optionStrike[i] = (real)RandFloat(1.0f, 100.0f); optionYears[i] = (real)RandFloat(0.25f, 10.0f); } #ifdef _OPENACC // run once outside timer to initialize/prime acc_init(acc_device_nvidia); #endif BlackScholes( callResultGPU, putResultGPU, stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY, OPT_N, 1 ); printf("Running Unaccelerated Version %d iterations...\n", iterations); StartTimer(); for (int i = 0; i < iterations; i++) { BlackScholes( callResultCPU, putResultCPU, stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY, OPT_N, 0 ); } double ms = GetTimer() / iterations; printf("Running Accelerated Version %d iterations...\n", iterations); StartTimer(); for (int i = 0; i < iterations; i++) { BlackScholes( callResultGPU, putResultGPU, stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY, OPT_N, 1 ); } double msAccelerated = GetTimer() / iterations; //Both call and put is calculated printf("Options count : %i \n", 2 * OPT_N); printf("Unaccelerated:\n"); printf("\tBlackScholes() time : %f msec\n", ms); printf("\t%f GB/s, %f GOptions/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (ms * 1E-3), ((double)(2 * OPT_N) * 1E-9) / (ms * 1E-3)); printf("Accelerated:\n"); printf("\tBlackScholes() time : %f msec\n", msAccelerated); printf("\t%f GB/s, %f GOptions/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (msAccelerated * 1E-3), ((double)(2 * OPT_N) * 1E-9) / (msAccelerated * 1E-3)); printf("Comparing the results...\n"); //Calculate max absolute difference and L1 distance //between CPU and GPU results sum_delta = 0; sum_ref = 0; max_delta = 0; for(int i = 0; i < OPT_N; i++){ ref = callResultCPU[i]; delta = fabs(callResultCPU[i] - callResultGPU[i]); if(delta > max_delta) max_delta = delta; sum_delta += delta; sum_ref += fabs(ref); } L1norm = sum_delta / sum_ref; printf("L1 norm: %E\n", L1norm); printf("Max absolute error: %E\n\n", max_delta); if (max_delta > 2.0e-5) { printf("Test FAILED\n"); } else { printf("Test PASSED\n"); } free(callResultCPU); free(putResultCPU); free(callResultGPU); free(putResultGPU); free(stockPrice); free(optionStrike); free(optionYears); return 0; }
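One property of the closed-form expressions above worth noting: because callResult and putResult share the same CNDD1/CNDD2 terms, their difference satisfies put-call parity, call - put = S - X*exp(-R*T), regardless of what CND returns. A small self-contained check of that identity (inputs are illustrative; it uses the exact normal CDF via erfc from C99 <math.h> in place of the kernel's polynomial CND):

/* Put-call parity: call - put == S - X*exp(-R*T) up to rounding,
 * since the normal-CDF terms cancel in the difference. */
#include <math.h>
#include <stdio.h>

int main(void)
{
  double S = 20.0, X = 18.0, T = 2.0;       /* illustrative option */
  double R = 0.02, V = 0.30;
  double sqrtT = sqrt(T);
  double d1 = (log(S / X) + (R + 0.5 * V * V) * T) / (V * sqrtT);
  double d2 = d1 - V * sqrtT;
  double N1 = 0.5 * erfc(-d1 / sqrt(2.0));  /* exact normal CDF */
  double N2 = 0.5 * erfc(-d2 / sqrt(2.0));
  double expRT = exp(-R * T);
  double call = S * N1 - X * expRT * N2;
  double put = X * expRT * (1.0 - N2) - S * (1.0 - N1);
  printf("call - put      = %.10f\n", call - put);
  printf("S - X*exp(-RT)  = %.10f\n", S - X * expRT);
  return 0;
}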
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <openacc.h> #include "timer.h" #ifdef FP64 typedef double real; #define SQRT(x) sqrt((x)) #define EXP(x) exp((x)) #define FABS(x) fabs((x)) #define LOG(x) log((x)) #else typedef float real; #define SQRT(x) sqrtf((x)) #define EXP(x) expf((x)) #define FABS(x) fabsf((x)) #define LOG(x) logf((x)) #endif const float RISKFREE = 0.02f; const float VOLATILITY = 0.30f; /////////////////////////////////////////////////////////////////////////////// //Polynomial approximation of cumulative normal distribution function /////////////////////////////////////////////////////////////////////////////// #pragma acc routine seq real CND(real d) { const real A1 = (real) 0.31938153; const real A2 = (real) -0.356563782; const real A3 = (real) 1.781477937; const real A4 = (real) -1.821255978; const real A5 = (real) 1.330274429; const real RSQRT2PI = (real) 0.39894228040143267793994605993438; real K = (real) 1.0 / ((real) 1.0 + (real) 0.2316419 * FABS(d)); real cnd = RSQRT2PI * EXP(-(real) 0.5 * d * d) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))); if (d > 0) cnd = (real) 1.0 - cnd; return cnd; } //////////////////////////////////////////////////////////////////////////////// //Process an array of optN options //////////////////////////////////////////////////////////////////////////////// void BlackScholes( real * restrict callResult, real * restrict putResult, real * restrict stockPrice, real * restrict optionStrike, real * restrict optionYears, real Riskfree, real Volatility, int optN, int accelerate) { #pragma acc parallel loop if (accelerate) for (int opt = 0; opt < optN; opt++) { real S = stockPrice[opt]; real X = optionStrike[opt]; real T = optionYears[opt]; real R = Riskfree, V = Volatility; real sqrtT = SQRT(T); real d1 = (LOG(S / X) + (R + (real) 0.5 * V * V) * T) / (V * sqrtT); real d2 = d1 - V * sqrtT; real CNDD1 = CND(d1); real CNDD2 = CND(d2); //Calculate Call and Put simultaneously real expRT = EXP(-R * T); callResult[opt] = (real) (S * CNDD1 - X * expRT * CNDD2); putResult[opt] = (real) (X * expRT * ((real) 1.0 - CNDD2) - S * ((real) 1.0 - CNDD1)); } } float RandFloat(float low, float high) { float t = (float)rand() / (float)RAND_MAX; return (1.0f - t) * low + t * high; } int main(int argc, char **argv) { int OPT_N = 4000000; int OPT_SZ = OPT_N * sizeof(float); int iterations = 10; if (argc >= 2) iterations = atoi(argv[1]); real // Results calculated by CPU for reference *callResultCPU, *putResultCPU, //GPU results *callResultGPU, *putResultGPU, //CPU instance of input data *stockPrice, *optionStrike, *optionYears; real delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime; printf("Initializing data...\n"); callResultCPU = (real *) malloc(OPT_SZ); putResultCPU = (real *) malloc(OPT_SZ); callResultGPU = (real *) malloc(OPT_SZ); putResultGPU = (real *) malloc(OPT_SZ); stockPrice = (real *) malloc(OPT_SZ); optionStrike = (real *) malloc(OPT_SZ); optionYears = (real *) malloc(OPT_SZ); srand(5347); //Generate options set for (int i = 0; i < OPT_N; i++) { callResultCPU[i] = (real) 0.0; putResultCPU[i] = (real) -1.0; callResultGPU[i] = (real) 0.0; putResultGPU[i] = (real) -1.0; stockPrice[i] = (real) RandFloat(5.0f, 30.0f); optionStrike[i] = (real) RandFloat(1.0f, 100.0f); optionYears[i] = (real) RandFloat(0.25f, 10.0f); } #ifdef _OPENACC //run once outside timer to initialize/prime acc_init(acc_device_nvidia); #endif BlackScholes( callResultGPU, putResultGPU, stockPrice, optionStrike, optionYears, RISKFREE,
VOLATILITY, OPT_N, 1 ); printf("Running Unaccelerated Version %d iterations...\n", iterations); StartTimer(); for (int i = 0; i < iterations; i++) { BlackScholes( callResultCPU, putResultCPU, stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY, OPT_N, 0 ); } double ms = GetTimer() / iterations; printf("Running Accelerated Version %d iterations...\n", iterations); StartTimer(); for (int i = 0; i < iterations; i++) { BlackScholes( callResultGPU, putResultGPU, stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY, OPT_N, 1 ); } double msAccelerated = GetTimer() / iterations; //Both call and put is calculated printf("Options count : %i \n", 2 * OPT_N); printf("Unaccelerated:\n"); printf("\tBlackScholes() time : %f msec\n", ms); printf("\t%f GB/s, %f GOptions/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (ms * 1E-3), ((double)(2 * OPT_N) * 1E-9) / (ms * 1E-3)); printf("Accelerated:\n"); printf("\tBlackScholes() time : %f msec\n", msAccelerated); printf("\t%f GB/s, %f GOptions/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (msAccelerated * 1E-3), ((double)(2 * OPT_N) * 1E-9) / (msAccelerated * 1E-3)); printf("Comparing the results...\n"); //Calculate max absolute difference and L1 distance // between CPU and GPU results sum_delta = 0; sum_ref = 0; max_delta = 0; for (int i = 0; i < OPT_N; i++) { ref = callResultCPU[i]; delta = fabs(callResultCPU[i] - callResultGPU[i]); if (delta > max_delta) max_delta = delta; sum_delta += delta; sum_ref += fabs(ref); } L1norm = sum_delta / sum_ref; printf("L1 norm: %E\n", L1norm); printf("Max absolute error: %E\n\n", max_delta); if (max_delta > 2.0e-5) { printf("Test FAILED\n"); } else { printf("Test PASSED\n"); } free(callResultCPU); free(putResultCPU); free(callResultGPU); free(putResultGPU); free(stockPrice); free(optionStrike); free(optionYears); return 0; }
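For reference, the GB/s figures printed above count five floats of memory traffic per option: three input arrays read (stockPrice, optionStrike, optionYears) and two result arrays written. With OPT_N = 4,000,000 that is 5 * 4e6 * 4 bytes = 80 MB per pass, so, for example, a 10 ms pass reports 8 GB/s. A tiny sketch of that arithmetic (the 10 ms timing is hypothetical):

/* Reported-bandwidth formula: 5 float arrays touched per option. */
#include <stdio.h>

int main(void)
{
  int OPT_N = 4000000;
  double ms = 10.0;  /* hypothetical per-pass time */
  double gbps = ((double) (5 * OPT_N * sizeof(float)) * 1E-9) / (ms * 1E-3);
  printf("%f GB/s\n", gbps);  /* 80 MB / 0.01 s = 8 GB/s */
  return 0;
}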
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <openacc.h> #include "timer.h" #ifdef FP64 typedef double real; #define SQRT(x) sqrt((x)) #define EXP(x) exp((x)) #define FABS(x) fabs((x)) #define LOG(x) log((x)) #else typedef float real; #define SQRT(x) sqrtf((x)) #define EXP(x) expf((x)) #define FABS(x) fabsf((x)) #define LOG(x) logf((x)) #endif const float RISKFREE = 0.02f; const float VOLATILITY = 0.30f; /////////////////////////////////////////////////////////////////////////////// //Polynomial approximation of cumulative normal distribution function /////////////////////////////////////////////////////////////////////////////// #pragma acc routine seq real CND(real d) { const real A1 = (real) 0.31938153; const real A2 = (real) -0.356563782; const real A3 = (real) 1.781477937; const real A4 = (real) -1.821255978; const real A5 = (real) 1.330274429; const real RSQRT2PI = (real) 0.39894228040143267793994605993438; real K = (real) 1.0 / ((real) 1.0 + (real) 0.2316419 * FABS(d)); real cnd = RSQRT2PI * EXP(-(real) 0.5 * d * d) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))); if (d > 0) cnd = (real) 1.0 - cnd; return cnd; } //////////////////////////////////////////////////////////////////////////////// //Process an array of optN options //////////////////////////////////////////////////////////////////////////////// void BlackScholes( real * restrict callResult, real * restrict putResult, real * restrict stockPrice, real * restrict optionStrike, real * restrict optionYears, real Riskfree, real Volatility, int optN, int accelerate) { #pragma omp parallel for if (accelerate) #pragma acc parallel loop if (accelerate) for (int opt = 0; opt < optN; opt++) { real S = stockPrice[opt]; real X = optionStrike[opt]; real T = optionYears[opt]; real R = Riskfree, V = Volatility; real sqrtT = SQRT(T); real d1 = (LOG(S / X) + (R + (real) 0.5 * V * V) * T) / (V * sqrtT); real d2 = d1 - V * sqrtT; real CNDD1 = CND(d1); real CNDD2 = CND(d2); //Calculate Call and Put simultaneously real expRT = EXP(-R * T); callResult[opt] = (real) (S * CNDD1 - X * expRT * CNDD2); putResult[opt] = (real) (X * expRT * ((real) 1.0 - CNDD2) - S * ((real) 1.0 - CNDD1)); } } float RandFloat(float low, float high) { float t = (float)rand() / (float)RAND_MAX; return (1.0f - t) * low + t * high; } int main(int argc, char **argv) { int OPT_N = 4000000; int OPT_SZ = OPT_N * sizeof(float); int iterations = 10; if (argc >= 2) iterations = atoi(argv[1]); real // Results calculated by CPU for reference *callResultCPU, *putResultCPU, //GPU results *callResultGPU, *putResultGPU, //CPU instance of input data *stockPrice, *optionStrike, *optionYears; real delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime; printf("Initializing data...\n"); callResultCPU = (real *) malloc(OPT_SZ); putResultCPU = (real *) malloc(OPT_SZ); callResultGPU = (real *) malloc(OPT_SZ); putResultGPU = (real *) malloc(OPT_SZ); stockPrice = (real *) malloc(OPT_SZ); optionStrike = (real *) malloc(OPT_SZ); optionYears = (real *) malloc(OPT_SZ); srand(5347); //Generate options set for (int i = 0; i < OPT_N; i++) { callResultCPU[i] = (real) 0.0; putResultCPU[i] = (real) -1.0; callResultGPU[i] = (real) 0.0; putResultGPU[i] = (real) -1.0; stockPrice[i] = (real) RandFloat(5.0f, 30.0f); optionStrike[i] = (real) RandFloat(1.0f, 100.0f); optionYears[i] = (real) RandFloat(0.25f, 10.0f); } #ifdef _OPENACC //run once outside timer to initialize/prime acc_init(acc_device_nvidia); #endif BlackScholes( callResultGPU, putResultGPU,
stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY, OPT_N, 1 ); printf("Running Unaccelerated Version %d iterations...\n", iterations); StartTimer(); for (int i = 0; i < iterations; i++) { BlackScholes( callResultCPU, putResultCPU, stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY, OPT_N, 0 ); } double ms = GetTimer() / iterations; printf("Running Accelerated Version %d iterations...\n", iterations); StartTimer(); for (int i = 0; i < iterations; i++) { BlackScholes( callResultGPU, putResultGPU, stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY, OPT_N, 1 ); } double msAccelerated = GetTimer() / iterations; //Both call and put is calculated printf("Options count : %i \n", 2 * OPT_N); printf("Unaccelerated:\n"); printf("\tBlackScholes() time : %f msec\n", ms); printf("\t%f GB/s, %f GOptions/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (ms * 1E-3), ((double)(2 * OPT_N) * 1E-9) / (ms * 1E-3)); printf("Accelerated:\n"); printf("\tBlackScholes() time : %f msec\n", msAccelerated); printf("\t%f GB/s, %f GOptions/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (msAccelerated * 1E-3), ((double)(2 * OPT_N) * 1E-9) / (msAccelerated * 1E-3)); printf("Comparing the results...\n"); //Calculate max absolute difference and L1 distance // between CPU and GPU results sum_delta = 0; sum_ref = 0; max_delta = 0; for (int i = 0; i < OPT_N; i++) { ref = callResultCPU[i]; delta = fabs(callResultCPU[i] - callResultGPU[i]); if (delta > max_delta) max_delta = delta; sum_delta += delta; sum_ref += fabs(ref); } L1norm = sum_delta / sum_ref; printf("L1 norm: %E\n", L1norm); printf("Max absolute error: %E\n\n", max_delta); if (max_delta > 2.0e-5) { printf("Test FAILED\n"); } else { printf("Test PASSED\n"); } free(callResultCPU); free(putResultCPU); free(callResultGPU); free(putResultGPU); free(stockPrice); free(optionStrike); free(optionYears); return 0; }
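The only substantive difference between the two formatted variants is the added #pragma omp parallel for if (accelerate) directive: OpenMP's if clause makes the parallel region conditional, so when accelerate is zero the loop executes on a team of one thread, mirroring the OpenACC if clause on the same loop. A minimal standalone sketch of that behavior (compile with an OpenMP flag such as -fopenmp):

/* OpenMP if clause: the region is parallel only when the runtime
 * condition is true; otherwise it runs serially. */
#include <omp.h>
#include <stdio.h>

static void run(int accelerate)
{
  int team = 1;
  #pragma omp parallel for if (accelerate)
  for (int i = 0; i < 8; i++) {
    if (i == 0)
      team = omp_get_num_threads();  /* 1 when accelerate == 0 */
  }
  printf("accelerate=%d -> team of %d thread(s)\n", accelerate, team);
}

int main(void)
{
  run(0);  /* serial */
  run(1);  /* parallel; team size set by the runtime */
  return 0;
}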
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token; all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows us /// to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while the function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here.
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. 
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// True if the current expression is a member bounds expression /// for a structure. Member bounds expressions can only reference /// members and cannot reference variables. bool IsMemberBoundsExpr; std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. 
typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. 
sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). 
QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. 
struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering(), ExprContext(ExprContext) {} /// Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair.
The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. 
In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
    ImplicitlyRetainedSelfLocs;

/// Kinds of C++ special members.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  CXXInvalid
};

typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
    SpecialMemberDecl;

/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
  FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

private:
  Sema& S;
  FPOptions OldFPFeaturesState;
};

void addImplicitTypedef(StringRef Name, QualType T);

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// Get a string to suggest for zero-initialization of a type.
std::string getFixItZeroInitializerForType(QualType T,
                                           SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

void emitAndClearUnusedLocalTypedefWarnings();

enum TUFragmentKind {
  /// The global module fragment, between 'module;' and a module-declaration.
  Global,
  /// A normal translation unit fragment. For a non-module unit, this is the
  /// entire translation unit. Otherwise, it runs from the module-declaration
  /// to the private-module-fragment (if any) or the end of the TU (if not).
Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, CheckedPointerKind kind, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, CheckedArrayKind Kind, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
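///
/// A minimal sketch of the round trip (illustrative only; the QualType 'T',
/// TypeSourceInfo 'TInfo', and Sema reference 'S' are assumed to be in hand):
/// \code
///   ParsedType PT = S.CreateParsedType(T, TInfo);
///   TypeSourceInfo *TSI = nullptr;
///   QualType Unpacked = Sema::GetTypeFromParser(PT, &TSI); // Unpacked == T
/// \endcode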
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
  {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, llvm::index_sequence_for<Ts...>());
    DB << T;
  }
};

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether their
/// address is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             TypeDiagnoser *Diagnoser);

struct ModuleScope {
  SourceLocation BeginLoc;
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  bool ImplicitGlobalModuleFragment = false;
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;

/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

VisibleModuleSet VisibleModules;

public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

/// Make a merged definition of an existing hidden definition \p ND
/// visible.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  return !D->isHidden() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
                      llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
                          llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
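///
/// (Illustrative sketch: if 'template<> struct X<int>;' is declared only in
/// a module that has not been imported, this returns false; when \p Modules
/// is provided, it is assumed here to collect the modules whose import would
/// make the specialization visible, feeding the missing-import diagnostics
/// below.)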
bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate, NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case 
NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
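    // (Illustrative: for 'x.f < 3' where 'x' has a dependent type and 'f' is
    // written without explicit template arguments, the
    // CXXDependentScopeMemberExpr case above returns true with Dependent set,
    // so the caller may suggest that 'f' was intended to be a template name.)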
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void 
FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
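///
/// A minimal sketch, assuming ARC is enabled (illustrative only):
/// \code
///   union U { __strong id Obj; int I; };  // non-trivial to copy/initialize
///   void take(union U Arg);               // checked as NTCUC_FunctionParam
///   union U give(void);                   // checked as NTCUC_FunctionReturn
/// \endcode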
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit, SourceLocation EqualLoc = SourceLocation()); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); bool ValidateNTCheckedType(ASTContext &C, QualType VDeclType, Expr *Init); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. 
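///
/// (Illustrative: with a warning threshold of N bytes configured, e.g. via
/// -Wlarge-by-value-copy=N, a parameter or return value larger than N bytes
/// that is passed by value is flagged here; the exact flag spelling is an
/// assumption of this note.)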
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                            QualType ReturnTy, NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                            SourceLocation SemiLoc);

enum class ModuleDeclKind {
  Interface,      ///< 'export module X;'
  Implementation, ///< 'module X;'
};

/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                               SourceLocation ModuleLoc, ModuleDeclKind MDK,
                               ModuleIdPath Path, bool IsFirstDecl);

/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                              SourceLocation PrivateLoc);

/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, Module *M,
                             ModuleIdPath Path = {});

/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
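///
/// A typical recovery sequence (a sketch, not the exact implementation):
/// \code
///   NamedDecl *Hidden = nullptr;
///   if (!hasVisibleDefinition(D, &Hidden))   // definition not visible here
///     diagnoseMissingImport(Loc, Hidden, MissingImportKind::Definition,
///                           /*Recover=*/true);
/// \endcode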
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
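///
/// For instance (illustrative):
/// \code
///   typedef struct S T;  // getNonTagTypeDeclKind -> NTK_Typedef
///   using U = struct S;  // getNonTagTypeDeclKind -> NTK_TypeAlias
/// \endcode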
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr, RecordDecl::Genericity GenericKind = RecordDecl::NonGeneric, ArrayRef<TypedefDecl *> TypeParams = ArrayRef<TypedefDecl *> {nullptr, 0} ); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); FieldDecl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). 
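///
/// For 'struct foo { int X; } Y;' the parser roughly drives the callbacks in
/// this order (a sketch, not a complete list):
/// \code
///   ActOnTag(..., TUK_Definition, ...);        // create the TagDecl
///   ActOnTagStartDefinition(S, TagDecl);       // enter the tag's scope
///   ActOnFields(S, RecLoc, TagDecl, ...);      // process the members
///   ActOnTagFinishDefinition(S, TagDecl, BraceRange);
/// \endcode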
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); /// Push the parameters listed in Params into scope. void ActOnSetupParametersAgain(Scope* S, ArrayRef<ParmVarDecl *> Params); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. 
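///
/// (Illustrative use, e.g. while checking a 'return' statement:)
/// \code
///   if (FunctionDecl *FD = getCurFunctionDecl())
///     QualType RetTy = FD->getReturnType();  // declared return type
/// \endcode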
FunctionDecl *getCurFunctionDecl();

/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();

/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();

/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                   bool AllowInlineNamespace = false);

/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,

  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,

  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,

  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,

  /// The availability attribute was applied using '#pragma clang attribute'.
  AP_PragmaClangAttribute = 1,

  /// The availability attribute for a specific platform was inferred from
  /// an availability attribute for another platform.
  AP_InferredFromOtherPlatform = 2
};

/// Attribute merging methods. Return true if a new attribute was added.
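///
/// (Worked example of the priority arithmetic above: an attribute inferred
/// from another platform while inside a '#pragma clang attribute' block has
/// final priority AP_PragmaClangAttribute + AP_InferredFromOtherPlatform ==
/// 1 + 2 == 3, so it can never displace an explicitly written attribute,
/// whose priority is AP_Explicit == 0.)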
AvailabilityAttr *mergeAvailabilityAttr( NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // Checked C specific methods for merging function declarations. bool CheckedCFunctionDeclCompatibility(FunctionDecl *New, FunctionDecl *Old); bool CheckedCMergeFunctionDecls(FunctionDecl *New, FunctionDecl *Old); bool DiagnoseCheckedCFunctionCompatibility(FunctionDecl *New, FunctionDecl *Old); // used for %select in diagnostics for errors involving checked types. 
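// (Illustrative: a diagnostic text such as
// "cannot use %select{any|struct|union}0 type here" would be indexed by
// these enumerator values, in declaration order.)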
enum class CheckedTypeClassification { CCT_Any, CCT_Struct, CCT_Union }; // used for %select in diagnostics for errors involving redeclarations // with bounds enum class CheckedCBoundsError { CCBE_Parameter, CCBE_Return, CCBE_Variable }; // used for %select in diagnostics for errors involving redeclarations // with bounds annotations. enum class BoundsAnnotationKind { Bounds, IType }; CheckedTypeClassification classifyForCheckedTypeDiagnostic(QualType qt); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult 
PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. 
bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
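// A hedged sketch of implementing the contextual-conversion diagnoser
// interface declared earlier in this section. Outside Sema, the nested names
// must be qualified. ICEConvertDiagnoser already supplies match() and
// diagnoseNoMatch(); a client fills in the remaining hooks. The diag:: IDs
// below are placeholders (assumptions), not real DiagnosticSemaKinds entries.
class MyIntContextDiagnoser : public Sema::ICEConvertDiagnoser {
public:
  MyIntContextDiagnoser()
      : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
                            /*Suppress=*/false,
                            /*SuppressConversion=*/false) {}

  Sema::SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                             QualType T) override {
    return S.Diag(Loc, diag::err_placeholder_requires_int) << T; // placeholder ID
  }
  Sema::SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
                                                 QualType T) override {
    return S.Diag(Loc, diag::err_placeholder_incomplete_type) << T; // placeholder ID
  }
  Sema::SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
                                                   QualType T,
                                                   QualType ConvTy) override {
    return S.Diag(Loc, diag::err_placeholder_explicit_conv) << T << ConvTy; // placeholder ID
  }
  Sema::SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
                                               QualType ConvTy) override {
    return S.Diag(Conv->getLocation(), diag::note_placeholder_conv) << ConvTy; // placeholder ID
  }
  Sema::SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
                                                QualType T) override {
    return S.Diag(Loc, diag::err_placeholder_ambiguous_conv) << T; // placeholder ID
  }
  Sema::SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
                                            QualType ConvTy) override {
    return S.Diag(Conv->getLocation(), diag::note_placeholder_conv) << ConvTy; // placeholder ID
  }
  Sema::SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
                                                 QualType T,
                                                 QualType ConvTy) override {
    return S.Diag(Loc, diag::err_placeholder_unexpected_conv) << T << ConvTy; // placeholder ID
  }
};
// Used roughly as: S.PerformContextualImplicitConversion(Loc, E, Diagnoser);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),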
Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD);
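// A standalone illustration (plain C++, not clang code) of the language rule
// the functions above implement: the target type selects one member of an
// overload set, after unwrapping pointer/reference/member-pointer types as
// tabulated in the comment before ExtractUnqualifiedFunctionType.
inline int pick(int x) { return x; }
inline double pick(double x) { return x; }

using IntFn = int (*)(int);
inline IntFn resolveByTargetType() {
  IntFn p = &pick;      // R (*)(A) --> R (A): the int overload is chosen
  int (&r)(int) = pick; // R (&)(A) --> R (A): same overload, bound by reference
  (void)r;
  return p;
}
/// Helpers for dealing with blocks and functions.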
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria are specified via the LookupNameKind enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. 
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloads. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true);
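// A hedged usage sketch of the simple lookup entry point declared above:
// resolve an identifier to at most one ordinary declaration (null on absence,
// ambiguity, or overload). The helper name is invented; SemaRef, S, II and
// Loc are assumed to come from surrounding code.
static NamedDecl *findOrdinaryDecl(Sema &SemaRef, Scope *S,
                                   IdentifierInfo *II, SourceLocation Loc) {
  return SemaRef.LookupSingleName(S, DeclarationName(II), Loc,
                                  Sema::LookupOrdinaryName);
}
enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery.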
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
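// A hedged sketch of the Filter parameter documented above: the filter
// returns ExprError() to reject a rebuilt expression, and different
// combinations of typo corrections are then tried until one is accepted or
// all are exhausted. The helper name and record-type predicate are arbitrary
// illustrative choices.
static ExprResult correctTyposAcceptingRecordTypes(Sema &S, Expr *E) {
  return S.CorrectDelayedTyposInExpr(
      E, /*InitDecl=*/nullptr, [](Expr *Rebuilt) -> ExprResult {
        if (Rebuilt->getType()->isRecordType())
          return Rebuilt;
        return ExprError(); // reject; try the next correction combination
      });
}
// Helper for delayed processing of attributes.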
void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false);
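// A hedged sketch combining two of the attribute helpers declared above:
// fetch attribute argument 0 as a string literal, then validate it as a
// section name. Both helpers are assumed to return true on success and to
// emit their own diagnostics on failure; the wrapper name is invented.
static bool validateSectionAttrArg(Sema &S, const ParsedAttr &AL,
                                   StringRef &Name) {
  SourceLocation LiteralLoc;
  if (!S.checkStringLiteralArgumentAttr(AL, /*ArgNum=*/0, Name, &LiteralLoc))
    return false; // not a string literal; assumed already diagnosed
  return S.checkSectionName(LiteralLoc, Name);
}
/// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation.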
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// its property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' types match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in interface /// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in global method pool for /// given selector. It checks the desired kind first; if none is found and /// parameter checkTheOther is set, it then checks the other kind. If no such /// method or only one method is found, function returns false; otherwise, it /// returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr, CheckedScopeSpecifier WrittenCSS = CSS_None, SourceLocation CSSLoc = SourceLocation(), SourceLocation CSMLoc = SourceLocation()); private: CheckedScopeSpecifier CheckingKind; // Keep a stack of saved checked scope information. 
class SavedCheckedScope { public: SavedCheckedScope(CheckedScopeSpecifier S, SourceLocation L) : Loc(L), Saved(S) {} SourceLocation Loc; CheckedScopeSpecifier Saved; }; SmallVector<SavedCheckedScope, 8> CheckingKindStack; // can be empty public: CheckedScopeSpecifier GetCheckedScopeInfo() { return CheckingKind; } void SetCheckedScopeInfo(CheckedScopeSpecifier CSS) { CheckingKind = CSS; } void PushCheckedScopeInfo(SourceLocation Loc) { CheckingKindStack.push_back(SavedCheckedScope(CheckingKind, Loc)); } bool PopCheckedScopeInfo() { if (CheckingKindStack.size() > 0) { CheckingKind = CheckingKindStack.back().Saved; CheckingKindStack.pop_back(); return false; } else return true; } void DiagnoseUnterminatedCheckedScope(); bool IsCheckedScope() { return CheckingKind != CSS_Unchecked; } class CheckedScopeRAII { Sema &SemaRef; CheckedScopeSpecifier PrevCheckingKind; public: CheckedScopeRAII(Sema &SemaRef, CheckedScopeSpecifier CSS) : SemaRef(SemaRef), PrevCheckingKind(SemaRef.CheckingKind) { if (CSS != CSS_None) SemaRef.CheckingKind = CSS; } CheckedScopeRAII(Sema &S, DeclSpec &DS) : CheckedScopeRAII(S, DS.getCheckedScopeSpecifier()) { } ~CheckedScopeRAII() { SemaRef.CheckingKind = PrevCheckingKind; } }; /// A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false, CheckedScopeSpecifier CSS = CSS_None): S(S), CheckedProperties(S, CSS) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; CheckedScopeRAII CheckedProperties; }; /// An RAII helper that pops a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body);
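// A standalone model (illustrative, not clang code) of the checked-scope
// bookkeeping above: a current specifier, plus an RAII guard that restores
// the previous specifier on scope exit, as CheckedScopeRAII does. All names
// here are invented for the illustration.
#include <cassert>

namespace checked_scope_model {

enum Specifier { None, Unchecked, Checked };

struct State {
  Specifier Kind = Unchecked;
};

struct ScopeGuard {
  State &S;
  Specifier Prev;
  ScopeGuard(State &St, Specifier CSS) : S(St), Prev(St.Kind) {
    if (CSS != None) // None means "inherit the enclosing scope's kind"
      S.Kind = CSS;
  }
  ~ScopeGuard() { S.Kind = Prev; }
};

inline void demo() {
  State S;
  {
    ScopeGuard G(S, Checked); // entering a _Checked compound statement
    assert(S.Kind == Checked);
  }
  assert(S.Kind == Unchecked); // restored on scope exit
}

} // namespace checked_scope_model
ExprResult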
CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc);
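// CopyElisionSemanticsKind is composed as a bitmask; a quick standalone check
// (illustrative, not clang code) of how the composed defaults above decompose
// into individual permissions.
#include <cassert>
inline void copyElisionFlagsDemo() {
  enum K {
    Strict = 0,
    AllowParameters = 1,
    AllowDifferentTypes = 2,
    AllowExceptionVariables = 4,
    Default = AllowParameters | AllowDifferentTypes,
  };
  assert(Default & AllowParameters);
  assert(!(Default & AllowExceptionVariables)); // only the AsIfByStdMove variant allows these
}
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned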
NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); enum CheckedScopeTypeLocation { CSTL_TopLevel, CSTL_Nested, CSTL_BoundsSafeInterface }; /// Returns true if Ty is allowed in a checked scope: /// - If Ty is a pointer or array type, it must be a checked pointer or /// array type or an unchecked pointer or array type with a bounds-safe /// interface. /// - This rule applies recursively to any types nested within Ty. /// - All other types are allowed in checked scopes. 
/// Return false if Ty is not allowed. bool AllowedInCheckedScope(QualType Ty, const InteropTypeExpr *InteropType, bool IsParam, CheckedScopeTypeLocation Loc, CheckedScopeTypeLocation &ProblemLoc, QualType &ProblemTy); // Enum for diagnostic message that describes the type of declaration // being checked. enum CheckedDeclKind { CDK_Parameter, CDK_FunctionReturn, CDK_LocalVariable, CDK_GlobalVariable, CDK_Member }; /// \param D - the target declaration /// \param UseLoc - defaults to an invalid location at the declaration; /// it is valid only if the declaration is regarded as a use of the variable /// \returns true if the target declaration is a valid checked declaration bool DiagnoseCheckedDecl(const ValueDecl *D, SourceLocation UseLoc = SourceLocation()); bool DiagnoseTypeInCheckedScope(QualType Ty, SourceLocation Start, SourceLocation End); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced.
These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope.
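/// For example (an illustrative sketch): with 'int n;', a reference to 'n'
/// inside a non-mutable by-value lambda capture has type 'const int', while
/// a by-reference capture leaves it as 'int':
/// \code
///   int n = 0;
///   auto byVal = [=] { return n; };  // 'n' has type 'const int' here
///   auto byRef = [&] { n = 1; };     // 'n' has type 'int' here
/// \endcode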
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
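/// For instance (illustrative): in
/// \code
///   constexpr int N = 4;
///   int A[N];   // reading 'N' here is a constant read, not an odr-use
/// \endcode
/// the reference to 'N' yields NOUR_Constant rather than NOUR_None.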
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
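// For example (an illustrative sketch):
//   struct T { int field; };
//   struct P { T *operator->(); };
//   P p;
//   p.field;   // no member 'field' in 'P'; recovery retries as 'p->field'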
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op, bool isCheckedScope = false); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression.
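// For example (GNU statement expression, illustrative; 'compute' is a
// hypothetical function):
//   int r = ({ int t = compute(); t + 1; });  // the value of the whole
//                                             // expression is 't + 1'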
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. 
^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Checked C Extension ----------------------===// private: QualType ValidateBoundsExprArgument(Expr *Arg); public: ExprResult ActOnNullaryBoundsExpr(SourceLocation BoundsKWLoc, BoundsExpr::Kind Kind, SourceLocation RParenLoc); ExprResult ActOnCountBoundsExpr(SourceLocation BoundsKWLoc, BoundsExpr::Kind Kind, Expr *CountExpr, SourceLocation RParenLoc); ExprResult ActOnRangeBoundsExpr(SourceLocation BoundsKWLoc, Expr *LowerBound, Expr *UpperBound, SourceLocation RParenLoc); ExprResult CreateRangeBoundsExpr(SourceLocation BoundsKWLoc, Expr *LowerBound, Expr *UpperBound, RelativeBoundsClause *Relative, SourceLocation RParenLoc); ExprResult ActOnBoundsInteropType(SourceLocation TypeKWLoc, ParsedType Ty, SourceLocation RParenLoc); ExprResult CreateBoundsInteropTypeExpr(SourceLocation TypeKWLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc); ExprResult CreatePositionalParameterExpr(unsigned Index, QualType QT); RelativeBoundsClause* ActOnRelativeTypeBoundsClause(SourceLocation BoundsKWLoc, ParsedType Ty, SourceLocation RParenLoc); RelativeBoundsClause * CreateRelativeTypeBoundsClause(SourceLocation BoundsKWLoc, TypeSourceInfo *TyInfo, SourceLocation RParenLoc); RelativeBoundsClause* ActOnRelativeConstExprClause(Expr *ConstExpr, SourceLocation BoundsKWLoc, SourceLocation RParenLoc); bool CheckBoundsCastBaseType(Expr *E1); ExprResult ActOnBoundsCastExprBounds(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, ParsedType D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E1, BoundsExpr *ParsedBounds); ExprResult ActOnBoundsCastExprSingle( Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, ParsedType D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E1); ExprResult BuildBoundsCastExpr(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *CastTypeInfo, SourceRange AngleBrackets, SourceRange Paren, Expr *E1, BoundsExpr *bounds); bool DiagnoseBoundsDeclType(QualType Ty, DeclaratorDecl *D, BoundsAnnotations &BA, bool IsReturnAnnots); /// \brief Update the information in ASTContext that tracks, for a member, /// which bounds declarations depend upon it. FD is the member whose /// bounds are given by Bounds. void TrackMemberBoundsDependences(FieldDecl *FD, BoundsExpr *Bounds); void ActOnBoundsDecl(DeclaratorDecl *D, BoundsAnnotations Annots, bool MergeDeferredBounds = false); void ActOnEmptyBoundsDecl(DeclaratorDecl *D); void ActOnInvalidBoundsDecl(DeclaratorDecl *D); /// \brief Add default bounds/interop type expressions to Annots, if appropriate. void InferBoundsAnnots(QualType Ty, BoundsAnnotations &Annots, bool IsParam); // \#pragma CHECKED_SCOPE. enum PragmaCheckedScopeKind { PCSK_On, PCSK_Off, PCSK_BoundsOnly, PCSK_Push, PCSK_Pop }; void ActOnPragmaCheckedScope(PragmaCheckedScopeKind Kind, SourceLocation Loc); void DiagnoseUnterminatedPragmaCheckedScopePush(); BoundsExpr *CreateInvalidBoundsExpr(); /// \brief Synthesize the interop type expression implied by the presence /// of a bounds expression. Ty is the original unchecked type. Returns null /// if none exists. InteropTypeExpr *SynthesizeInteropTypeExpr(QualType Ty, bool IsParam); BoundsExpr *CreateCountForArrayType(QualType QT); // _Return_value in Checked C bounds expressions.
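// For example (Checked C, an illustrative sketch): _Return_value names the
// value being returned inside a function's return bounds:
//   _Array_ptr<int> f(int n) : bounds(_Return_value, _Return_value + n);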
ExprResult ActOnReturnValueExpr(SourceLocation Loc); /// \brief When non-NULL, the type of the '_Return_value' expression. QualType BoundsExprReturnValue; /// \brief RAII object used to temporarily set the type of _Return_value class CheckedCReturnValueRAII { Sema &S; QualType OldReturnValue; public: CheckedCReturnValueRAII(Sema &S, QualType ReturnVal) : S(S) { OldReturnValue = S.BoundsExprReturnValue; S.BoundsExprReturnValue = ReturnVal; } ~CheckedCReturnValueRAII() { S.BoundsExprReturnValue = OldReturnValue; } }; typedef bool (*ParseDeferredBoundsCallBackFn)(void *P, std::unique_ptr<CachedTokens> Toks, ArrayRef<ParmVarDecl *> Params, BoundsAnnotations &Result, const Declarator &D); void SetDeferredBoundsCallBack(void *OpaqueData, ParseDeferredBoundsCallBackFn p); ParseDeferredBoundsCallBackFn DeferredBoundsParser; void *DeferredBoundsParserData; // Represents the context where an expression must be non-modifying. enum NonModifyingContext { NMC_Unknown, NMC_Dynamic_Check, NMC_Count, // Bounds count expression. NMC_Byte_Count, // Bounds byte count expression. NMC_Range, // Bounds range expression. NMC_Function_Return, // Argument for parameter used in function // return bounds. NMC_Function_Parameter // Argument for parameter used in function // parameter bounds. }; /// \brief The kind of message to emit when a modifying expression is found /// where a non-modifying expression is required. enum NonModifyingMessage { NMM_None, NMM_Error, NMM_Note }; /// \brief Checks whether an expression is non-modifying /// (see Checked C Spec, 3.6.1). Returns true if the expression is non-modifying, /// false otherwise. bool CheckIsNonModifying(Expr *E, NonModifyingContext Req = NonModifyingContext::NMC_Unknown, NonModifyingMessage = NMM_Error); BoundsExpr *CheckNonModifyingBounds(BoundsExpr *Bounds, Expr *E); ExprResult ActOnFunctionTypeApplication(ExprResult TypeFunc, SourceLocation Loc, ArrayRef<TypeArgument> Args); RecordDecl *ActOnRecordTypeApplication(RecordDecl *Base, ArrayRef<TypeArgument> TypeArgs); const ExistentialType *ActOnExistentialType(ASTContext &Context, const Type *TypeVar, QualType InnerType); /// Complete a delayed type application by populating the record's fields with the right types. /// Should only be called once per delayed 'RecordDecl'. void CompleteTypeAppFields(RecordDecl *Incomplete); // Determine whether the given 'RecordDecl' is part of an 'expanding cycle'. // Generic records that form part of an expanding cycle can't be instantiated because they // produce an infinite number of type applications (because we construct the transitive closure // of type applications eagerly). // // Consider the graph of type parameter dependencies as defined below. An expanding cycle // is a cycle in the graph that contains at least one expanding edge. // // We show how the graph is built via an example. Suppose we have three generic structs A<T>, B<U>, C<V>: // // struct A _For_any(T) { struct A<T>* a; struct B<T> *b; } // struct B _For_any(U) { struct C<struct C<U> > *c; } // struct C _For_any(V) { struct A<V>* a; } // // The vertices of the graph are T, U, and V (the type parameters, alpha-renamed if needed). // There is an edge between nodes N1 and N2 if N2 is used in a field anywhere in the position of N1. // If N2 appears at the "top-level" replacing N1, then the resulting edge is "non-expanding". // Otherwise, if N2 appears nested within the argument that replaces N1, then the edge is "expanding".
// // In our example the edges are: // // non-expanding: T -> T, T -> U, V -> T, U -> V // expanding: U => V // // T -> U, U => V, V -> T is an expanding cycle because it contains the expanding edge U => V // // The cycle will be detected when C is processed (because C is defined last). If we tried to instantiate C, we would // end up performing the following type applications: // A<V>, B<V>, C<C<V>>, A<C<V>>, B<C<V>>, C<C<C<V>>>, ... // // The definition of expanding cycle is adapted from the 'ECMA 335 Common Language Infrastructure (CLI) Partitions I to VI' standard. // Specifically, Partition II, section II.9.2 'Generics and recursive inheritance graphs'. bool DiagnoseExpandingCycles(RecordDecl *Base, SourceLocation Loc); QualType SubstituteTypeArgs(QualType QT, ArrayRef<TypeArgument> TypeArgs); std::vector<const TypedefNameDecl *> FindFreeVariableDecls(QualType T); bool AbstractForFunctionType(BoundsAnnotations &BA, ArrayRef<DeclaratorChunk::ParamInfo> Params); /// \brief Take a bounds expression with positional parameters from a function /// type and substitute DeclRefs to the corresponding parameters in Params. BoundsExpr *ConcretizeFromFunctionType(BoundsExpr *Expr, ArrayRef<ParmVarDecl *> Params); /// \brief Take a member bounds expression with member references and /// replace the member references with member access expressions using /// MemberBase as the base. Returns a nullptr if there is an error. BoundsExpr *MakeMemberBoundsConcrete(Expr *MemberBase, bool IsArrow, BoundsExpr *Bounds); BoundsExpr *ConcretizeFromFunctionTypeWithArgs(BoundsExpr *Bounds, ArrayRef<Expr *> Args, NonModifyingContext ErrorKind); /// ConvertToFullyCheckedType: convert an expression E to a fully checked type. This /// is used to retype declrefs and member exprs in checked scopes with bounds-safe /// interfaces. The Checked C spec says that such uses in checked scopes shall be /// treated as having "checked type". ExprResult ConvertToFullyCheckedType(Expr *E, InteropTypeExpr *BA, bool IsParamUse, ExprValueKind VK); /// GetArrayPtrDereference - determine if an lvalue expression is a /// dereference of an _Array_ptr or _Nt_array_ptr (via '*' or an array /// subscript operator). If it is, return the actual dereference expression /// and set Result to the pointer type being dereferenced. Otherwise, return /// null. Expr *GetArrayPtrDereference(Expr *E, QualType &Result); /// MakeAssignmentImplicitCastExplicit: E has had assignment conversion rules /// applied to it. If an implicit cast has been introduced because of the /// assignment conversion rules, replace it with an explicit cast. /// This allows us to substitute E into other operator expressions without worrying /// about the different implicit conversion rules between assignments and /// other operators. Sema tree rewriting assumes that semantic /// analysis will recreate implicit casts. That doesn't happen properly if /// E is taken from an assignment expression and used in another operator expression. Expr *MakeAssignmentImplicitCastExplicit(Expr *E); enum BoundsDeclarationCheck { BDC_Assignment, BDC_Initialization }; /// \brief Check that an address-of operation is not taking the /// address of members used in bounds. void CheckAddressTakenMembers(UnaryOperator *AddrOf); /// \brief Check whether E contains a return value expression. bool ContainsReturnValueExpr(Expr *E); /// \brief Wrap a call expression in a Checked C temporary binding /// expression, if a temporary is needed to describe the bounds /// of the result of the call expression.
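/// For example (Checked C, an illustrative sketch): given
/// \code
///   _Array_ptr<int> f(int n) : count(n);
/// \endcode
/// the bounds of 'f(3)' refer to the call's result value itself, so the
/// call is wrapped in a temporary binding that gives that value a name.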
ExprResult CreateTemporaryForCallIfNeeded(ExprResult R); /// CheckFunctionBodyBoundsDecls - check bounds declarations within a function /// body. void CheckFunctionBodyBoundsDecls(FunctionDecl *FD, Stmt *Body); /// CheckTopLevelBoundsDecls - check bounds declarations for variable declarations /// not within a function body. void CheckTopLevelBoundsDecls(VarDecl *VD); // WarnDynamicCheckAlwaysFails - Adds a warning if an explicit dynamic check // will always fail. void WarnDynamicCheckAlwaysFails(const Expr *Condition); // If the VarDecl D has a byte_count or count bounds expression, // NormalizeBounds expands it to a range bounds expression. The expanded // range bounds are attached to the VarDecl D to avoid recomputing the // normalized bounds for D. BoundsExpr *NormalizeBounds(const VarDecl *D); // This is a wrapper around CheckBoundsDeclaration::ExpandToRange. This // provides an easy way to invoke this function from outside the class. Given // a byte_count or count bounds expression for the VarDecl D, ExpandToRange // will expand it to a range bounds expression. BoundsExpr *ExpandBoundsToRange(const VarDecl *D, const BoundsExpr *B); // // Track variables that in-scope bounds declarations depend upon. // TODO: generalize this to other lvalue expressions. class BoundsDependencyTracker { public: typedef SmallVector<VarDecl *, 2> VarBoundsDecls; typedef VarBoundsDecls::iterator VarBoundsIterator; typedef llvm::iterator_range<VarBoundsIterator> VarBoundsIteratorRange; // mapping from variables to bounds that depend upon the variables. typedef std::map<VarDecl *, VarBoundsDecls> DependentMap; private: // Map variables to the bounds declarations that are // in scope and depend upon them. DependentMap Map; // Track the bounds that are in scope so that we can remove them from the // dependent map when the scope is exited. std::vector<VarDecl *> BoundsInScope; public: BoundsDependencyTracker() {} // Call these when entering/exiting scopes so that we can track when // variables go out of scope. EnterScope returns an integer // that should be passed to the corresponding ExitScope call. unsigned EnterScope(); void ExitScope(unsigned scopeBegin); // If D has a bounds declaration, add its dependencies to the existing // scope. void Add(VarDecl *D); VarBoundsIteratorRange DependentBoundsDecls(VarDecl *D) { auto Iter = Map.find(D); if (Iter == Map.end()) return VarBoundsIteratorRange(nullptr, nullptr); return VarBoundsIteratorRange(Iter->second.begin(), Iter->second.end()); } void Dump(raw_ostream &OS); }; BoundsDependencyTracker BoundsDependencies; // Map expressions that modify lvalues (assignments and pre/post // increment/decrement operations) to bounds that may depend on the modified // lvalues. We check the validity of bounds declarations after // expression statements using data flow analysis. During the analysis, // we need to know whether an expression modifies an lvalue involved in a // bounds invariant. The AST traversal order for determining this is lexical // and conflicts with preferred orderings for dataflow analysis, so we // precompute this information before analyzing a function body. class ModifiedBoundsDependencies { public: // A C lvalue expression with bounds on values stored in the lvalue. // It is either a variable or a member expression. struct LValueWithBounds { LValueWithBounds(llvm::PointerUnion<VarDecl *, MemberExpr *> Target, BoundsExpr *Bounds) : Target(Target), Bounds(Bounds) {} llvm::PointerUnion<VarDecl *, MemberExpr *> Target; BoundsExpr *Bounds; // Bounds for target.
}; typedef SmallVector<LValueWithBounds,2> LValuesWithBounds; // Map assignments or pre/post increment/decrement expressions to bounds // that depend upon the lvalue modified by the expressions. typedef std::map<Expr *, LValuesWithBounds> DependentBounds; void Add(Expr *E, llvm::PointerUnion<VarDecl *, MemberExpr *> LValue, BoundsExpr *Bounds); void Dump(raw_ostream &OS); ModifiedBoundsDependencies() {} DependentBounds Tracker; }; /// \brief Compute a mapping from statements that modify lvalues to /// in-scope bounds declarations that depend on those lvalues. /// FD is the function being declared and Body is the body of the /// function. They are passed in separately because Body hasn't /// been attached to FD yet. void ComputeBoundsDependencies(ModifiedBoundsDependencies &Tracker, FunctionDecl *FD, Stmt *Body); /// \brief RAII class used to indicate that we are substituting an expression /// into another expression during bounds checking. We need to suppress /// diagnostic emission during this. We are doing type-preserving /// substitutions, so we don't expect semantic errors during substitution. /// There could be warnings, which would confuse users. The warnings /// could also be escalated to errors, which would cause compilation failures. class ExprSubstitutionScope { Sema &SemaRef; bool PrevDisableSubstitionDiagnostics; public: explicit ExprSubstitutionScope(Sema &SemaRef, bool DisableDiagnostics = true) : SemaRef(SemaRef), PrevDisableSubstitionDiagnostics( SemaRef.DisableSubstitionDiagnostics) { SemaRef.DisableSubstitionDiagnostics = DisableDiagnostics; } ~ExprSubstitutionScope() { SemaRef.DisableSubstitionDiagnostics = PrevDisableSubstitionDiagnostics; } }; bool DisableSubstitionDiagnostics; ExprResult ActOnPackExpression(Expr *PackedExpr, QualType ExistType, TypeArgument SubstArg, SourceLocation StartLoc, SourceLocation EndLoc); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: /// Lookup the specified comparison category types in the standard /// library, and check the VarDecls possibly returned by the operator<=> /// builtins for that type.
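/// For example (illustrative): rewriting 'a <=> b' for two ints requires
/// 'std::strong_ordering' from <compare> to be present and well-formed.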
/// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs. QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions.
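/// For example (illustrative): for
/// \code
///   struct S { S(int x = 42); };
///   S s;   // constructed as if by 'S(42)'
/// \endcode
/// the default argument expression '42' is built into the call.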
/// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// Overwrite an EPI's exception specification with this /// computed exception specification. 
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Determine what sort of exception specification a defaulted /// default constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared.
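/// For example (illustrative):
/// \code
///   struct S {
///     void f() noexcept(Safe);        // parsed once 'S' is complete,
///     static const bool Safe = true;  // so this later member is visible
///   };
/// \endcode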
void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. 
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Check whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse the 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class), along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list.
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns true on failure, false on success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class.
AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
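///
/// A minimal illustrative sketch (assuming <string> is available) of a
/// full-expression whose temporary requires such a cleanup:
/// \code
///   #include <string>
///   void use(const std::string &);
///   // The std::string temporary is destroyed at the end of the
///   // full-expression, which is what the cleanup node represents.
///   void h() { use(std::string("tmp") + "!"); }
/// \endcode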
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
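///
/// For example, in the following illustrative snippet both 'N::' and
/// 'Inner::' are nested-name-specifiers of the 'identifier::' form:
/// \code
///   namespace N { struct Inner { static int x; }; }
///   int y = N::Inner::x;
/// \endcode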
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
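///
/// A hedged usage sketch pairing this with
/// \c SaveNestedNameSpecifierAnnotation(); the variable names are
/// hypothetical:
/// \code
///   void *Annotation = S.SaveNestedNameSpecifierAnnotation(SS);
///   // ... later, when the annotation token is consumed ...
///   CXXScopeSpec Restored;
///   S.RestoreNestedNameSpecifierAnnotation(Annotation, AnnotationRange,
///                                          Restored);
/// \endcode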
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. 
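///
/// For example, both captures below are init-captures of the kind added
/// here (illustrative):
/// \code
///   int make();
///   auto l = [x = make(), n = 3] { return x + n; };
/// \endcode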
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
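///
/// An illustrative sketch of the conversion this supports (assumes
/// Objective-C++ with blocks enabled):
/// \code
///   auto lambda = [] { /* ... */ };
///   void (^block)() = lambda; // lambda-to-block-pointer conversion
/// \endcode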
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void 
ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. 
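///
/// For example, a class-level DLL attribute of the kind checked here
/// (MS ABI; illustrative):
/// \code
///   class __declspec(dllexport) Widget { public: void draw(); };
/// \endcode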
/// The caller must ensure that referenceDLLExportedClassMethods is called /// at some point later, when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier. CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void
ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
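///
/// For example (deliberately ill-formed; illustrative):
/// \code
///   struct B { virtual void f() final; };
///   struct D : B { void f(); }; // error: D::f overrides 'final' B::f
/// \endcode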
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
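///
/// For example, the second line below is a C++1z deduction-guide whose
/// name is the subject of this check (illustrative; assumes <string>):
/// \code
///   #include <string>
///   template<typename T> struct Box { Box(const T &); };
///   Box(const char *) -> Box<std::string>; // deduction-guide for 'Box'
///   Box b("hello");                        // deduces Box<std::string>
/// \endcode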
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, 
unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
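///
/// For example (illustrative):
/// \code
///   template<typename MetaFun, typename T1, typename T2>
///   struct Apply {
///     typedef typename MetaFun::template apply<T1, T2> type;
///   };
/// \endcode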
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
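/// (Editorial sketch, not part of the original header; \c Partial,
/// \c TemplateArgs, and \c Loc are hypothetical.) Callers typically compare
/// the returned code against \c TDK_Success:
/// \code
///   sema::TemplateDeductionInfo Info(Loc);
///   if (DeduceTemplateArguments(Partial, TemplateArgs, Info) != TDK_Success) {
///     // Deduction failed; Info describes the parameters or arguments
///     // involved in the failure.
///   }
/// \endcode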
enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// A function argument from which we performed template argument /// deduction for a call.
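/// (Editorial sketch, not part of the original header; \c ParamType, \c I,
/// and \c Args are hypothetical.) Deduction records one entry per considered
/// call argument, e.g.:
/// \code
///   SmallVector<OriginalCallArg, 8> OriginalCallArgs;
///   OriginalCallArgs.push_back(
///       OriginalCallArg(ParamType, /*DecomposedParam=*/false,
///                       /*ArgIdx=*/I, Args[I]->getType()));
/// \endcode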
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that causes /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts.
/// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace, or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information.
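/// (Editorial sketch, not part of the original header; \c I is a
/// hypothetical loop index.) Typical usage scopes the substitution to one
/// element of each pack:
/// \code
///   {
///     ArgumentPackSubstitutionIndexRAII SubstIndex(*this,
///                                                  /*NewSubstitutionIndex=*/I);
///     // Substitutions performed here use the I-th argument of each
///     // parameter pack.
///   } // the prior index is restored here
/// \endcode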
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and \c isInvalid() evaluates to \c true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization.
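/// (Editorial sketch, not part of the original header.) Every overload is
/// used in the same stack-scoped way:
/// \code
///   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
///   if (Inst.isInvalid())
///     return; // instantiation depth limit reached; already diagnosed
///   // ... perform the instantiation; the context is popped again in
///   // ~InstantiatingTemplate.
/// \endcode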
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. 
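/// (Editorial sketch, not part of the original header.)
/// \code
///   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
///     // In a SFINAE context: substitution failures are trapped rather than
///     // emitted, and *Info (when non-null) can capture the suppressed
///     // diagnostics.
///   }
/// \endcode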
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up.
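/// (Editorial sketch, not part of the original header; \c EPI and
/// \c NumParams are hypothetical.) The builder only materializes storage
/// when some entry was interesting; otherwise it yields null:
/// \code
///   ExtParameterInfoBuilder InfosBuilder;
///   InfosBuilder.set(/*index=*/0, FunctionProtoType::ExtParameterInfo());
///   EPI.ExtParameterInfos = InfosBuilder.getPointerOrNull(NumParams);
/// \endcode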
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
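/// (Editorial sketch, not part of the original header; \c CallArgs and
/// \c SubstArgs are hypothetical.)
/// \code
///   SmallVector<Expr *, 8> SubstArgs;
///   if (SubstExprs(CallArgs, /*IsCall=*/true, TemplateArgs, SubstArgs))
///     return ExprError(); // substitution failed
/// \endcode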
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const 
ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
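/// (Editorial sketch, not part of the original header; \c Name and
/// \c NameLoc are hypothetical.)
/// \code
///   ParsedType ReceiverType;
///   switch (getObjCMessageKind(S, Name, NameLoc, /*IsSuper=*/false,
///                              /*HasTrailingDot=*/false, ReceiverType)) {
///   case ObjCSuperMessage:    /* build a super message */     break;
///   case ObjCInstanceMessage: /* build an instance message */ break;
///   case ObjCClassMessage:    /* build a class message */     break;
///   }
/// \endcode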
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - Called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on".
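/// Illustrative sketch, not part of the original interface: pragmas routed through the weak, extname, visibility, and floating-point handlers above. All function names are hypothetical.
/// \code
///   #pragma weak backup_init                    // ActOnPragmaWeakID
///   #pragma weak my_fn = my_fn_impl             // ActOnPragmaWeakAlias
///   #pragma redefine_extname old_name new_name  // ActOnPragmaRedefineExtname
///   #pragma GCC visibility push(hidden)         // ActOnPragmaVisibility
///   void internal_helper(void);
///   #pragma GCC visibility pop                  // PopPragmaVisibility
///   #pragma STDC FP_CONTRACT ON                 // ActOnPragmaFPContract
/// \endcode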
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
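/// Illustrative sketch, not part of the original interface: attribute spellings that funnel into the Add*Attr helpers above. 'grab' and 'table' are hypothetical.
/// \code
///   alignas(64) static int table[16];        // AddAlignedAttr
///   void *grab(unsigned n, unsigned align)
///       __attribute__((assume_aligned(32),   // AddAssumeAlignedAttr
///                      alloc_align(2)));     // AddAllocAlignAttr (2nd param)
/// \endcode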
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. 
If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Tries to capture a lambda's captured variables in the OpenMP region /// before the original lambda itself is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a D should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed.
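/// Illustrative sketch, not part of the original interface: the capture questions the predicates above answer for a device region. 'kernel', 'n', 'a', and 's' are hypothetical.
/// \code
///   void kernel(int n, double *a) {
///     double s = 0.5;
///     #pragma omp target map(tofrom: a[0:n])
///     for (int i = 0; i < n; ++i)
///       a[i] += s;   // isOpenMPCapturedByRef decides whether 's' is
///   }                // captured by value or by reference at this level
/// \endcode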
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on the Requires directive. OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'.
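/// Illustrative sketch, not part of the original interface: a user-defined reduction exercising the declare-reduction callbacks above. 'Vec' and 'vsum' are hypothetical.
/// \code
///   struct Vec { double x, y; };
///   #pragma omp declare reduction(vsum : Vec :                \
///       omp_out.x += omp_in.x, omp_out.y += omp_in.y)         \
///       initializer(omp_priv = Vec{0.0, 0.0})
/// \endcode
/// The combiner expression is parsed between the CombinerStart/CombinerEnd
/// calls, and the initializer between the InitializerStart/InitializerEnd
/// calls, with the omp_priv variable produced by
/// ActOnOpenMPDeclareReductionInitializerStart.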
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OMPDeclareTargetDeclAttr::MapTypeTy MT, NamedDeclSetType &SameDirectiveDecls); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement.
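/// Illustrative sketch, not part of the original interface: the declarative directives handled above. 'S', 'byview', and 'devfn' are hypothetical, and the mapper syntax follows the OpenMP 5.0 form.
/// \code
///   struct S { int len; double *data; };
///   #pragma omp declare mapper(byview : S v) map(v.len, v.data[0:v.len])
///
///   #pragma omp declare target      // ActOnStartOpenMPDeclareTargetDirective
///   int devfn(int x);
///   #pragma omp end declare target  // ActOnFinishOpenMPDeclareTargetDirective
/// \endcode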
StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. 
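/// Illustrative sketch, not part of the original interface: worksharing constructs dispatched to the entry points above. 'work', 'log_once', and 'flush_io' are hypothetical helpers.
/// \code
///   #pragma omp parallel               // ActOnOpenMPParallelDirective
///   {
///     #pragma omp for schedule(static) // ActOnOpenMPForDirective
///     for (int i = 0; i < 100; ++i) work(i);
///     #pragma omp single               // ActOnOpenMPSingleDirective
///     log_once();
///     #pragma omp critical(io)         // ActOnOpenMPCriticalDirective
///     flush_io();
///   }
/// \endcode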
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. 
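/// Illustrative sketch, not part of the original interface: structured vs. standalone target-data constructs among the entry points above. 'buf', 'n', and 'use' are hypothetical.
/// \code
///   #pragma omp target data map(tofrom: buf[0:n])    // structured block form
///   { use(buf); }
///   #pragma omp target enter data map(to: buf[0:n])  // standalone
///   #pragma omp target exit data map(from: buf[0:n]) // standalone
/// \endcode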
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
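/// Illustrative sketch, not part of the original interface: one combined construct covering several of the entry points above; combined directives reuse the clause callbacks of their constituent constructs, only the ActOn*Directive entry point differs. 'a' and 'n' are hypothetical.
/// \code
///   #pragma omp target teams distribute parallel for simd \
///       map(tofrom: a[0:n])
///   for (int i = 0; i < n; ++i)
///     a[i] *= 2.0;
/// \endcode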
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. 
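/// Illustrative sketch, not part of the original interface: expression-valued clauses parsed into the callbacks above. Clauses such as 'collapse' require a strictly positive integer constant (cf. VerifyPositiveIntegerConstantInClause). 'n', 'm', and 'f' are hypothetical.
/// \code
///   #pragma omp parallel for if(n > 1024) num_threads(8) \
///       schedule(dynamic, 4) collapse(2)
///   for (int i = 0; i < n; ++i)
///     for (int j = 0; j < n; ++j)
///       m[i][j] = f(i, j);
/// \endcode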
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. 
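/// Illustrative sketch, not part of the original interface: data-sharing and reduction clauses parsed into the var-list callbacks above. All variable names are hypothetical.
/// \code
///   double sum = 0.0, scale = 2.0, last = 0.0;
///   #pragma omp parallel for reduction(+: sum) \
///       firstprivate(scale) lastprivate(last)
///   for (int i = 0; i < n; ++i) {
///     last = a[i] * scale;
///     sum += last;
///   }
/// \endcode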
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. 
CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion, bool isBoundsSafeInterfaceCast = false); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for various /// forms of call prototypes.
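/// Illustrative sketch, not part of the original interface: the conversion forms distinguished by CheckedConversionKind and the helpers above. 'sink' and 'demo' are hypothetical.
/// \code
///   void sink(int, ...);
///   void demo(float f, int i) {
///     double d = i;     // CCK_ImplicitConversion via ImpCastExprToType
///     d = (double)i;    // CCK_CStyleCast
///     d = double(i);    // CCK_FunctionalCast
///     sink(i, f);       // variadic argument: 'f' promoted float -> double
///   }                   // (DefaultVariadicArgumentPromotion)
/// \endcode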
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If the operands aren't both arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointer types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this.
IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// IncompatibleCheckedCVoid - Assignments to/from void pointers to pointers /// to data containing checked pointers are not allowed in regular checked /// scopes. They are allowed only in unchecked and checked bounds_only scopes. IncompatibleCheckedCVoid, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true, QualType LHSInteropType = QualType()); public: /// \brief Given a value with type Ty that has a bounds declaration, /// compute the bounds-safe interface type. Returns a null QualType /// if none exists.
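/// Illustrative sketch, not part of the original interface: C assignments classified by the AssignConvertType enumerators above and reported through DiagnoseAssignmentResult. 'classify' is hypothetical.
/// \code
///   void classify(int i, int *ip, unsigned *up,
///                 void (*fp)(void), void *vp) {
///     i = ip;   // PointerToInt (accepted as an extension in C)
///     ip = i;   // IntToPointer (accepted as an extension in C)
///     vp = fp;  // FunctionVoidPointer
///     ip = up;  // IncompatiblePointerSign
///   }
/// \endcode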
QualType SynthesizeInteropType(QualType Ty, bool isParam); /// Rewrite function types with bounds-safe interfaces on unchecked /// types to use the checked types specified by the interfaces. Recursively /// apply the rewrite to function types nested within the type. QualType RewriteBoundsSafeInterfaceTypes(QualType Ty); /// \brief Get the bounds-safe interface type for LHS. /// Returns a null QualType if there isn't one. QualType GetCheckedCLValueInteropType(ExprResult LHS); /// \brief Get the bounds-safe interface type for RHS. /// Returns a null QualType if there isn't one. QualType GetCheckedCRValueInteropType(ExprResult RHS); /// \brief If T is an array type, create a checked array type version of T. /// This includes propagating the checked property to nested array types. If /// a valid checked array type cannot be constructed and Diagnose is true, /// print a diagnostic message for the problem. QualType MakeCheckedArrayType(QualType T, bool Diagnose = false, SourceLocation Loc = SourceLocation()); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
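/// Illustrative sketch, not part of the original interface: a bounds-safe interface on an unchecked declaration, the situation SynthesizeInteropType and the GetCheckedC*InteropType helpers above reason about. Checked C syntax as specified by that extension; 'fill' is hypothetical.
/// \code
///   // Checked callers see 'buf' through its interop type _Array_ptr<int>.
///   void fill(int *buf : itype(_Array_ptr<int>), int n);
/// \endcode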
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak.
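/// An illustrative call-site sketch (hypothetical variables; the exact
/// recovery depends on the kind of cast being checked):
/// \code
///   ARCConversionResult ACR =
///       CheckObjCConversion(CastRange, CastTy, CastOp, CCK_CStyleCast);
///   if (ACR == ACR_unbridged)
///     diagnoseARCUnbridgedCast(CastOp); // declared below
/// \endcode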
ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. 
}; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return the converted condition expression, or ExprError() if there /// were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns ExprError() if conversion to bool is /// invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns ExprError() on failure. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns ExprError() on failure.
/// Can optionally return whether the bit-field is of width 0. ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP device errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Indicate that this function (and thus everything it transitively calls) /// will be codegen'ed, and emit any deferred diagnostics on this function and /// its (transitive) callees. void markKnownEmitted( Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee, SourceLocation OrigLoc, const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. 
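/// A minimal call-site sketch (hypothetical `Callee` and `Loc`):
/// \code
///   if (!CheckCUDACall(Loc, Callee))
///     return ExprError(); // CFP_Never: an error has already been emitted
/// \endcode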
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error, emits an appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure a kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation. PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
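/// Illustrative use from a parser callback (hypothetical variables):
/// \code
///   QualType Preferred =
///       Actions.ProduceCallSignatureHelp(getCurScope(), Fn, Args, OpenParLoc);
///   // A null type means no overload yielded a preferred argument type.
/// \endcode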
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void 
CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
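/// For example (a sketch of a warning candidate):
/// \code
///   struct Base { int n; };
///   struct Derived : Base { int n; }; // Derived::n shadows Base::n
/// \endcode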
void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains a 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether the receiver is a mutable ObjC container which /// attempts to add itself into the container. void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// a function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we count it as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the set of potentially /// misaligned members and is converted to some pointer type T with lower /// or equal alignment requirements.
If so, it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; /// \brief RAII object that handles state changes for processing a member /// bounds expression. class EnterMemberBoundsExprRAII { Sema &S; bool SavedMemberBounds; public: EnterMemberBoundsExprRAII(Sema &S) : S(S), SavedMemberBounds(S.IsMemberBoundsExpr) { S.IsMemberBoundsExpr = true; } ~EnterMemberBoundsExprRAII() { S.IsMemberBoundsExpr = SavedMemberBounds; } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation.
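// A DenseMapInfo specialization must also supply two reserved keys (an
// "empty" key and a "tombstone" key) that never compare equal to a real key;
// DenseMap and DenseSet use them internally to mark unused and erased
// buckets. getHashValue and isEqual complete the contract, as shown below.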
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
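// The specialization above follows a general pattern: a key type used in a
// hashed container needs a hash and an equality (plus reserved keys, in
// LLVM's case). A minimal standalone analogue of hashing a (function,
// location) pair, using only the standard library; all names here are
// illustrative and not part of Sema.h:
#include <cstdint>
#include <functional>
#include <unordered_set>

struct FuncAndLoc {
  const void *FD;  // stands in for CanonicalDeclPtr<FunctionDecl>
  uint32_t RawLoc; // stands in for SourceLocation::getRawEncoding()
  bool operator==(const FuncAndLoc &O) const {
    return FD == O.FD && RawLoc == O.RawLoc;
  }
};

struct FuncAndLocHash {
  std::size_t operator()(const FuncAndLoc &V) const {
    // Combine both fields, mirroring hash_combine(FD, Loc.getRawEncoding()).
    std::size_t H = std::hash<const void *>()(V.FD);
    return H ^ (std::hash<uint32_t>()(V.RawLoc) + 0x9e3779b9u + (H << 6) + (H >> 2));
  }
};

// Usage: a set keyed on (function, location), analogous to
// LocsWithCUDACallDiags above:
//   std::unordered_set<FuncAndLoc, FuncAndLocHash> SeenDiags;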
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of the expected type during expression parsing. The type is tied to /// a particular token; all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// us to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while the function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style casts, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store the expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute the expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// Source of additional semantic information. ExternalSemaSource *ExternalSource; /// Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here.
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. 
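// ---------------------------------------------------------------------
// [Editor's illustrative aside; not part of the original header.] A
// minimal, self-contained sketch of the labeled push/pop discipline that
// PragmaStack and SentinelAction (above) implement. All names here
// (MiniPragmaStack, Slot) are hypothetical, and the real class also
// records SourceLocations and supports the PSK_Set/PSK_Reset actions:
//
//   #include <string>
//   #include <vector>
//
//   template <typename ValueType> struct MiniPragmaStack {
//     struct Slot { std::string Label; ValueType Value; };
//     ValueType DefaultValue, CurrentValue;
//     std::vector<Slot> Stack;
//
//     explicit MiniPragmaStack(ValueType Default)
//         : DefaultValue(Default), CurrentValue(Default) {}
//
//     // #pragma foo(push[, Label]): save the current value.
//     void push(std::string Label) {
//       Stack.push_back({std::move(Label), CurrentValue});
//     }
//
//     // #pragma foo(pop[, Label]): unwind to the matching label (or the
//     // top slot when no label is given) and restore its saved value.
//     void pop(const std::string &Label) {
//       while (!Stack.empty()) {
//         Slot S = std::move(Stack.back());
//         Stack.pop_back();
//         if (Label.empty() || S.Label == Label) {
//           CurrentValue = S.Value;
//           return;
//         }
//       }
//       CurrentValue = DefaultValue; // unmatched pop: fall back to default
//     }
//   };
// ---------------------------------------------------------------------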
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// True if the current expression is a member bounds expression /// for a structure. Member bounds expressions can only reference /// members and cannot reference variables. bool IsMemberBoundsExpr; std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. 
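// ---------------------------------------------------------------------
// [Editor's illustrative aside; not part of the original header.] The
// delete-expression list documented just above follows a recurring Sema
// pattern: record candidates cheaply while parsing, then resolve them
// once at end-of-TU when complete information exists. A standalone
// sketch of that pattern (all names hypothetical):
//
//   #include <functional>
//   #include <map>
//   #include <string>
//   #include <vector>
//
//   struct DeferredDeleteChecks {
//     // Field name -> source offsets of delete-exprs we could not yet
//     // prove consistent with the field's initializer.
//     std::map<std::string, std::vector<unsigned>> Pending;
//
//     void noteDelete(const std::string &Field, unsigned Offset) {
//       Pending[Field].push_back(Offset); // cheap; no analysis yet
//     }
//
//     void resolveAtEndOfTU(
//         const std::function<bool(const std::string &)> &IsMismatch,
//         const std::function<void(const std::string &, unsigned)> &Diag) {
//       for (const auto &Entry : Pending)
//         if (IsMismatch(Entry.first))
//           for (unsigned Offset : Entry.second)
//             Diag(Entry.first, Offset);
//     }
//   };
// ---------------------------------------------------------------------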
typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. 
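// ---------------------------------------------------------------------
// [Editor's illustrative aside; not part of the original header.] The
// SetLateTemplateParser hook above uses the classic "opaque pointer +
// free-function callback" bridge, letting Sema call back into the
// parser without naming its type. A minimal sketch (names hypothetical):
//
//   struct LateParsedBody { /* saved tokens */ };
//   typedef void LateParserCB(void *Opaque, LateParsedBody &Body);
//
//   struct SemaLike {
//     LateParserCB *Callback = nullptr;
//     void *Opaque = nullptr;
//     void setLateParser(LateParserCB *CB, void *P) {
//       Callback = CB;
//       Opaque = P;
//     }
//     void parseLater(LateParsedBody &B) {
//       if (Callback)
//         Callback(Opaque, B); // SemaLike never sees the parser's type
//     }
//   };
//
//   struct ParserLike {
//     static void parseCB(void *Opaque, LateParsedBody &B) {
//       static_cast<ParserLike *>(Opaque)->parse(B); // recover the type
//     }
//     void parse(LateParsedBody &) { /* ... */ }
//   };
//
//   // Registration: S.setLateParser(&ParserLike::parseCB, &P);
// ---------------------------------------------------------------------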
sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. 
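// ---------------------------------------------------------------------
// [Editor's illustrative aside; not part of the original header.]
// ContextRAII above is a save/restore guard with an explicit, idempotent
// pop() so callers can end the scope early, while the destructor still
// guarantees restoration on every exit path. The idiom in isolation
// (the name SaveWithEarlyPop is hypothetical; llvm::SaveAndRestore is
// the simpler destructor-only cousin):
//
//   template <typename T> class SaveWithEarlyPop {
//     T *Slot; // null once popped
//     T Saved;
//   public:
//     SaveWithEarlyPop(T &Var, T NewValue) : Slot(&Var), Saved(Var) {
//       Var = NewValue;
//     }
//     void pop() {
//       if (!Slot)
//         return;      // already popped explicitly
//       *Slot = Saved; // restore the saved value
//       Slot = nullptr;
//     }
//     ~SaveWithEarlyPop() { pop(); }
//   };
// ---------------------------------------------------------------------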
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). 
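// ---------------------------------------------------------------------
// [Editor's illustrative aside; not part of the original header.] The
// StdNamespace/StdBadAlloc members above (and the NS*Decl caches that
// follow) are resolved on first use and then cached. A standalone sketch
// of that lazy lookup-and-cache shape (all names hypothetical):
//
//   #include <functional>
//
//   template <typename T> class LazyLookup {
//     std::function<T *()> Find;
//     T *Cached = nullptr;
//     bool Tried = false;
//   public:
//     explicit LazyLookup(std::function<T *()> F) : Find(std::move(F)) {}
//     T *get() {
//       if (!Tried) {
//         Cached = Find(); // perform the (possibly failing) lookup once
//         Tried = true;
//       }
//       return Cached;     // may legitimately stay null
//     }
//   };
// ---------------------------------------------------------------------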
QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. 
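// ---------------------------------------------------------------------
// [Editor's illustrative aside; not part of the original header.]
// Concrete user-code examples for the ExpressionEvaluationContext
// values enumerated above (annotations are the editor's reading of the
// enum's documentation, not verified compiler output):
//
//   constexpr int K = 2;
//   int h();
//   struct S { int a, b; };
//   unsigned n = sizeof(h());     // operand of sizeof: Unevaluated
//   decltype(S{1, h()}) s;        // braced list inside an unevaluated
//                                 //   operand: UnevaluatedList
//   // case 1 + K:                -- case value: ConstantEvaluated
//   // if constexpr (p) { f(); }  -- untaken branch: DiscardedStatement
//   int g(int x = h());           // default argument h():
//                                 //   PotentiallyEvaluatedIfUsed
//   int y = h();                  // ordinary initializer:
//                                 //   PotentiallyEvaluated
// ---------------------------------------------------------------------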
struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering(), ExprContext(ExprContext) {} /// Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair.
The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. 
In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } /// Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it.
/// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not).
Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, CheckedPointerKind kind, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, CheckedArrayKind Kind, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
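// ---------------------------------------------------------------------
// [Editor's illustrative aside; not part of the original header.] A
// hedged usage sketch for the BuildFunctionType helper documented above,
// as a caller inside clang might invoke it (assumes a live Sema &S, an
// ASTContext &Ctx, and a SourceLocation Loc; not compilable standalone):
//
//   llvm::SmallVector<QualType, 2> Params = {Ctx.IntTy, Ctx.FloatTy};
//   FunctionProtoType::ExtProtoInfo EPI; // defaulted prototype info
//   QualType Fn = S.BuildFunctionType(Ctx.VoidTy, Params, Loc,
//                                     DeclarationName(), EPI);
//   if (Fn.isNull()) {
//     // Per the documentation above, a null QualType signals an error;
//     // otherwise the unqualified type is a FunctionProtoType.
//   }
// ---------------------------------------------------------------------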
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
{ assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) 
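// ---------------------------------------------------------------------
// [Editor's illustrative aside; not part of the original header.] The
// BoundTypeDiagnoser defined just above captures its diagnostic
// arguments in a std::tuple and replays them into the diagnostic
// builder via an index_sequence. The same replay trick as a standalone
// C++14 sketch (BoundPrinter is a hypothetical analogue):
//
//   #include <cstddef>
//   #include <iostream>
//   #include <tuple>
//   #include <utility>
//
//   template <typename... Ts> class BoundPrinter {
//     std::tuple<Ts...> Args;
//     template <std::size_t... Is>
//     void emit(std::ostream &OS, std::index_sequence<Is...>) const {
//       // Expand each tuple element into the stream in order; the array
//       // trick sequences the side effects pre-C++17.
//       bool Dummy[] = {false,
//                       ((OS << std::get<Is>(Args) << ' '), false)...};
//       (void)Dummy;
//     }
//   public:
//     explicit BoundPrinter(Ts... A) : Args(std::move(A)...) {}
//     void print(std::ostream &OS) const {
//       emit(OS, std::index_sequence_for<Ts...>{});
//     }
//   };
//
//   // BoundPrinter<int, const char *>(42, "candidates").print(std::cout);
// ---------------------------------------------------------------------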
bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate, NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case 
NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void 
FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
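// Illustrative sketch (not from the original header) of the default-argument
// handling declared above (ActOnParamDefaultArgument, SetParamDefaultArgument),
// in source form, assuming nothing beyond standard C++:
void greet(int times = 1) { (void)times; }  // default stored on the ParmVarDecl
void greet_caller() {
  greet();   // call site uses the stored default: times == 1
  greet(3);  // an explicit argument overrides it
}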
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit, SourceLocation EqualLoc = SourceLocation()); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); bool ValidateNTCheckedType(ASTContext &C, QualType VDeclType, Expr *Init); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. 
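// Illustrative sketch of bodies that canDelayFunctionBody / canSkipFunctionBody
// must refuse to skip, in plain C++14:
constexpr int square(int x) { return x * x; }  // body may be needed...
int grid[square(3)];                           // ...mid-expression, for this bound
auto deduced() { return 42; }                  // 'auto' return: body determines the type
int deduced_user = deduced();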
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem.
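// Illustrative sketch of the C++20 module-declaration forms handled above
// (comment-only, since module units need their own files and build flags):
//   module;             // global module fragment (ActOnGlobalModuleFragmentDecl)
//   #include <vector>   // only preprocessor includes may appear in the fragment
//   export module M;    // ModuleDeclKind::Interface
//   module M;           // ModuleDeclKind::Implementation (in another unit)
//   module :private;    // private module fragment (ActOnPrivateModuleFragmentDecl)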
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
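// Illustrative sketch of the mismatch getNonTagTypeDeclKind classifies for
// err_tag_reference_non_tag:
typedef int Distance;    // a non-tag type declaration (NTK_Typedef)
// struct Distance d;    // error: 'Distance' names a typedef, and a tag keyword
//                       // may only refer to a struct/class/union/enum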
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr, RecordDecl::Genericity GenericKind = RecordDecl::NonGeneric, ArrayRef<TypedefDecl *> TypeParams = ArrayRef<TypedefDecl *> {nullptr, 0} ); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); FieldDecl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). 
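// Illustrative sketch covering all four TagUseKind values for one tag:
struct foo;                        // TUK_Declaration: forward declaration
struct foo *tag_ref;               // TUK_Reference: reference to the tag
struct foo { int x; } tag_def;     // TUK_Definition
class bar { friend struct foo; };  // TUK_Friend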
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); /// Push the parameters listed in Params into scope. void ActOnSetupParametersAgain(Scope* S, ArrayRef<ParmVarDecl *> Params); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. 
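// Illustrative sketch of what CheckEnumRedeclaration enforces: the scoped-ness
// and fixed underlying type must match across redeclarations (C++11):
enum class E : int;           // OK: scoped enum with fixed underlying type
enum class E : int;           // OK: consistent redeclaration
// enum class E : long;       // error: underlying type differs from prior declaration
enum class E : int { A, B };  // the definition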
FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added.
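// Illustrative sketch of the redeclaration merging that mergeAvailabilityAttr
// (declared just below) performs; AMK_Redeclaration requires an exact match.
// The platform and version here are arbitrary example values:
void api() __attribute__((availability(macos, introduced=10.12)));
void api() __attribute__((availability(macos, introduced=10.12)));  // merges: OK
// A redeclaration with, say, introduced=10.13 would be diagnosed as a mismatch.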
AvailabilityAttr *mergeAvailabilityAttr( NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // Checked C specific methods for merging function declarations. bool CheckedCFunctionDeclCompatibility(FunctionDecl *New, FunctionDecl *Old); bool CheckedCMergeFunctionDecls(FunctionDecl *New, FunctionDecl *Old); bool DiagnoseCheckedCFunctionCompatibility(FunctionDecl *New, FunctionDecl *Old); // used for %select in diagnostics for errors involving checked types. 
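// Illustrative sketch of mergeDeclAttributes: attributes from a previous
// declaration are carried onto redeclarations, so the use below still warns:
__attribute__((deprecated("use shiny_new"))) void legacy();
void legacy();                    // redeclaration inherits 'deprecated'
void legacy_user() { legacy(); }  // warning: 'legacy' is deprecated: use shiny_new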
enum class CheckedTypeClassification { CCT_Any, CCT_Struct, CCT_Union }; // used for %select in diagnostics for errors involving redeclarations // with bounds enum class CheckedCBoundsError { CCBE_Parameter, CCBE_Return, CCBE_Variable }; // used for %select in diagnostics for errors involving redeclarations // with bounds annotations. enum class BoundsAnnotationKind { Bounds, IType }; CheckedTypeClassification classifyForCheckedTypeDiagnostic(QualType qt); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult 
PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. 
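// Illustrative sketch tying together two checks declared above: a switch
// condition undergoes a contextual implicit conversion (the case
// ICEConvertDiagnoser matches), and each case label must be a converted
// constant expression (CCEK_CaseValue):
struct SwitchMode {
  operator int() const { return 1; }  // the single viable conversion function
};
void run(SwitchMode m) {
  switch (m) {        // operator int() is applied contextually
  case 0 + 1: break;  // CCEK_CaseValue: a converted constant expression
  default: break;
  }
}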
bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate. void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all templates and non-templates identified by // the expression Expr. void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal.
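// Illustrative sketch of an argument-dependent diagnose_if attribute, the kind
// diagnoseArgDependentDiagnoseIfAttrs evaluates at each call site:
int safe_div(int a, int b)
    __attribute__((diagnose_if(b == 0, "division by zero", "error")));
int safe_div(int a, int b) { return a / b; }
int safe_div_user() {
  return safe_div(10, 2);    // fine: the condition folds to false
  // return safe_div(10, 0); // error at the call site: division by zero
}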
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection = true, bool CalleesAddressIsTaken = false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions.
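// Illustrative sketch of ResolveAddressOfOverloadedFunction: the target type
// of the address-of expression selects the overload:
int clamp01(int x) { return x < 0 ? 0 : x > 1 ? 1 : x; }
double clamp01(double x) { return x < 0 ? 0 : x > 1 ? 1 : x; }
int (*fn_i)(int) = &clamp01;        // target type picks the int overload
double (*fn_d)(double) = &clamp01;  // target type picks the double overload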
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. 
LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. 
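// Illustrative sketch of the literal operator forms that LookupLiteralOperator
// distinguishes:
unsigned long long operator""_kb(unsigned long long v) {  // LOLR_Cooked
  return v * 1024;                  // receives the already-built number
}
unsigned operator""_len(const char *spelling) {           // LOLR_Raw
  unsigned n = 0;
  while (spelling[n]) ++n;          // receives the token's spelling
  return n;
}
unsigned long long four_kb = 4_kb;  // cooked form: 4096
unsigned digits = 123_len;          // raw form: operates on "123", yields 3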
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non-error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. 
void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If a method is a property setter/getter and /// its property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' types match and returns /// true or false accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface /// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// a category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in the global method pool for /// the given selector. It checks the desired kind first; if none is found and /// the parameter CheckTheOther is set, it then checks the other kind. If no such /// method or only one method is found, the function returns false; otherwise, it /// returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns a selector which best matches the given argument list, or /// nullptr if none could be found. ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr, CheckedScopeSpecifier WrittenCSS = CSS_None, SourceLocation CSSLoc = SourceLocation(), SourceLocation CSMLoc = SourceLocation()); private: CheckedScopeSpecifier CheckingKind; // Keep a stack of saved checked scope information. 
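//
// Illustrative push/pop sketch (a hypothetical caller; S is a Sema
// instance): the stack saves the current specifier so nested regions can
// restore it on exit.
//
//   S.PushCheckedScopeInfo(Loc);           // remember the current kind
//   S.SetCheckedScopeInfo(CSS_Unchecked);  // e.g. entering an unchecked region
//   /* ... process the region ... */
//   if (S.PopCheckedScopeInfo())           // returns true if the stack was empty
//     /* unbalanced pop: nothing to restore */;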
class SavedCheckedScope { public: SavedCheckedScope(CheckedScopeSpecifier S, SourceLocation L) : Loc(L), Saved(S) {} SourceLocation Loc; CheckedScopeSpecifier Saved; }; SmallVector<SavedCheckedScope, 8> CheckingKindStack; // can be empty public: CheckedScopeSpecifier GetCheckedScopeInfo() { return CheckingKind; } void SetCheckedScopeInfo(CheckedScopeSpecifier CSS) { CheckingKind = CSS; } void PushCheckedScopeInfo(SourceLocation Loc) { CheckingKindStack.push_back(SavedCheckedScope(CheckingKind, Loc)); } bool PopCheckedScopeInfo() { if (CheckingKindStack.size() > 0) { CheckingKind = CheckingKindStack.back().Saved; CheckingKindStack.pop_back(); return false; } else return true; } void DiagnoseUnterminatedCheckedScope(); bool IsCheckedScope() { return CheckingKind != CSS_Unchecked; } class CheckedScopeRAII { Sema &SemaRef; CheckedScopeSpecifier PrevCheckingKind; public: CheckedScopeRAII(Sema &SemaRef, CheckedScopeSpecifier CSS) : SemaRef(SemaRef), PrevCheckingKind(SemaRef.CheckingKind) { if (CSS != CSS_None) SemaRef.CheckingKind = CSS; } CheckedScopeRAII(Sema &S, DeclSpec &DS) : CheckedScopeRAII(S, DS.getCheckedScopeSpecifier()) { } ~CheckedScopeRAII() { SemaRef.CheckingKind = PrevCheckingKind; } }; /// An RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false, CheckedScopeSpecifier CSS = CSS_None): S(S), CheckedProperties(S, CSS) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; CheckedScopeRAII CheckedProperties; }; /// An RAII helper that pops a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult
CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned 
NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); enum CheckedScopeTypeLocation { CSTL_TopLevel, CSTL_Nested, CSTL_BoundsSafeInterface }; /// Returns true if Ty is allowed in a checked scope: /// - If Ty is a pointer or array type, it must be a checked pointer or /// array type or an unchecked pointer or array type with a bounds-safe /// interface. /// - This rule applies recursively to any types nested within Ty. /// - All other types are allowed in checked scopes. 
/// Return false if Ty is not allowed. bool AllowedInCheckedScope(QualType Ty, const InteropTypeExpr *InteropType, bool IsParam, CheckedScopeTypeLocation Loc, CheckedScopeTypeLocation &ProblemLoc, QualType &ProblemTy); // Enum for diagnostic message that describes the type of declaration // being checked. enum CheckedDeclKind { CDK_Parameter, CDK_FunctionReturn, CDK_LocalVariable, CDK_GlobalVariable, CDK_Member }; /// \param D - target declaration /// \param UseLoc - default invalid location at declaration /// it is valid only if it is regarded as use of variable /// \returns true if target declaration is valid checked decl bool DiagnoseCheckedDecl(const ValueDecl *D, SourceLocation UseLoc = SourceLocation()); bool DiagnoseTypeInCheckedScope(QualType Ty, SourceLocation Start, SourceLocation End); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. 
These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope.
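// A hedged, check-only sketch of tryCaptureVariable (declared above; S, Var,
// and Loc are hypothetical context): with BuildAndDiagnose=false it only
// reports whether the capture would succeed, filling CaptureType and
// DeclRefType on success.
//
//   QualType CaptureType, DeclRefType;
//   bool Invalid = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, /*EllipsisLoc=*/SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);
//   // Invalid == true means the variable cannot be captured.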
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
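/// A small language-level illustration of why odr-use marking is
/// context-sensitive:
/// \code
///   void f() {
///     const int N = 5;
///     auto A = [] { int Arr[N]; (void)Arr; }; // N only constant-evaluated:
///                                             // not odr-used, no capture needed
///     auto B = [] { return &N; };             // ill-formed: &N odr-uses N,
///                                             // which is not captured
///   }
/// \endcode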
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
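// For instance (a hedged sketch of the recovery this enables), given a
// smart-pointer-like class with a custom operator->:
//
//   struct Obj { int Field; };
//   struct Ptr { Obj *Raw; Obj *operator->() { return Raw; } };
//   void use(Ptr P) { (void)P.Field; }  // error: no member 'Field' in 'Ptr';
//                                       // the access can be retried as
//                                       // P->Field, which succeeds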
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
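/// For a call such as the following, the arguments map as sketched:
/// \code
///   f(a, b)
///   // Fn                    <- the expression 'f'
///   // ArgExprs              <- { a, b }
///   // LParenLoc / RParenLoc <- the '(' and ')' locations
/// \endcode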
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op, bool isCheckedScope = false); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression.
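// For example (GNU statement expression; compute() is a hypothetical
// function used only for illustration):
//
//   int X = ({ int T = compute(); T + 1; });  // the trailing 'T + 1' is the
//                                             // final expression whose value
//                                             // the "({..})" yields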
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. 
^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Checked C Extension ----------------------===// private: QualType ValidateBoundsExprArgument(Expr *Arg); public: ExprResult ActOnNullaryBoundsExpr(SourceLocation BoundKWLoc, BoundsExpr::Kind Kind, SourceLocation RParenLoc); ExprResult ActOnCountBoundsExpr(SourceLocation BoundsKWLoc, BoundsExpr::Kind Kind, Expr *CountExpr, SourceLocation RParenLoc); ExprResult ActOnRangeBoundsExpr(SourceLocation BoundsKWLoc, Expr *LowerBound, Expr *UpperBound, SourceLocation RParenLoc); ExprResult CreateRangeBoundsExpr(SourceLocation BoundsKWLoc, Expr *LowerBound, Expr *UpperBound, RelativeBoundsClause *Relative, SourceLocation RParenLoc); ExprResult ActOnBoundsInteropType(SourceLocation TypeKWLoc, ParsedType Ty, SourceLocation RParenLoc); ExprResult CreateBoundsInteropTypeExpr(SourceLocation TypeKWLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc); ExprResult CreatePositionalParameterExpr(unsigned Index, QualType QT); RelativeBoundsClause* ActOnRelativeTypeBoundsClause(SourceLocation BoundsKWLoc, ParsedType Ty, SourceLocation RParenLoc); RelativeBoundsClause * CreateRelativeTypeBoundsClause(SourceLocation BoundsKWLoc, TypeSourceInfo *TyInfo, SourceLocation RParenLoc); RelativeBoundsClause* ActOnRelativeConstExprClause(Expr *ConstExpr, SourceLocation BoundsKWLoc, SourceLocation RParenLoc); bool CheckBoundsCastBaseType(Expr *E1); ExprResult ActOnBoundsCastExprBounds(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, ParsedType D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E1, BoundsExpr *ParsedBounds); ExprResult ActOnBoundsCastExprSingle( Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, ParsedType D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E1); ExprResult BuildBoundsCastExpr(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *CastTypeInfo, SourceRange AngleBrackets, SourceRange Paren, Expr *E1, BoundsExpr *bounds); bool DiagnoseBoundsDeclType(QualType Ty, DeclaratorDecl *D, BoundsAnnotations &BA, bool IsReturnAnnots); /// \brief Update the information in ASTContext that tracks, for a member, /// which bounds declarations depend upon it. FD is the member whose /// bounds are given by Bounds. void TrackMemberBoundsDependences(FieldDecl *FD, BoundsExpr *Bounds); void ActOnBoundsDecl(DeclaratorDecl *D, BoundsAnnotations Annots, bool MergeDeferredBounds = false); void ActOnEmptyBoundsDecl(DeclaratorDecl *D); void ActOnInvalidBoundsDecl(DeclaratorDecl *D); /// \brief Add default bounds/interop type expressions to Annots, if appropriate. void InferBoundsAnnots(QualType Ty, BoundsAnnotations &Annots, bool IsParam); // \#pragma CHECKED_SCOPE. enum PragmaCheckedScopeKind { PCSK_On, PCSK_Off, PCSK_BoundsOnly, PCSK_Push, PCSK_Pop }; void ActOnPragmaCheckedScope(PragmaCheckedScopeKind Kind, SourceLocation Loc); void DiagnoseUnterminatedPragmaCheckedScopePush(); BoundsExpr *CreateInvalidBoundsExpr(); /// \brief Synthesize the interop type expression implied by the presence /// of a bounds expression. Ty is the original unchecked type. Returns null /// if none exists. InteropTypeExpr *SynthesizeInteropTypeExpr(QualType Ty, bool IsParam); BoundsExpr *CreateCountForArrayType(QualType QT); // _Return_value in Checked C bounds expressions.
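// A hedged sketch of the surface syntax this supports: in a return bounds
// annotation, _Return_value names the value being returned (the exact
// declaration form below is an illustration based on the Checked C spec):
//
//   _Array_ptr<int> f(int Len) : bounds(_Return_value, _Return_value + Len);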
ExprResult ActOnReturnValueExpr(SourceLocation Loc); /// \brief When non-NULL, the type of the '_Return_value' expression. QualType BoundsExprReturnValue; /// \brief RAII object used to temporarily set the type of _Return_value class CheckedCReturnValueRAII { Sema &S; QualType OldReturnValue; public: CheckedCReturnValueRAII(Sema &S, QualType ReturnVal) : S(S) { OldReturnValue = S.BoundsExprReturnValue; S.BoundsExprReturnValue = ReturnVal; } ~CheckedCReturnValueRAII() { S.BoundsExprReturnValue = OldReturnValue; } }; typedef bool (*ParseDeferredBoundsCallBackFn)(void *P, std::unique_ptr<CachedTokens> Toks, ArrayRef<ParmVarDecl *> Params, BoundsAnnotations &Result, const Declarator &D); void SetDeferredBoundsCallBack(void *OpaqueData, ParseDeferredBoundsCallBackFn p); ParseDeferredBoundsCallBackFn DeferredBoundsParser; void *DeferredBoundsParserData; // Represents the context where an expression must be non-modifying. enum NonModifyingContext { NMC_Unknown, NMC_Dynamic_Check, NMC_Count, // Bounds count expression. NMC_Byte_Count, // Bounds byte count expression. NMC_Range, // Bounds range expression. NMC_Function_Return, // Argument for parameter used in function // return bounds. NMC_Function_Parameter // Argument for parameter used in function // parameter bounds. }; // The kind of diagnostic to emit when an expression is not non-modifying. enum NonModifyingMessage { NMM_None, NMM_Error, NMM_Note }; /// \brief Checks whether an expression is non-modifying /// (see Checked C Spec, 3.6.1). Returns true if the expression is non-modifying, /// false otherwise. bool CheckIsNonModifying(Expr *E, NonModifyingContext Req = NonModifyingContext::NMC_Unknown, NonModifyingMessage = NMM_Error); BoundsExpr *CheckNonModifyingBounds(BoundsExpr *Bounds, Expr *E); ExprResult ActOnFunctionTypeApplication(ExprResult TypeFunc, SourceLocation Loc, ArrayRef<TypeArgument> Args); RecordDecl *ActOnRecordTypeApplication(RecordDecl *Base, ArrayRef<TypeArgument> TypeArgs); const ExistentialType *ActOnExistentialType(ASTContext &Context, const Type *TypeVar, QualType InnerType); /// Complete a delayed type application by populating the record's fields with the right types. /// Should only be called once per delayed 'RecordDecl'. void CompleteTypeAppFields(RecordDecl *Incomplete); // Determine whether the given 'RecordDecl' is part of an 'expanding cycle'. // Generic records that form part of an expanding cycle can't be instantiated because they // produce an infinite number of type applications (because we construct the transitive closure // of type applications eagerly). // // Consider the graph of type parameter dependencies as defined below. An expanding cycle // is a cycle in the graph that contains at least one expanding edge. // // We show how the graph is built via an example. Suppose we have three generic structs A<T>, B<U>, C<V>: // // struct A _For_any(T) { struct A<T>* a; struct B<T> *b; } // struct B _For_any(U) { struct C<struct C<U> > *c; } // struct C _For_any(V) { struct A<V>* a; } // // The vertices of the graph are T, U, and V (the type parameters, alpha-renamed if needed). // There is an edge between nodes N1 and N2 if N2 is used in a field anywhere in the position of N1. // If N2 appears at the "top-level" replacing N1, then the resulting edge is "non-expanding". // Otherwise, if N2 appears nested within the argument that replaces N1, then the edge is "expanding".
  typedef bool (*ParseDeferredBoundsCallBackFn)(
      void *P, std::unique_ptr<CachedTokens> Toks,
      ArrayRef<ParmVarDecl *> Params, BoundsAnnotations &Result,
      const Declarator &D);
  void SetDeferredBoundsCallBack(void *OpaqueData,
                                 ParseDeferredBoundsCallBackFn p);
  ParseDeferredBoundsCallBackFn DeferredBoundsParser;
  void *DeferredBoundsParserData;

  // Represents the context where an expression must be non-modifying.
  enum NonModifyingContext {
    NMC_Unknown,
    NMC_Dynamic_Check,
    NMC_Count,              // Bounds count expression.
    NMC_Byte_Count,         // Bounds byte count expression.
    NMC_Range,              // Bounds range expression.
    NMC_Function_Return,    // Argument for parameter used in function
                            // return bounds.
    NMC_Function_Parameter  // Argument for parameter used in function
                            // parameter bounds.
  };

  enum NonModifyingMessage { NMM_None, NMM_Error, NMM_Note };

  /// \brief Checks whether an expression is non-modifying
  /// (see Checked C Spec, 3.6.1). Returns true if the expression is
  /// non-modifying, false otherwise.
  bool CheckIsNonModifying(
      Expr *E, NonModifyingContext Req = NonModifyingContext::NMC_Unknown,
      NonModifyingMessage = NMM_Error);
  BoundsExpr *CheckNonModifyingBounds(BoundsExpr *Bounds, Expr *E);

  ExprResult ActOnFunctionTypeApplication(ExprResult TypeFunc,
                                          SourceLocation Loc,
                                          ArrayRef<TypeArgument> Args);
  RecordDecl *ActOnRecordTypeApplication(RecordDecl *Base,
                                         ArrayRef<TypeArgument> TypeArgs);
  const ExistentialType *ActOnExistentialType(ASTContext &Context,
                                              const Type *TypeVar,
                                              QualType InnerType);

  /// Complete a delayed type application by populating the record's fields
  /// with the right types. Should only be called once per delayed
  /// 'RecordDecl'.
  void CompleteTypeAppFields(RecordDecl *Incomplete);

  // Determine whether the given 'RecordDecl' is part of an 'expanding
  // cycle'. Generic records that form part of an expanding cycle can't be
  // instantiated because they produce an infinite number of type
  // applications (because we construct the transitive closure of type
  // applications eagerly).
  //
  // Consider the graph of type parameter dependencies as defined below. An
  // expanding cycle is a cycle in the graph that contains at least one
  // expanding edge.
  //
  // We show how the graph is built via an example. Suppose we have three
  // generic structs A<T>, B<U>, C<V>:
  //
  //   struct A _For_any(T) { struct A<T>* a; struct B<T> *b; }
  //   struct B _For_any(U) { struct C<struct C<U> > *c; }
  //   struct C _For_any(V) { struct A<V>* a; }
  //
  // The vertices of the graph are T, U, and V (the type parameters, alpha
  // re-named if needed). There is an edge between nodes N1 and N2 if N2 is
  // used in a field anywhere in the position of N1. If N2 appears at the
  // "top-level" replacing N1, then the resulting edge is "non-expanding".
  // Otherwise, if N2 appears nested within the argument that replaces N1,
  // then the edge is "expanding".
  //
  // In our example the edges are:
  //
  //   non-expanding: T -> T, T -> U, V -> T, U -> V
  //   expanding:     U => V
  //
  // T -> U, U => V, V -> T is an expanding cycle because it contains the
  // expanding edge U => V.
  //
  // The cycle will be detected when C is processed (because C is defined
  // last). If we tried to instantiate C, we would end up performing the
  // following type applications:
  //   A<V>, B<V>, C<C<V>>, A<C<V>>, B<C<V>>, C<C<C<V>>>, ...
  //
  // The definition of expanding cycle is adapted from the 'ECMA 335 Common
  // Language Infrastructure (CLI) Partitions I to VI' standard.
  // Specifically, Partition II, section II.9.2 'Generics and recursive
  // inheritance graphs'.
  bool DiagnoseExpandingCycles(RecordDecl *Base, SourceLocation Loc);

  QualType SubstituteTypeArgs(QualType QT, ArrayRef<TypeArgument> TypeArgs);
  std::vector<const TypedefNameDecl *> FindFreeVariableDecls(QualType T);

  bool AbstractForFunctionType(BoundsAnnotations &BA,
                               ArrayRef<DeclaratorChunk::ParamInfo> Params);

  /// \brief Take a bounds expression with positional parameters from a
  /// function type and substitute DeclRefs to the corresponding parameters
  /// in Params.
  BoundsExpr *ConcretizeFromFunctionType(BoundsExpr *Expr,
                                         ArrayRef<ParmVarDecl *> Params);

  /// \brief Take a member bounds expression with member references and
  /// replace the member references with member access expressions using
  /// MemberBase as the base. Returns a nullptr if there is an error.
  BoundsExpr *MakeMemberBoundsConcrete(Expr *MemberBase, bool IsArrow,
                                       BoundsExpr *Bounds);
  BoundsExpr *
  ConcretizeFromFunctionTypeWithArgs(BoundsExpr *Bounds, ArrayRef<Expr *> Args,
                                     NonModifyingContext ErrorKind);

  /// ConvertToFullyCheckedType: convert an expression E to a fully checked
  /// type. This is used to retype declrefs and member exprs in checked
  /// scopes with bounds-safe interfaces. The Checked C spec says that such
  /// uses in checked scopes shall be treated as having "checked type".
  ExprResult ConvertToFullyCheckedType(Expr *E, InteropTypeExpr *BA,
                                       bool IsParamUse, ExprValueKind VK);

  /// GetArrayPtrDereference - determine if an lvalue expression is a
  /// dereference of an _Array_ptr or _Nt_array_ptr (via '*' or an array
  /// subscript operator). If it is, return the actual dereference expression
  /// and set Result to the pointer type being dereferenced. Otherwise,
  /// return null.
  Expr *GetArrayPtrDereference(Expr *E, QualType &Result);

  /// MakeAssignmentImplicitCastExplicit: E has had assignment conversion
  /// rules applied to it. If an implicit cast has been introduced because of
  /// the assignment conversion rules, replace it with an explicit cast.
  /// This allows us to substitute E into other operator expressions without
  /// worrying about the different implicit conversion rules between
  /// assignments and other operators. Sema tree rewriting assumes that
  /// semantic analysis will recreate implicit casts. That doesn't happen
  /// properly if E is taken from an assignment expression and used in
  /// another operator expression.
  Expr *MakeAssignmentImplicitCastExplicit(Expr *E);

  enum BoundsDeclarationCheck { BDC_Assignment, BDC_Initialization };

  /// \brief Check that an address-of operation is not taking the
  /// address of members used in bounds.
  void CheckAddressTakenMembers(UnaryOperator *AddrOf);

  /// \brief Check whether E contains a return value expression.
  bool ContainsReturnValueExpr(Expr *E);
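  // Illustrative only (source-level sketch): what GetArrayPtrDereference
  // above recognizes, assuming p has type _Array_ptr<int>:
  //
  //   *p       // dereference via '*'      -> the dereference expression
  //   p[i]     // array subscript operator -> likewise
  //   x + 1    // not an _Array_ptr dereference -> null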
  /// \brief Wrap a call expression in a Checked C temporary binding
  /// expression, if a temporary is needed to describe the bounds
  /// of the result of the call expression.
  ExprResult CreateTemporaryForCallIfNeeded(ExprResult R);

  /// CheckFunctionBodyBoundsDecls - check bounds declarations within a
  /// function body.
  void CheckFunctionBodyBoundsDecls(FunctionDecl *FD, Stmt *Body);

  /// CheckTopLevelBoundsDecls - check bounds declarations for variable
  /// declarations not within a function body.
  void CheckTopLevelBoundsDecls(VarDecl *VD);

  // WarnDynamicCheckAlwaysFails - Adds a warning if an explicit dynamic
  // check will always fail.
  void WarnDynamicCheckAlwaysFails(const Expr *Condition);

  // If the VarDecl D has a byte_count or count bounds expression,
  // NormalizeBounds expands it to a range bounds expression. The expanded
  // range bounds are attached to the VarDecl D to avoid recomputing the
  // normalized bounds for D.
  BoundsExpr *NormalizeBounds(const VarDecl *D);

  // This is a wrapper around CheckBoundsDeclaration::ExpandToRange. This
  // provides an easy way to invoke this function from outside the class.
  // Given a byte_count or count bounds expression for the VarDecl D,
  // ExpandToRange will expand it to a range bounds expression.
  BoundsExpr *ExpandBoundsToRange(const VarDecl *D, const BoundsExpr *B);

  //
  // Track variables that in-scope bounds declarations depend upon.
  // TODO: generalize this to other lvalue expressions.
  class BoundsDependencyTracker {
  public:
    typedef SmallVector<VarDecl *, 2> VarBoundsDecls;
    typedef VarBoundsDecls::iterator VarBoundsIterator;
    typedef llvm::iterator_range<VarBoundsIterator> VarBoundsIteratorRange;
    // Mapping from variables to bounds that depend upon the variables.
    typedef std::map<VarDecl *, VarBoundsDecls> DependentMap;

  private:
    // Map variables to the bounds declarations that are
    // in scope and depend upon them.
    DependentMap Map;

    // Track the bounds that are in scope so that we can remove them from
    // the dependent map when the scope is exited.
    std::vector<VarDecl *> BoundsInScope;

  public:
    BoundsDependencyTracker() {}

    // Call these when entering/exiting scopes so that we can track when
    // variables go out of scope. EnterScope returns an integer
    // that should be passed to the corresponding ExitScope call.
    unsigned EnterScope();
    void ExitScope(unsigned scopeBegin);

    // If D has a bounds declaration, add its dependencies to the existing
    // scope.
    void Add(VarDecl *D);

    VarBoundsIteratorRange DependentBoundsDecls(VarDecl *D) {
      auto Iter = Map.find(D);
      if (Iter == Map.end())
        return VarBoundsIteratorRange(nullptr, nullptr);
      return VarBoundsIteratorRange(Iter->second.begin(),
                                    Iter->second.end());
    }

    void Dump(raw_ostream &OS);
  };

  BoundsDependencyTracker BoundsDependencies;
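  // A minimal usage sketch (hypothetical caller) of the scope protocol
  // above: EnterScope returns a marker that must be handed back to the
  // matching ExitScope call so bounds registered in between are dropped:
  //
  //   unsigned Marker = BoundsDependencies.EnterScope();
  //   BoundsDependencies.Add(VD);  // VD: a VarDecl with a bounds declaration
  //   // ... process declarations and statements in the scope ...
  //   BoundsDependencies.ExitScope(Marker);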
  // Map expressions that modify lvalues (assignments and pre/post
  // increment/decrement operations) to bounds that may depend on the
  // modified lvalues. We check the validity of bounds declarations after
  // expression statements using data flow analysis. During the analysis,
  // we need to know whether an expression modifies an lvalue involved in a
  // bounds invariant. The AST traversal order for determining this is
  // lexical and conflicts with preferred orderings for dataflow analysis,
  // so we precompute this information before analyzing a function body.
  class ModifiedBoundsDependencies {
  public:
    // A C lvalue expression with bounds on values stored in the lvalue.
    // It is either a variable or a member expression.
    struct LValueWithBounds {
      LValueWithBounds(llvm::PointerUnion<VarDecl *, MemberExpr *> Target,
                       BoundsExpr *Bounds)
          : Target(Target), Bounds(Bounds) {}
      llvm::PointerUnion<VarDecl *, MemberExpr *> Target;
      BoundsExpr *Bounds; // Bounds for target.
    };

    typedef SmallVector<LValueWithBounds, 2> LValuesWithBounds;

    // Map assignments or pre/post increment/decrement expressions to bounds
    // that depend upon the lvalue modified by the expressions.
    typedef std::map<Expr *, LValuesWithBounds> DependentBounds;

    void Add(Expr *E, llvm::PointerUnion<VarDecl *, MemberExpr *> LValue,
             BoundsExpr *Bounds);
    void Dump(raw_ostream &OS);

    ModifiedBoundsDependencies() {}

    DependentBounds Tracker;
  };

  /// \brief Compute a mapping from statements that modify lvalues to
  /// in-scope bounds declarations that depend on those lvalues.
  /// FD is the function being declared and Body is the body of the
  /// function. They are passed in separately because Body hasn't
  /// been attached to FD yet.
  void ComputeBoundsDependencies(ModifiedBoundsDependencies &Tracker,
                                 FunctionDecl *FD, Stmt *Body);

  /// \brief RAII class used to indicate that we are substituting an
  /// expression into another expression during bounds checking. We need to
  /// suppress diagnostics emission during this. We are doing
  /// type-preserving substitutions, so we don't expect semantic errors
  /// during substitution. There could be warnings, which would confuse
  /// users. The warnings could also be escalated to errors, which would
  /// cause compilation failures.
  class ExprSubstitutionScope {
    Sema &SemaRef;
    bool PrevDisableSubstitionDiagnostics;

  public:
    explicit ExprSubstitutionScope(Sema &SemaRef,
                                   bool DisableDiagnostics = true)
        : SemaRef(SemaRef), PrevDisableSubstitionDiagnostics(
                                SemaRef.DisableSubstitionDiagnostics) {
      SemaRef.DisableSubstitionDiagnostics = DisableDiagnostics;
    }
    ~ExprSubstitutionScope() {
      SemaRef.DisableSubstitionDiagnostics = PrevDisableSubstitionDiagnostics;
    }
  };
  bool DisableSubstitionDiagnostics;

  ExprResult ActOnPackExpression(Expr *PackedExpr, QualType ExistType,
                                 TypeArgument SubstArg,
                                 SourceLocation StartLoc,
                                 SourceLocation EndLoc);

  //===---------------------------- Clang Extensions ----------------------===//

  /// __builtin_convertvector(...)
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc, IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               const ParsedAttributesView &AttrList,
                               UsingDirectiveDecl *&UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();
  NamespaceDecl *lookupStdExperimentalNamespace();

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

private:
  // A cache representing if we've fully checked the various comparison
  // category types stored in ASTContext. The bit-index corresponds to the
  // integer value of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
  /// Lookup the specified comparison category types in the standard
  /// library, and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  ///
  /// \return The type of the comparison category type corresponding to the
  /// specified Kind, or a null type if an error occurs
  QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                       SourceLocation Loc);

  /// Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

  /// Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                            SourceLocation NamespcLoc, CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            const ParsedAttributesView &AttrList);
  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc, IdentifierInfo *Alias,
                               CXXScopeSpec &SS, SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);
  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                               const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc);
  NamedDecl *BuildUsingDeclaration(
      Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
      bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
      DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
      const ParsedAttributesView &AttrList, bool IsInstantiation);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                                ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and
  /// the corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
  CXXConstructorDecl *
  findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                            ConstructorUsingShadowDecl *DerivedShadow);

  Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                              SourceLocation UsingLoc,
                              SourceLocation TypenameLoc, CXXScopeSpec &SS,
                              UnqualifiedId &Name, SourceLocation EllipsisLoc,
                              const ParsedAttributesView &AttrList);
  Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                              MultiTemplateParamsArg TemplateParams,
                              SourceLocation UsingLoc, UnqualifiedId &Name,
                              const ParsedAttributesView &AttrList,
                              TypeResult Type, Decl *DeclFromDeclSpec);
  /// BuildCXXConstructExpr - Creates a complete call to a constructor,
  /// including handling of its default argument expressions.
  ///
  /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
  ExprResult BuildCXXConstructExpr(
      SourceLocation ConstructLoc, QualType DeclInitType,
      NamedDecl *FoundDecl, CXXConstructorDecl *Constructor,
      MultiExprArg Exprs, bool HadMultipleCandidates,
      bool IsListInitialization, bool IsStdInitListInitialization,
      bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange);

  /// Build a CXXConstructExpr whose constructor has already been resolved
  /// if it denotes an inherited constructor.
  ExprResult BuildCXXConstructExpr(
      SourceLocation ConstructLoc, QualType DeclInitType,
      CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
      bool HadMultipleCandidates, bool IsListInitialization,
      bool IsStdInitListInitialization, bool RequiresZeroInit,
      unsigned ConstructKind, SourceRange ParenRange);

  // FIXME: Can we remove this and have the above BuildCXXConstructExpr
  // check if the constructor can be elidable?
  ExprResult BuildCXXConstructExpr(
      SourceLocation ConstructLoc, QualType DeclInitType,
      NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable,
      MultiExprArg Exprs, bool HadMultipleCandidates,
      bool IsListInitialization, bool IsStdInitListInitialization,
      bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange);

  ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

  /// Instantiate or parse a C++ default argument expression as necessary.
  /// Return true on error.
  bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                              ParmVarDecl *Param);

  /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
  /// the default expr if needed.
  ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                                    ParmVarDecl *Param);

  /// FinalizeVarWithDestructor - Prepare for calling destructor on the
  /// constructed variable.
  void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

  /// Helper class that collects exception specifications for
  /// implicitly-declared special member functions.
  class ImplicitExceptionSpecification {
    // Pointer to allow copying
    Sema *Self;

    // We order exception specifications thus:
    // noexcept is the most restrictive, but is only used in C++11.
    // throw() comes next.
    // Then a throw(collected exceptions)
    // Finally no specification, which is expressed as noexcept(false).
    // throw(...) is used instead if any called function uses it.
    ExceptionSpecificationType ComputedEST;
    llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
    SmallVector<QualType, 4> Exceptions;

    void ClearExceptions() {
      ExceptionsSeen.clear();
      Exceptions.clear();
    }

  public:
    explicit ImplicitExceptionSpecification(Sema &Self)
        : Self(&Self), ComputedEST(EST_BasicNoexcept) {
      if (!Self.getLangOpts().CPlusPlus11)
        ComputedEST = EST_DynamicNone;
    }

    /// Get the computed exception specification type.
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(!isComputedNoexcept(ComputedEST) &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E);

    /// Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        ///   The exception-specification is noexcept(false) if the set of
        ///   potential exceptions of the special member function contains
        ///   "any"
        ESI.Type = EST_NoexceptFalse;
        ESI.NoexceptExpr =
            Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                           CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy assignment operator of a class will have, and whether the
  /// parameter will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted move
  /// constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted move
  /// assignment operator of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// destructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification an inheriting
  /// constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
                                     CXXConstructorDecl *CD);

  /// Evaluate the implicit exception specification for a defaulted
  /// special member function.
  void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

  /// Check the given noexcept-specifier, convert its expression, and
  /// compute the appropriate ExceptionSpecificationType.
  ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc,
                               Expr *NoexceptExpr,
                               ExceptionSpecificationType &EST);

  /// Check the given exception-specification and update the
  /// exception specification information with the results.
  void checkExceptionSpecification(
      bool IsTopLevel, ExceptionSpecificationType EST,
      ArrayRef<ParsedType> DynamicExceptions,
      ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr,
      SmallVectorImpl<QualType> &Exceptions,
      FunctionProtoType::ExceptionSpecInfo &ESI);

  /// Determine if we're in a case where we need to (incorrectly) eagerly
  /// parse an exception specification to work around a libstdc++ bug.
  bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
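  // A minimal usage sketch (hypothetical, for illustration): fold each
  // special member the implicit definition would invoke into the collected
  // data, then convert the result for use in a function prototype:
  //
  //   ImplicitExceptionSpecification Spec(*this);
  //   for (const CXXMethodDecl *M : SubobjectMembersInvoked)  // assumed list
  //     Spec.CalledDecl(Loc, M);
  //   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();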
  /// Add an exception-specification to the given member function
  /// (or member function template). The exception-specification was parsed
  /// after the method itself was declared.
  void actOnDelayedExceptionSpecification(
      Decl *Method, ExceptionSpecificationType EST,
      SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions,
      ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr);

  class InheritedConstructorInfo;

  /// Determine if a special member function should have a deleted
  /// definition when it is defaulted.
  bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
                                 InheritedConstructorInfo *ICI = nullptr,
                                 bool Diagnose = false);

  /// Declare the implicit default constructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// default constructor will be added.
  ///
  /// \returns The implicitly-declared default constructor.
  CXXConstructorDecl *
  DeclareImplicitDefaultConstructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitDefaultConstructor - Checks for feasibility of
  /// defining this constructor as the default constructor.
  void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
                                        CXXConstructorDecl *Constructor);

  /// Declare the implicit destructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// destructor will be added.
  ///
  /// \returns The implicitly-declared destructor.
  CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitDestructor - Checks for feasibility of
  /// defining this destructor as the default destructor.
  void DefineImplicitDestructor(SourceLocation CurrentLocation,
                                CXXDestructorDecl *Destructor);

  /// Build an exception spec for destructors that don't have one.
  ///
  /// C++11 says that user-defined destructors with no exception spec get
  /// one that looks as if the destructor was implicitly declared.
  void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);

  /// Define the specified inheriting constructor.
  void DefineInheritingConstructor(SourceLocation UseLoc,
                                   CXXConstructorDecl *Constructor);

  /// Declare the implicit copy constructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// copy constructor will be added.
  ///
  /// \returns The implicitly-declared copy constructor.
  CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitCopyConstructor - Checks for feasibility of
  /// defining this constructor as the copy constructor.
  void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
                                     CXXConstructorDecl *Constructor);

  /// Declare the implicit move constructor for the given class.
  ///
  /// \param ClassDecl The Class declaration into which the implicit
  /// move constructor will be added.
  ///
  /// \returns The implicitly-declared move constructor, or NULL if it
  /// wasn't declared.
  CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitMoveConstructor - Checks for feasibility of
  /// defining this constructor as the move constructor.
  void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                     CXXConstructorDecl *Constructor);

  /// Declare the implicit copy assignment operator for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// copy assignment operator will be added.
  ///
  /// \returns The implicitly-declared copy assignment operator.
  CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
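  // Illustrative only (standard C++ semantics): for a class that declares
  // none of its special members, the declarations above are produced on
  // demand:
  //
  //   struct Point { int x, y; };
  //   // implicitly declared: Point(), Point(const Point &),
  //   // Point(Point &&), operator=(const Point &), operator=(Point &&),
  //   // and ~Point()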
  /// Defines an implicitly-declared copy assignment operator.
  void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                    CXXMethodDecl *MethodDecl);

  /// Declare the implicit move assignment operator for the given class.
  ///
  /// \param ClassDecl The Class declaration into which the implicit
  /// move assignment operator will be added.
  ///
  /// \returns The implicitly-declared move assignment operator, or NULL if
  /// it wasn't declared.
  CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

  /// Defines an implicitly-declared move assignment operator.
  void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                    CXXMethodDecl *MethodDecl);

  /// Force the declaration of any implicitly-declared members of this
  /// class.
  void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

  /// Check a completed declaration of an implicit special member.
  void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

  /// Determine whether the given function is an implicitly-deleted
  /// special member function.
  bool isImplicitlyDeleted(FunctionDecl *FD);

  /// Check whether 'this' shows up in the type of a static member
  /// function after the (naturally empty) cv-qualifier-seq would be.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

  /// Whether 'this' shows up in the exception specification of a static
  /// member function.
  bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

  /// Check whether 'this' shows up in the attributes of the given
  /// static member function.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

  /// MaybeBindToTemporary - If the passed in expression has a record type
  /// with a non-trivial destructor, this will return CXXBindTemporaryExpr.
  /// Otherwise it simply returns the passed in expression.
  ExprResult MaybeBindToTemporary(Expr *E);

  bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                               MultiExprArg ArgsPtr, SourceLocation Loc,
                               SmallVectorImpl<Expr *> &ConvertedArgs,
                               bool AllowExplicit = false,
                               bool IsListInitialization = false);

  ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                          SourceLocation NameLoc,
                                          IdentifierInfo &Name);

  ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
                                Scope *S, CXXScopeSpec &SS,
                                bool EnteringContext);
  ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II,
                               SourceLocation NameLoc, Scope *S,
                               CXXScopeSpec &SS, ParsedType ObjectType,
                               bool EnteringContext);

  ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                          ParsedType ObjectType);

  // Checks that reinterpret casts don't have undefined behavior.
  void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                      bool IsDereference, SourceRange Range);

  /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
  ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                               SourceLocation LAngleBracketLoc, Declarator &D,
                               SourceLocation RAngleBracketLoc,
                               SourceLocation LParenLoc, Expr *E,
                               SourceLocation RParenLoc);

  ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                               TypeSourceInfo *Ty, Expr *E,
                               SourceRange AngleBrackets, SourceRange Parens);

  ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
                                     ExprResult Operand,
                                     SourceLocation RParenLoc);
  ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc,
                                     TypeSourceInfo *TSI, Expr *Operand,
                                     SourceLocation RParenLoc);

  ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc,
                            Expr *Operand, SourceLocation RParenLoc);

  /// ActOnCXXTypeid - Parse typeid( something ).
  ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                            bool isType, void *TyOrExpr,
                            SourceLocation RParenLoc);

  ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                            Expr *Operand, SourceLocation RParenLoc);

  /// ActOnCXXUuidof - Parse __uuidof( something ).
  ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
                            bool isType, void *TyOrExpr,
                            SourceLocation RParenLoc);

  /// Handle a C++1z fold-expression: ( expr op ... op expr ).
  ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              tok::TokenKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc);
  ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              BinaryOperatorKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc,
                              Optional<unsigned> NumExpansions);
  ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                   BinaryOperatorKind Operator);

  /// ActOnCXXThis - Parse 'this' pointer.
  ExprResult ActOnCXXThis(SourceLocation loc);

  /// Build a CXXThisExpr and mark it referenced in the current context.
  Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
  void MarkThisReferenced(CXXThisExpr *This);

  /// Try to retrieve the type of the 'this' pointer.
  ///
  /// \returns The type of 'this', if possible. Otherwise, returns a NULL
  /// type.
  QualType getCurrentThisType();

  /// When non-NULL, the C++ 'this' expression is allowed despite the
  /// current context not being a non-static member function. In such cases,
  /// this provides the type used for 'this'.
  QualType CXXThisTypeOverride;

  /// RAII object used to temporarily allow the C++ 'this' expression
  /// to be used, with the given qualifiers on the current class type.
  class CXXThisScopeRAII {
    Sema &S;
    QualType OldCXXThisTypeOverride;
    bool Enabled;

  public:
    /// Introduce a new scope where 'this' may be allowed (when enabled),
    /// using the given declaration (which is either a class template or a
    /// class) along with the qualifiers placed on '*this'.
    CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                     bool Enabled = true);

    ~CXXThisScopeRAII();
  };
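  // A minimal usage sketch (hypothetical, for illustration): allow 'this'
  // while processing a late-parsed construct that belongs to RD:
  //
  //   {
  //     CXXThisScopeRAII ThisScope(*this, RD,
  //                                /*CXXThisTypeQuals=*/Qualifiers());
  //     // 'this' may be used here, typed as a pointer to RD
  //   } // previous CXXThisTypeOverride restored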
  /// Make sure the value of 'this' is actually available in the current
  /// context, if it is a potentially evaluated context.
  ///
  /// \param Loc The location at which the capture of 'this' occurs.
  ///
  /// \param Explicit Whether 'this' is explicitly captured in a lambda
  /// capture list.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to
  /// capture. This is useful when enclosing lambdas must speculatively
  /// capture 'this' that may or may not be used in certain specializations
  /// of a nested generic lambda (depending on whether the name resolves to
  /// a non-static member function or a static function).
  /// \return returns 'true' if failed, 'false' if success.
  bool CheckCXXThisCapture(
      SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true,
      const unsigned *const FunctionScopeIndexToStopAt = nullptr,
      bool ByCopy = false);

  /// Determine whether the given type is the type of *this that is used
  /// outside of the body of a member function for a type that is currently
  /// being defined.
  bool isThisOutsideMemberFunctionBody(QualType BaseType);

  /// ActOnCXXBoolLiteral - Parse {true,false} literals.
  ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
  ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  ExprResult ActOnObjCAvailabilityCheckExpr(
      llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc,
      SourceLocation RParen);

  /// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
  ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

  /// ActOnCXXThrow - Parse throw expressions.
  ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
  ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                           bool IsThrownVarInScope);
  bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy,
                            Expr *E);

  /// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
  /// Can be interpreted either as function-style casting ("int(x)")
  /// or class type construction ("ClassType(x,y,z)")
  /// or creation of a value-initialized type ("int()").
  ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
                                       SourceLocation LParenOrBraceLoc,
                                       MultiExprArg Exprs,
                                       SourceLocation RParenOrBraceLoc,
                                       bool ListInitialization);

  ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
                                       SourceLocation LParenLoc,
                                       MultiExprArg Exprs,
                                       SourceLocation RParenLoc,
                                       bool ListInitialization);

  /// ActOnCXXNew - Parsed a C++ 'new' expression.
  ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                         SourceLocation PlacementLParen,
                         MultiExprArg PlacementArgs,
                         SourceLocation PlacementRParen,
                         SourceRange TypeIdParens, Declarator &D,
                         Expr *Initializer);
  ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
                         SourceLocation PlacementLParen,
                         MultiExprArg PlacementArgs,
                         SourceLocation PlacementRParen,
                         SourceRange TypeIdParens, QualType AllocType,
                         TypeSourceInfo *AllocTypeInfo,
                         Optional<Expr *> ArraySize,
                         SourceRange DirectInitRange, Expr *Initializer);

  /// Determine whether \p FD is an aligned allocation or deallocation
  /// function that is unavailable.
  bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;

  /// Produce diagnostics if \p FD is an aligned allocation or deallocation
  /// function that is unavailable.
  void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
                                            SourceLocation Loc);

  bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
                          SourceRange R);

  /// The scope in which to find allocation functions.
  enum AllocationFunctionScope {
    /// Only look for allocation functions in the global scope.
    AFS_Global,
    /// Only look for allocation functions in the scope of the
    /// allocated class.
    AFS_Class,
    /// Look for allocation functions in both the global scope
    /// and in the scope of the allocated class.
    AFS_Both
  };

  /// Finds the overloads of operator new and delete that are appropriate
  /// for the allocation.
  bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
                               AllocationFunctionScope NewScope,
                               AllocationFunctionScope DeleteScope,
                               QualType AllocType, bool IsArray,
                               bool &PassAlignment, MultiExprArg PlaceArgs,
                               FunctionDecl *&OperatorNew,
                               FunctionDecl *&OperatorDelete,
                               bool Diagnose = true);
  void DeclareGlobalNewDelete();
  void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
                                       ArrayRef<QualType> Params);

  bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
                                DeclarationName Name,
                                FunctionDecl *&Operator,
                                bool Diagnose = true);
  FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
                                              bool CanProvideSize,
                                              bool Overaligned,
                                              DeclarationName Name);
  FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
                                                      CXXRecordDecl *RD);

  /// ActOnCXXDelete - Parsed a C++ 'delete' expression
  ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
                            bool ArrayForm, Expr *Operand);
  void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
                            bool IsDelete, bool CallCanBeVirtual,
                            bool WarnOnNonAbstractTypes,
                            SourceLocation DtorLoc);

  ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
                               Expr *Operand, SourceLocation RParen);
  ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
                                  SourceLocation RParen);

  /// Parsed one of the type trait support pseudo-functions.
  ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                            ArrayRef<ParsedType> Args,
                            SourceLocation RParenLoc);
  ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                            ArrayRef<TypeSourceInfo *> Args,
                            SourceLocation RParenLoc);

  /// ActOnArrayTypeTrait - Parsed one of the binary type trait support
  /// pseudo-functions.
  ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                                 ParsedType LhsTy, Expr *DimExpr,
                                 SourceLocation RParen);
  ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                                 TypeSourceInfo *TSInfo, Expr *DimExpr,
                                 SourceLocation RParen);

  /// ActOnExpressionTrait - Parsed one of the unary type trait support
  /// pseudo-functions.
  ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc,
                                  Expr *Queried, SourceLocation RParen);
  ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc,
                                  Expr *Queried, SourceLocation RParen);

  ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base,
                                          SourceLocation OpLoc,
                                          tok::TokenKind OpKind,
                                          ParsedType &ObjectType,
                                          bool &MayBePseudoDestructor);

  ExprResult BuildPseudoDestructorExpr(
      Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind,
      const CXXScopeSpec &SS, TypeSourceInfo *ScopeType,
      SourceLocation CCLoc, SourceLocation TildeLoc,
      PseudoDestructorTypeStorage DestroyedType);

  ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                       SourceLocation OpLoc,
                                       tok::TokenKind OpKind,
                                       CXXScopeSpec &SS,
                                       UnqualifiedId &FirstTypeName,
                                       SourceLocation CCLoc,
                                       SourceLocation TildeLoc,
                                       UnqualifiedId &SecondTypeName);

  ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                       SourceLocation OpLoc,
                                       tok::TokenKind OpKind,
                                       SourceLocation TildeLoc,
                                       const DeclSpec &DS);

  /// MaybeCreateExprWithCleanups - If the current full-expression
  /// requires any cleanups, surround it with a ExprWithCleanups node.
  /// Otherwise, just returns the passed-in expression.
  Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
  Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
  ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);

  MaterializeTemporaryExpr *
  CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
                                 bool BoundToLvalueReference);

  ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
    return ActOnFinishFullExpr(
        Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
  }
  ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
                                 bool DiscardedValue,
                                 bool IsConstexpr = false);
  StmtResult ActOnFinishFullStmt(Stmt *Stmt);

  // Marks SS invalid if it represents an incomplete type.
  bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);

  DeclContext *computeDeclContext(QualType T);
  DeclContext *computeDeclContext(const CXXScopeSpec &SS,
                                  bool EnteringContext = false);
  bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
  CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);

  /// The parser has parsed a global nested-name-specifier '::'.
  ///
  /// \param CCLoc The location of the '::'.
  ///
  /// \param SS The nested-name-specifier, which will be updated in-place
  /// to reflect the parsed nested-name-specifier.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);

  /// The parser has parsed a '__super' nested-name-specifier.
  ///
  /// \param SuperLoc The location of the '__super' keyword.
  ///
  /// \param ColonColonLoc The location of the '::'.
  ///
  /// \param SS The nested-name-specifier, which will be updated in-place
  /// to reflect the parsed nested-name-specifier.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
                                SourceLocation ColonColonLoc,
                                CXXScopeSpec &SS);

  bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
                                       bool *CanCorrect = nullptr);
  NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);

  /// Keeps information about an identifier in a nested-name-spec.
  ///
  struct NestedNameSpecInfo {
    /// The type of the object, if we're parsing nested-name-specifier in
    /// a member access expression.
    ParsedType ObjectType;

    /// The identifier preceding the '::'.
    IdentifierInfo *Identifier;

    /// The location of the identifier.
    SourceLocation IdentifierLoc;

    /// The location of the '::'.
    SourceLocation CCLoc;

    /// Creates info object for the most typical case.
    NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                       SourceLocation ColonColonLoc,
                       ParsedType ObjectType = ParsedType())
        : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
          CCLoc(ColonColonLoc) {}

    NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                       SourceLocation ColonColonLoc, QualType ObjectType)
        : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
          IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {}
  };

  bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
                                    NestedNameSpecInfo &IdInfo);

  bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
                                   bool EnteringContext, CXXScopeSpec &SS,
                                   NamedDecl *ScopeLookupResult,
                                   bool ErrorRecoveryLookup,
                                   bool *IsCorrectedToColon = nullptr,
                                   bool OnlyNamespace = false);

  /// The parser has parsed a nested-name-specifier 'identifier::'.
  ///
  /// \param S The scope in which this nested-name-specifier occurs.
  ///
  /// \param IdInfo Parser information about an identifier in the
  /// nested-name-spec.
  ///
  /// \param EnteringContext Whether we're entering the context nominated by
  /// this nested-name-specifier.
  ///
  /// \param SS The nested-name-specifier, which is both an input
  /// parameter (the nested-name-specifier before this type) and an
  /// output parameter (containing the full nested-name-specifier,
  /// including this new type).
  ///
  /// \param ErrorRecoveryLookup If true, then this method is called to
  /// improve error recovery. In this case do not emit error message.
  ///
  /// \param IsCorrectedToColon If not null, suggestions to replace '::' ->
  /// ':' are allowed. The bool value pointed by this parameter is set to
  /// 'true' if the identifier is treated as if it was followed by ':',
  /// not '::'.
  ///
  /// \param OnlyNamespace If true, only considers namespaces in lookup.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
                                   bool EnteringContext, CXXScopeSpec &SS,
                                   bool ErrorRecoveryLookup = false,
                                   bool *IsCorrectedToColon = nullptr,
                                   bool OnlyNamespace = false);

  ExprResult ActOnDecltypeExpression(Expr *E);

  bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
                                           const DeclSpec &DS,
                                           SourceLocation ColonColonLoc);

  bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
                                 NestedNameSpecInfo &IdInfo,
                                 bool EnteringContext);

  /// The parser has parsed a nested-name-specifier
  /// 'template[opt] template-name < template-args >::'.
  ///
  /// \param S The scope in which this nested-name-specifier occurs.
  ///
  /// \param SS The nested-name-specifier, which is both an input
  /// parameter (the nested-name-specifier before this type) and an
  /// output parameter (containing the full nested-name-specifier,
  /// including this new type).
  ///
  /// \param TemplateKWLoc the location of the 'template' keyword, if any.
  /// \param TemplateName the template name.
  /// \param TemplateNameLoc The location of the template name.
  /// \param LAngleLoc The location of the opening angle bracket ('<').
  /// \param TemplateArgs The template arguments.
  /// \param RAngleLoc The location of the closing angle bracket ('>').
  /// \param CCLoc The location of the '::'.
  ///
  /// \param EnteringContext Whether we're entering the context of the
  /// nested-name-specifier.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool ActOnCXXNestedNameSpecifier(
      Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      TemplateTy TemplateName, SourceLocation TemplateNameLoc,
      SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
      SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext);

  /// Given a C++ nested-name-specifier, produce an annotation value
  /// that the parser can use later to reconstruct the given
  /// nested-name-specifier.
  ///
  /// \param SS A nested-name-specifier.
  ///
  /// \returns A pointer containing all of the information in the
  /// nested-name-specifier \p SS.
  void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);

  /// Given an annotation pointer for a nested-name-specifier, restore
  /// the nested-name-specifier structure.
  ///
  /// \param Annotation The annotation pointer, produced by
  /// \c SaveNestedNameSpecifierAnnotation().
  ///
  /// \param AnnotationRange The source range corresponding to the
  /// annotation.
  ///
  /// \param SS The nested-name-specifier that will be updated with the
  /// contents of the annotation pointer.
  void RestoreNestedNameSpecifierAnnotation(void *Annotation,
                                            SourceRange AnnotationRange,
                                            CXXScopeSpec &SS);

  bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

  /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier
  /// (global scope or nested-name-specifier) is parsed, part of a
  /// declarator-id. After this method is called, according to
  /// [C++ 3.4.3p3], names should be looked up in the declarator-id's scope,
  /// until the declarator is parsed and ActOnCXXExitDeclaratorScope is
  /// called. The 'SS' should be a non-empty valid CXXScopeSpec.
  bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);

  /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
  /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
  /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
  /// Used to indicate that names should revert to being looked up in the
  /// defining scope.
  void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

  /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
  /// initializer for the declaration 'Dcl'.
  /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is
  /// a static data member of class X, names should be looked up in the
  /// scope of class X.
  void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);

  /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
  /// initializer for the declaration 'Dcl'.
  void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);

  /// Create a new lambda closure type.
  CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
                                         TypeSourceInfo *Info,
                                         bool KnownDependent,
                                         LambdaCaptureDefault CaptureDefault);

  /// Start the definition of a lambda expression.
  CXXMethodDecl *
  startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange,
                        TypeSourceInfo *MethodType, SourceLocation EndLoc,
                        ArrayRef<ParmVarDecl *> Params,
                        ConstexprSpecKind ConstexprKind,
                        Optional<std::pair<unsigned, Decl *>> Mangling = None);

  /// Endow the lambda scope info with the relevant properties.
  void buildLambdaScope(sema::LambdaScopeInfo *LSI,
                        CXXMethodDecl *CallOperator,
                        SourceRange IntroducerRange,
                        LambdaCaptureDefault CaptureDefault,
                        SourceLocation CaptureDefaultLoc,
                        bool ExplicitParams, bool ExplicitResultType,
                        bool Mutable);

  /// Perform initialization analysis of the init-capture and perform
  /// any implicit conversions such as an lvalue-to-rvalue conversion if
  /// not being used to initialize a reference.
  ParsedType actOnLambdaInitCaptureInitialization(
      SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
      IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
    return ParsedType::make(buildLambdaInitCaptureInitialization(
        Loc, ByRef, EllipsisLoc, None, Id,
        InitKind != LambdaCaptureInitKind::CopyInit, Init));
  }
  QualType buildLambdaInitCaptureInitialization(
      SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
      Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
      Expr *&Init);

  /// Create a dummy variable within the declcontext of the lambda's
  /// call operator, for name lookup purposes for a lambda init capture.
  ///
  /// CodeGen handles emission of lambda captures, ignoring these dummy
  /// variables appropriately.
  VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                          QualType InitCaptureType,
                                          SourceLocation EllipsisLoc,
                                          IdentifierInfo *Id,
                                          unsigned InitStyle, Expr *Init);
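  // Illustrative only (standard C++14 semantics): the init-captures these
  // hooks analyze. Each init-capture introduces a new closure member
  // initialized from an arbitrary expression rather than from an existing
  // variable of the same name:
  //
  //   auto f = [n = 0, p = std::move(ptr)]() mutable { return ++n; };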
  /// Add an init-capture to a lambda scope.
  void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);

  /// Note that we have finished the explicit captures for the
  /// given lambda.
  void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

  /// \brief This is called after parsing the explicit template parameter
  /// list on a lambda (if it exists) in C++2a.
  void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
                                                ArrayRef<NamedDecl *> TParams,
                                                SourceLocation RAngleLoc);

  /// Introduce the lambda parameters into scope.
  void addLambdaParameters(
      ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
      CXXMethodDecl *CallOperator, Scope *CurScope);

  /// Deduce a block or lambda's return type based on the return
  /// statements present in the body.
  void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);

  /// ActOnStartOfLambdaDefinition - This is called just before we start
  /// parsing the body of a lambda; it analyzes the explicit captures and
  /// arguments, and sets up various data-structures for the body of the
  /// lambda.
  void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
                                    Declarator &ParamInfo, Scope *CurScope);

  /// ActOnLambdaError - If there is an error parsing a lambda, this
  /// callback is invoked to pop the information about the lambda.
  void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
                        bool IsInstantiation = false);

  /// ActOnLambdaExpr - This is called when the body of a lambda expression
  /// was successfully completed.
  ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
                             Scope *CurScope);

  /// Does copying/destroying the captured variable have side effects?
  bool CaptureHasSideEffects(const sema::Capture &From);

  /// Diagnose if an explicit lambda capture is unused. Returns true if a
  /// diagnostic is emitted.
  bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
                                   const sema::Capture &From);

  /// Build a FieldDecl suitable to hold the given capture.
  FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);

  /// Initialize the given capture with a suitable expression.
  ExprResult BuildCaptureInit(const sema::Capture &Capture,
                              SourceLocation ImplicitCaptureLoc,
                              bool IsOpenMPMapping = false);

  /// Complete a lambda-expression having processed and attached the
  /// lambda body.
  ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
                             sema::LambdaScopeInfo *LSI);

  /// Get the return type to use for a lambda's conversion function(s) to
  /// function pointer type, given the type of the call operator.
  QualType
  getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);

  /// Define the "body" of the conversion from a lambda object to a
  /// function pointer.
  ///
  /// This routine doesn't actually define a sensible body; rather, it fills
  /// in the initialization expression needed to copy the lambda object into
  /// the block, and IR generation actually generates the real body of the
  /// block pointer conversion.
  void DefineImplicitLambdaToFunctionPointerConversion(
      SourceLocation CurrentLoc, CXXConversionDecl *Conv);

  /// Define the "body" of the conversion from a lambda object to a
  /// block pointer.
  ///
  /// This routine doesn't actually define a sensible body; rather, it fills
  /// in the initialization expression needed to copy the lambda object into
  /// the block, and IR generation actually generates the real body of the
  /// block pointer conversion.
  void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
                                                    CXXConversionDecl *Conv);

  ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
                                           SourceLocation ConvLocation,
                                           CXXConversionDecl *Conv,
                                           Expr *Src);

  // ParseObjCStringLiteral - Parse Objective-C string literals.
  ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
                                    ArrayRef<Expr *> Strings);
  ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);

  /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
  /// numeric literal expression. Type of the expression will be
  /// "NSNumber *" or "id" if NSNumber is unavailable.
  ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
  ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc,
                                  SourceLocation ValueLoc, bool Value);
  ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);

  /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
  /// '@' prefixed parenthesized expression. The type of the expression will
  /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the
  /// type of ValueType, which is allowed to be a built-in numeric type,
  /// "char *", "const char *" or C structure with attribute 'objc_boxable'.
  ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);

  ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
                                          Expr *IndexExpr,
                                          ObjCMethodDecl *getterMethod,
                                          ObjCMethodDecl *setterMethod);

  ExprResult BuildObjCDictionaryLiteral(
      SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements);

  ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
                                       TypeSourceInfo *EncodedTypeInfo,
                                       SourceLocation RParenLoc);
  ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
                                    CXXConversionDecl *Method,
                                    bool HadMultipleCandidates);

  ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
                                       SourceLocation EncodeLoc,
                                       SourceLocation LParenLoc,
                                       ParsedType Ty,
                                       SourceLocation RParenLoc);

  /// ParseObjCSelectorExpression - Build selector expression for \@selector
  ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc,
                                         SourceLocation SelLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation RParenLoc,
                                         bool WarnMultipleSelectors);

  /// ParseObjCProtocolExpression - Build protocol expression for \@protocol
  ExprResult ParseObjCProtocolExpression(IdentifierInfo *ProtocolName,
                                         SourceLocation AtLoc,
                                         SourceLocation ProtoLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation ProtoIdLoc,
                                         SourceLocation RParenLoc);

  //===--------------------------------------------------------------------===//
  // C++ Declarations
  //
  Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
                                       Expr *LangStr,
                                       SourceLocation LBraceLoc);
  Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec,
                                        SourceLocation RBraceLoc);

  //===--------------------------------------------------------------------===//
  // C++ Classes
  //
  CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
  bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
                          const CXXScopeSpec *SS = nullptr);
  bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);

  bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
                            SourceLocation ColonLoc,
                            const ParsedAttributesView &Attrs);

  NamedDecl *
  ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
                           MultiTemplateParamsArg TemplateParameterLists,
                           Expr *BitfieldWidth, const VirtSpecifiers &VS,
                           InClassInitStyle InitStyle);

  void ActOnStartCXXInClassMemberInitializer();
  void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
                                              SourceLocation EqualLoc,
                                              Expr *Init);

  MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S,
                                    CXXScopeSpec &SS,
                                    IdentifierInfo *MemberOrBase,
                                    ParsedType TemplateTypeTy,
                                    const DeclSpec &DS, SourceLocation IdLoc,
                                    SourceLocation LParenLoc,
                                    ArrayRef<Expr *> Args,
                                    SourceLocation RParenLoc,
                                    SourceLocation EllipsisLoc);

  MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S,
                                    CXXScopeSpec &SS,
                                    IdentifierInfo *MemberOrBase,
                                    ParsedType TemplateTypeTy,
                                    const DeclSpec &DS, SourceLocation IdLoc,
                                    Expr *InitList,
                                    SourceLocation EllipsisLoc);

  MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S,
                                    CXXScopeSpec &SS,
                                    IdentifierInfo *MemberOrBase,
                                    ParsedType TemplateTypeTy,
                                    const DeclSpec &DS, SourceLocation IdLoc,
                                    Expr *Init, SourceLocation EllipsisLoc);

  MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init,
                                       SourceLocation IdLoc);

  MemInitResult BuildBaseInitializer(QualType BaseType,
                                     TypeSourceInfo *BaseTInfo, Expr *Init,
                                     CXXRecordDecl *ClassDecl,
                                     SourceLocation EllipsisLoc);

  MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
                                           CXXRecordDecl *ClassDecl);

  bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
                                CXXCtorInitializer *Initializer);

  bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
                           ArrayRef<CXXCtorInitializer *> Initializers = None);

  void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);

  /// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
  /// mark all the non-trivial destructors of its members and bases as
  /// referenced.
  void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
                                              CXXRecordDecl *Record);

  /// The list of classes whose vtables have been used within
  /// this translation unit, and the source locations at which the
  /// first use occurred.
  typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;

  /// The list of vtables that are required but have not yet been
  /// materialized.
  SmallVector<VTableUse, 16> VTableUses;

  /// The set of classes whose vtables have been used within
  /// this translation unit, and a bit that will be true if the vtable is
  /// required to be emitted (otherwise, it should be emitted only if needed
  /// by code generation).
  llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;

  /// Load any externally-stored vtable uses.
  void LoadExternalVTableUses();

  /// Note that the vtable for the given class was used at the
  /// given location.
  void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
                      bool DefinitionRequired = false);

  /// Mark the exception specifications of all virtual member functions
  /// in the given class as needed.
  void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
                                             const CXXRecordDecl *RD);

  /// MarkVirtualMembersReferenced - Will mark all members of the given
  /// CXXRecordDecl referenced.
  void MarkVirtualMembersReferenced(SourceLocation Loc,
                                    const CXXRecordDecl *RD,
                                    bool ConstexprOnly = false);

  /// Define all of the vtables that have been used in this
  /// translation unit and reference any virtual members used by those
  /// vtables.
  ///
  /// \returns true if any work was done, false otherwise.
  bool DefineUsedVTables();

  void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);

  void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc,
                            ArrayRef<CXXCtorInitializer*> MemInits,
                            bool AnyErrors);
The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void
ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
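// [Editor's sketch, not part of the original header.] A minimal usage pattern for the
// derived-to-base helpers above, assuming a Sema &S and an expression E of class type
// (all local names here are hypothetical):
//
//   CXXCastPath BasePath;
//   if (S.CheckDerivedToBaseConversion(DerivedTy, BaseTy, E->getBeginLoc(),
//                                      E->getSourceRange(), &BasePath))
//     return ExprError();   // inaccessible or ambiguous base; already diagnosed
//   // On success, BasePath records the inheritance path for the implicit cast.
//
// As elsewhere in Sema, a true return value means failure.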
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
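// [Editor's sketch, not part of the original header.] The variadic RequireNonAbstractType
// overload above packages a diagnostic ID plus its arguments into a BoundTypeDiagnoser,
// so a caller can write (the diagnostic ID below is an assumed example):
//
//   if (S.RequireNonAbstractType(Loc, FieldTy,
//                                diag::err_abstract_type_in_decl,
//                                Sema::AbstractFieldType))
//     return; // FieldTy was abstract; the diagnoser has already reported it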
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
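// [Editor's sketch, not part of the original header.] Forming a specialization type from
// a template name and explicitly written arguments with CheckTemplateIdType above,
// assuming a Sema &S and TemplateArgumentLoc values built elsewhere (names hypothetical):
//
//   TemplateArgumentListInfo ArgsInfo(LAngleLoc, RAngleLoc);
//   ArgsInfo.addArgument(FirstArgLoc);
//   QualType T = S.CheckTemplateIdType(Name, TemplateLoc, ArgsInfo);
//   if (T.isNull())
//     return true; // the arguments did not satisfy the template's parameter list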
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, 
unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
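// [Editor's sketch, not part of the original header.] The usual early-exit pattern for
// the pack diagnostics above: reject a construct that mentions an unexpanded pack in a
// position where none may appear (assuming a Sema &S and an initializer Init):
//
//   if (S.DiagnoseUnexpandedParameterPack(Init, Sema::UPPC_Initializer))
//     return true; // err_unexpanded_parameter_pack was emitted with the UPPC_* wording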
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
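// [Editor's sketch, not part of the original header.] The expand-or-retain protocol
// documented above, as a tree transformer would drive it (assuming a Sema &S, collected
// Unexpanded packs, and the active TemplateArgs):
//
//   bool ShouldExpand = false, RetainExpansion = false;
//   Optional<unsigned> NumExpansions;
//   if (S.CheckParameterPacksForExpansion(EllipsisLoc, PatternRange, Unexpanded,
//                                         TemplateArgs, ShouldExpand, RetainExpansion,
//                                         NumExpansions))
//     return true;                 // inconsistent pack lengths; already diagnosed
//   if (ShouldExpand)
//     for (unsigned I = 0; I != *NumExpansions; ++I)
//       { /* substitute the pattern once per element, selecting element I */ }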
enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// A function argument from which we performed template argument /// deduction for a call.
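// [Editor's sketch, not part of the original header.] Callers of the deduction entry
// points above typically branch on the TemplateDeductionResult code (assuming a Sema &S,
// a partial specialization Partial, and its TemplateArgs):
//
//   sema::TemplateDeductionInfo Info(Loc);
//   if (S.DeduceTemplateArguments(Partial, TemplateArgs, Info) != Sema::TDK_Success)
//     return false; // Info records which parameter or argument caused the failure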
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
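// [Editor's sketch, not part of the original header.] Deducing a declared 'auto' type
// from an initializer with DeduceAutoType above (assuming a Sema &S, the declared type's
// TypeSourceInfo *TSI, an Expr *Init, and a VarDecl *VDecl; all names hypothetical):
//
//   QualType DeducedTy;
//   if (S.DeduceAutoType(TSI, Init, DeducedTy) == Sema::DAR_Failed)
//     S.DiagnoseAutoDeductionFailure(VDecl, Init);
//   // DAR_FailedAlreadyDiagnosed asks the caller not to emit a second diagnostic;
//   // on DAR_Succeeded, DeducedTy holds the type with 'auto' replaced.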
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts.
/// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callback is used to print, trace, or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. 
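///
/// A hedged sketch of a typical caller (editor's illustration):
/// \code
///   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
///     // Substitution failures here are SFINAE failures, not hard errors;
///     // *Info, if non-null, collects the suppressed diagnostics.
///   }
/// \endcode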
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up.
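///
/// For example (editor's sketch based on the interface above; \c Info,
/// \c EPI, and \c NumParams are illustrative):
/// \code
///   ExtParameterInfoBuilder Builder;
///   Builder.set(1, Info);  // indices must be set in increasing order
///   EPI.ExtParameterInfos = Builder.getPointerOrNull(NumParams);
/// \endcode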
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
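///
/// A typical call might look like this (editor's sketch; variable names
/// are illustrative):
/// \code
///   SmallVector<Expr *, 8> SubstArgs;
///   if (SubstExprs(CallArgs, /*IsCall=*/true, TemplateArgs, SubstArgs))
///     return ExprError();
/// \endcode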
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const 
ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
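///
/// For example (illustrative Objective-C added by the editor):
/// \code
///   [super init];       // ObjCSuperMessage
///   [myObject class];   // ObjCInstanceMessage: identifier names a variable
///   [NSString alloc];   // ObjCClassMessage: identifier names a type
/// \endcode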
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . 
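/// For example:
/// \code
///   #pragma GCC visibility push(hidden)
///   void InternalHelper();  // receives hidden visibility
///   #pragma GCC visibility pop
/// \endcode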
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on".
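///
/// For example:
/// \code
///   #pragma clang optimize off
///   void hardToDebug();  // not optimized, regardless of the -O level
///   #pragma clang optimize on
/// \endcode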
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
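/// For example (illustrative HIP-style kernel added by the editor):
/// \code
///   __attribute__((amdgpu_waves_per_eu(2, 4)))  // request 2-4 waves per EU
///   __global__ void kern(float *out);
/// \endcode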
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. 
If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '\#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a D should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed.
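///
/// For example (editor's illustration):
/// \code
///   int x = 0;
///   #pragma omp parallel private(x)
///   { x = omp_get_thread_num(); }  // each thread gets its own 'x'
/// \endcode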
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '\#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '\#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '\#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on the Requires directive. OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '\#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '\#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct.
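/// For example (OpenMP 5.0 syntax; editor's illustration):
/// \code
///   struct Vec { int len; double *data; };
///   #pragma omp declare mapper(myvec : Vec v) map(v.len, v.data[0:v.len])
/// \endcode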
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '\#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '\#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '\#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. '\#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '\#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Called on correct id-expression from the '\#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OMPDeclareTargetDeclAttr::MapTypeTy MT, NamedDeclSetType &SameDirectiveDecls); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement.
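/// For example (editor's illustration):
/// \code
///   #pragma omp sections
///   {
///     #pragma omp section
///     work1();
///     #pragma omp section
///     work2();
///   }
/// \endcode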
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement.
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
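/// Example (illustrative sketch, not from the original header): an 'if'
/// clause with a directive-name modifier,
///
///   #pragma omp parallel if(parallel: n > 1000)
///
/// reaches this callback with NameModifier == OMPD_parallel and the condition
/// expression 'n > 1000' ('n' is a hypothetical variable).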
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. 
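/// Example (illustrative): the 'read'/'write'/'update'/'capture' clauses
/// below select the form of an atomic construct, e.g.
///
///   #pragma omp atomic read
///   v = x;
///   #pragma omp atomic update
///   x += 1;
///
/// ('v' and 'x' are hypothetical shared variables.)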
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
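/// Example (illustrative, not from the original header):
///
///   #pragma omp parallel for reduction(+: sum)
///   for (int i = 0; i < n; ++i) sum += a[i];
///
/// Here VarList is {sum} and ReductionId denotes the '+' operation
/// ('sum', 'a', and 'n' are hypothetical variables).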
OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. 
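/// Example (illustrative):
///
///   #pragma omp distribute dist_schedule(static, 64)
///   for (int i = 0; i < n; ++i) body(i);
///
/// Kind would be OMPC_DIST_SCHEDULE_static and ChunkSize the expression '64'
/// ('n' and 'body' are hypothetical).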
OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion, bool isBoundsSafeInterfaceCast = false); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. 
This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for the various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,
  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,
  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,
  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,
  /// IncompatiblePointer - The assignment is between two pointer types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,
  /// IncompatiblePointerSign - The assignment is between two pointer types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,
  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,
  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,
  /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
  /// changes address spaces in nested pointer types which is not allowed.
  /// For instance, converting __private int ** to __generic int ** is
  /// illegal even though __private could be converted to __generic.
  IncompatibleNestedPointerAddressSpaceMismatch,
  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,
  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,
  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,
  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointer types that are not compatible.
  IncompatibleBlockPointer,
  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,
  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,
  /// IncompatibleCheckedCVoid - Assignments to/from void pointers to pointers
  /// to data containing checked pointers is not allowed in regular checked
  /// scopes. It is allowed only in unchecked and checked bounds_only scopes.
  IncompatibleCheckedCVoid,
  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
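/// Example (illustrative, not from the original header):
///
///   int i;
///   unsigned *u = &i;     // IncompatiblePointerSign: warned, accepted
///   char *p = 42;         // IntToPointer: warned, accepted as an extension
///   enum E { A, B };
///   enum E e = (enum E)7; // warned below: 7 is outside E's range of values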
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
///        for assignability. If a diagnostic is produced, \p RHS will be
///        set to ExprError(). Note that this function may still return
///        without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
///        in an audited Core Foundation API and does not need to be checked
///        for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
///        conversions necessary to perform the assignment. If \c false,
///        \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true, QualType LHSInteropType = QualType());
public:
/// \brief Given a value with type Ty that has a bounds declaration,
/// compute the bounds-safe interface type. Returns a null QualType
/// if none exists.
QualType SynthesizeInteropType(QualType Ty, bool isParam);
/// Rewrite function types with bounds-safe interfaces on unchecked
/// types to use the checked types specified by the interfaces. Recursively
/// apply the rewrite to function types nested within the type.
QualType RewriteBoundsSafeInterfaceTypes(QualType Ty);
/// \brief Get the bounds-safe interface type for LHS.
/// Returns a null QualType if there isn't one.
QualType GetCheckedCLValueInteropType(ExprResult LHS);
/// \brief Get the bounds-safe interface type for RHS.
/// Returns a null QualType if there isn't one.
QualType GetCheckedCRValueInteropType(ExprResult RHS);
/// \brief If T is an array type, create a checked array type version of T.
/// This includes propagating the checked property to nested array types. If
/// a valid checked array type cannot be constructed and Diagnose is true,
/// print a diagnostic message for the problem.
QualType MakeCheckedArrayType(QualType T, bool Diagnose = false, SourceLocation Loc = SourceLocation());
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
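// Example (illustrative sketch of the GCC transparent_union extension):
//
//   typedef union {
//     int *ip;
//     float *fp;
//   } __attribute__((transparent_union)) ptr_u;
//
//   void f(ptr_u p);
//   // Given 'int i; float g;', both f(&i) and f(&g) type-check: the union
//   // is "transparent" and the argument initializes the matching member.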
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
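// Example (illustrative): for 'a = b', pass a null converted type; for
// 'a += b', pass the type in which 'a + b' was computed so the result can be
// converted back to a's type:
//
//   a = b;   // CheckAssignmentOperands(a, b, Loc, QualType())
//   a += b;  // CheckAssignmentOperands(a, b, Loc, <type of 'a + b'>)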
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. 
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
  Decl *ConditionVar;
  FullExprArg Condition;
  bool Invalid;
  bool HasKnownValue;
  bool KnownValue;
  friend class Sema;
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}
public:
  ConditionResult() : ConditionResult(false) {}
  bool isInvalid() const { return Invalid; }
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get());
  }
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID);
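/// Examples (illustrative) for the condition checks above:
///
///   if (x = 0) ...     // DiagnoseAssignmentAsCondition: assignment used as
///                      // a boolean condition (was '==' intended?)
///   if ((x == y)) ...  // DiagnoseEqualityWithExtraParens: redundant parens
///                      // may indicate an intended assignment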
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  bool Suppress;
  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0;
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
  CanonicalDeclPtr<FunctionDecl> FD;
  SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
               /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>>
    DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };
  DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S);
  DeviceDiagBuilder(DeviceDiagBuilder &&D);
  DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
  ~DeviceDiagBuilder();
  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (DeviceDiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a DeviceDiagBuilder yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }
  template <typename T>
  friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value;
    return Diag;
  }
private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;
  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee, SourceLocation OrigLoc, const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. 
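/// Example (illustrative sketch with hypothetical functions):
///
///   __device__ void dev();
///   __host__ __device__ void hd() { dev(); }
///
/// When compiling for the host, the call to 'dev' is CFP_WrongSide: it is
/// accepted here, but becomes an error if 'hd' is ever codegen'ed for the
/// host.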
bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) {
  return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
///   (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
///   be emitted if and when the caller is codegen'ed, and returns true.
///
///   Will only create deferred diagnostics for a given SourceLocation once,
///   so you can safely call this multiple times without generating duplicate
///   deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
///        its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
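/// Example (illustrative, hypothetical template): given
///
///   template <typename T> __host__ __device__ T twice(T v) { return v + v; }
///
/// a declaration of 'twice' produced from this template would pick up
/// __host__ __device__ from the template via this copy.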
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
  /// Code completion occurs at top-level or namespace context.
  PCC_Namespace,
  /// Code completion occurs within a class, struct, or union.
  PCC_Class,
  /// Code completion occurs within an Objective-C interface, protocol,
  /// or category.
  PCC_ObjCInterface,
  /// Code completion occurs within an Objective-C implementation or
  /// category implementation.
  PCC_ObjCImplementation,
  /// Code completion occurs within the list of instance variables
  /// in an Objective-C interface, protocol, category, or implementation.
  PCC_ObjCInstanceVariableList,
  /// Code completion occurs following one or more template
  /// headers.
  PCC_Template,
  /// Code completion occurs following one or more template
  /// headers within a class.
  PCC_MemberTemplate,
  /// Code completion occurs within an expression.
  PCC_Expression,
  /// Code completion occurs within a statement, which may
  /// also be an expression or a declaration.
  PCC_Statement,
  /// Code completion occurs at the beginning of the
  /// initialization statement (or expression) in a for loop.
  PCC_ForInit,
  /// Code completion occurs within the condition of an if,
  /// while, switch, or for statement.
  PCC_Condition,
  /// Code completion occurs within the body of a function on a
  /// recovery path, where we do not have a specific handle on our position
  /// in the grammar.
  PCC_RecoveryInFunction,
  /// Code completion occurs where only a type is permitted.
  PCC_Type,
  /// Code completion occurs in a parenthesized expression, which
  /// might also be a type cast.
  PCC_ParenthesizedExpression,
  /// Code completion occurs within a sequence of declaration
  /// specifiers within a function, method, or block.
  PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
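/// Example (illustrative): with 'void foo(int, double);', typing
///
///   foo(1, |
///
/// (where | marks the cursor) triggers signature help, which reports the
/// signature of 'foo' and returns 'double' as the preferred type of the
/// second argument.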
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void 
CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container. void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent();
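// --- Illustrative sketch (follows the MPI-style example in the Clang
// --- attribute documentation; every name here is an assumption): the
// --- source-level attributes checked by RegisterTypeTagForDatatype and
// --- CheckArgumentWithTypeTag above.
//
//   struct example_type_info { int Unused; };
//   // Registers &example_tag_int as the tag identifying 'int' buffers:
//   extern struct example_type_info example_tag_int
//       __attribute__((type_tag_for_datatype(example, int)));
//   // Argument 3 carries the tag describing what argument 1 points to:
//   void example_send(void *Buf, int Count, struct example_type_info *Tag)
//       __attribute__((pointer_with_type_tag(example, 1, 3)));
//   // example_send(&D, 1, &example_tag_int) with 'double D' is diagnosed:
//   // the registered tag says Buf must point to int.
/// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation.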
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to a /// function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the set of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so, it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; /// \brief RAII object that handles state changes for processing a member /// bounds expression. class EnterMemberBoundsExprRAII { Sema &S; bool SavedMemberBounds; public: EnterMemberBoundsExprRAII(Sema &S) : S(S), SavedMemberBounds(S.IsMemberBoundsExpr) { S.IsMemberBoundsExpr = true; } ~EnterMemberBoundsExprRAII() { S.IsMemberBoundsExpr = SavedMemberBounds; } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang
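// --- Illustrative sketch (an assumption for exposition, not part of Sema.h):
// --- the user-code pattern that the misaligned-member machinery above
// --- records via AddPotentialMisalignedMembers and later diagnoses.
struct __attribute__((packed)) PackedExample {
  char C;
  int I; // only byte-aligned because of the packed attribute
};
inline int *addressOfPackedInt(PackedExample *P) {
  return &P->I; // -Waddress-of-packed-member: pointer may be under-aligned
}
namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation.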
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
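// --- Illustrative sketch (not part of Sema.h): the generic pattern the
// --- DenseMapInfo specialization above follows, shown for a hypothetical
// --- (pointer, id) key. `ExampleKey` and its fields are assumptions.
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"

struct ExampleKey {
  void *Ptr;
  unsigned ID;
};

namespace llvm {
template <> struct DenseMapInfo<ExampleKey> {
  using PtrInfo = DenseMapInfo<void *>;
  // Two reserved key values that can never collide with real entries.
  static ExampleKey getEmptyKey() { return {PtrInfo::getEmptyKey(), 0}; }
  static ExampleKey getTombstoneKey() {
    return {PtrInfo::getTombstoneKey(), 0};
  }
  // Hash both components, mirroring the FunctionDecl/SourceLocation combine.
  static unsigned getHashValue(const ExampleKey &K) {
    return hash_combine(K.Ptr, K.ID);
  }
  static bool isEqual(const ExampleKey &LHS, const ExampleKey &RHS) {
    return LHS.Ptr == RHS.Ptr && LHS.ID == RHS.ID;
  }
};
} // namespace llvm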
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of the expected type during expression parsing. The type is /// tied to a particular token; all functions that update or consume the type /// take a start location of the token they are looking at as a parameter. /// This allows us to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while the function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; };
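// --- Illustrative pseudo-usage (an assumption, not part of Sema.h): how the
// --- parser might drive PreferredTypeBuilder around code completion in
// --- `x = ^`. `SemaRef`, `Tok`, and `LHS` stand for parser state.
//
//   PreferredTypeBuilder PreferredType;
//   // After parsing `x =`, record the type expected at the RHS token:
//   PreferredType.enterBinary(SemaRef, Tok.getLocation(), LHS, tok::equal);
//   // ...at the completion point, query the expected type of this token:
//   QualType Expected = PreferredType.get(Tok.getLocation());
//   // Expected is null unless the queried location matches the recorded one.
/// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here.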
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method() {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*"
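// --- Illustrative sketch (an assumption, not part of Sema.h): the source
// --- pragmas that drive PackStack through the PSK_* actions above.
//
//   #pragma pack(push, 1)  // PSK_Push_Set: push a slot, set alignment to 1
//   struct PackedPair {
//     char C;
//     int I;               // offset 1: no padding while packing is active
//   };
//   #pragma pack(pop)      // PSK_Pop: restore the previous alignment
/// This is an attribute introduced by \#pragma clang attribute.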
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// True if the current expression is a member bounds expression /// for a structure. Member bounds expressions can only reference /// members and cannot reference variables. bool IsMemberBoundsExpr; std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. 
typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. 
sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). 
QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. 
struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering(), ExprContext(ExprContext) {} /// Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl);
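// --- Illustrative sketch (an assumption, not part of Sema.h): expressions
// --- that land in the evaluation contexts enumerated above.
//
//   int f();
//   void contexts() {
//     auto U = sizeof(f());             // Unevaluated: f is not odr-used
//     switch (0) { case 1 + 1: break; } // ConstantEvaluated: the case value
//     int P = f();                      // PotentiallyEvaluated
//   }
/// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair.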
The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. 
In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } /// Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it.
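// FPContractStateRAII above follows the usual save/restore idiom; a minimal
// self-contained sketch of the same pattern (all names hypothetical):
// \code
//   struct FPState { int ContractMode = 0; };
//   class StateRAII {
//     FPState &S;
//     FPState Saved;                 // snapshot taken on entry
//   public:
//     StateRAII(FPState &S) : S(S), Saved(S) {}
//     ~StateRAII() { S = Saved; }    // snapshot restored on scope exit
//   };
//   void parseCompoundStmt(FPState &S) {
//     StateRAII Guard(S);
//     S.ContractMode = 1;            // e.g. '#pragma STDC FP_CONTRACT ON'
//   }                                // previous mode reverts here
// \endcode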
/// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anyway. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not).
Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, CheckedPointerKind kind, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, CheckedArrayKind Kind, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
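// A hedged sketch of driving BuildFunctionType above (assumes a Sema &S and
// a SourceLocation Loc in scope; default ExtProtoInfo):
// \code
//   SmallVector<QualType, 2> Params = {S.Context.IntTy, S.Context.FloatTy};
//   FunctionProtoType::ExtProtoInfo EPI;
//   QualType FnTy = S.BuildFunctionType(S.Context.VoidTy, Params, Loc,
//                                       DeclarationName(), EPI);
//   // FnTy is 'void (int, float)'; on error a null QualType comes back
// \endcode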
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
{ assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking whether their /// addresses are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.)
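// BoundTypeDiagnoser::emit above streams every tuple element into the
// diagnostic with the array-initialization pack-expansion trick; a
// standalone sketch of the idiom (std:: used in place of the llvm:: shims):
// \code
//   #include <iostream>
//   #include <tuple>
//   #include <utility>
//   template <typename... Ts, std::size_t... Is>
//   void streamAll(std::ostream &OS, const std::tuple<Ts...> &Tup,
//                  std::index_sequence<Is...>) {
//     // Evaluates OS << element for each index, left to right, without
//     // C++17 fold expressions.
//     bool Dummy[] = {false, ((OS << std::get<Is>(Tup)), false)...};
//     (void)Dummy;
//   }
//   int main() {
//     streamAll(std::cout, std::make_tuple(1, " and ", 2.5),
//               std::index_sequence_for<int, const char *, double>());
//   }
// \endcode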
bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
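// Typical use of the variadic RequireCompleteType overloads above; a sketch
// (the diagnostic ID is illustrative):
// \code
//   if (S.RequireCompleteType(Loc, T,
//                             diag::err_typecheck_decl_incomplete_type))
//     return ExprError();  // 'true' means T was incomplete and the bound
//                          // diagnostic has already been emitted
// \endcode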
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate, NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case 
NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
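// For instance (sketch): in 'g < 3', where lookup finds a non-template
// function 'g', the DeclRefExpr carries no explicit template arguments, so
// it is plausible the user meant a template-id such as 'g<3>(...)'; the
// dependent forms above cover the same pattern appearing inside a template.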
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void 
FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
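// A source sketch of a non-trivial C union (under ObjC ARC, a __strong
// member makes a union non-trivial to copy or default-initialize):
// \code
//   union U { id obj; int i; };
//   void f(union U u);   // diagnosed with NTCUC_FunctionParam
//   union U g(void);     // diagnosed with NTCUC_FunctionReturn
// \endcode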
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit, SourceLocation EqualLoc = SourceLocation()); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); bool ValidateNTCheckedType(ASTContext &C, QualType VDeclType, Expr *Init); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. 
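// Why 'constexpr' and deduced return types defeat body-skipping (see
// canDelayFunctionBody and canSkipFunctionBody above); a sketch:
// \code
//   constexpr int f() { return 1; } // body needed to evaluate f() in
//   int a[f()];                     //   constant expressions
//   auto g() { return 0.5; }        // body needed to deduce g's return type
//   double d = g();
// \endcode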
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem.
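// A C++20 module unit exercising the module callbacks above; a sketch:
// \code
//   module;                 // ActOnGlobalModuleFragmentDecl
//   #include <vector>
//   export module m;        // ActOnModuleDecl (ModuleDeclKind::Interface)
//   import other;           // ActOnModuleImport
//   export int f();
//   module :private;        // ActOnPrivateModuleFragmentDecl
//   int f() { return 42; }
// \endcode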
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
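// Source sketch for the NonTagKind classification above, which feeds
// err_tag_reference_non_tag (C++):
// \code
//   typedef int T;
//   struct T *p;  // error: 'T' refers to a typedef, not a struct;
//                 // classified as NTK_Typedef for the %select
// \endcode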
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr, RecordDecl::Genericity GenericKind = RecordDecl::NonGeneric, ArrayRef<TypedefDecl *> TypeParams = ArrayRef<TypedefDecl *> {nullptr, 0} ); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); FieldDecl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). 
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); /// Push the parameters listed in Params into scope. void ActOnSetupParametersAgain(Scope* S, ArrayRef<ParmVarDecl *> Params); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. 
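// When EnterDeclaratorContext above comes into play; a source sketch:
// \code
//   namespace N { struct S { static int x; void f(); }; }
//   int N::S::x = sizeof(S); // initializer parsed with N::S as the lookup
//   void N::S::f() {}        //   context, so plain 'S' resolves here
// \endcode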
FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added.
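// A sketch of the priority rules above: the explicitly written attribute
// (AP_Explicit) wins over one injected by '#pragma clang attribute'
// (AP_PragmaClangAttribute):
// \code
//   #pragma clang attribute push (__attribute__((availability(macos, introduced=10.12))), apply_to=function)
//   void f(void) __attribute__((availability(macos, introduced=10.14)));
//   #pragma clang attribute pop
//   // f keeps introduced=10.14: the lower priority value is the stronger one
// \endcode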
AvailabilityAttr *mergeAvailabilityAttr( NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // Checked C specific methods for merging function declarations. bool CheckedCFunctionDeclCompatibility(FunctionDecl *New, FunctionDecl *Old); bool CheckedCMergeFunctionDecls(FunctionDecl *New, FunctionDecl *Old); bool DiagnoseCheckedCFunctionCompatibility(FunctionDecl *New, FunctionDecl *Old); // used for %select in diagnostics for errors involving checked types. 
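// (Editor's sketch, not from the original header: enumerations like the one
// declared just below are conventionally streamed into a %select diagnostic
// by index, e.g. a hypothetical diagnostic text
//   "... %select{type|struct|union}0 ..."
// would be fed as
//   Diag(Loc, diag::err_some_checked_diag)   // hypothetical ID
//       << (unsigned)classifyForCheckedTypeDiagnostic(QT);
// where the enumerator order must match the %select alternatives.)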
enum class CheckedTypeClassification { CCT_Any, CCT_Struct, CCT_Union }; // used for %select in diagnostics for errors involving redeclarations // with bounds enum class CheckedCBoundsError { CCBE_Parameter, CCBE_Return, CCBE_Variable }; // used for %select in diagnostics for errors involving redeclarations // with bounds annotations. enum class BoundsAnnotationKind { Bounds, IType }; CheckedTypeClassification classifyForCheckedTypeDiagnostic(QualType qt); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult 
PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. 
bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all templates and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal.
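/// (Editor's illustrative sketch of a typical call, with a hypothetical
/// caller:)
/// \code
///   if (!checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, Loc))
///     return ExprError();  // address cannot be taken; diagnostic emitted
/// \endcode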
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. 
LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. 
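// (Editor's aside, not from the original header: a minimal sketch of the
// TypoDiagnosticGenerator and TypoRecoveryCallback typedefs above, with
// hypothetical handlers:)
//   TypoDiagnosticGenerator TDG = [](const TypoCorrection &TC) {
//     // e.g. emit a "did you mean ...?" note for TC
//   };
//   TypoRecoveryCallback TRC = [](Sema &S, TypoExpr *TE,
//                                 TypoCorrection TC) -> ExprResult {
//     return ExprError();  // e.g. decline to recover from this typo
//   };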
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non-error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. 
void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation.
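/// (Editor's illustrative sketch of a hypothetical call site:)
/// \code
///   DiagnoseUnimplementedProperties(S, IMPDecl, CDecl,
///                                   /*SynthesizeProperties=*/false);
/// \endcode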
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If a method is a property setter/getter and /// its property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' types match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface or /// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a /// category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See the description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in the global method pool for the /// given selector. It checks the desired kind first; if none is found and the /// parameter checkTheOther is set, it then checks the other kind. If no such /// method or only one method is found, the function returns false; otherwise, /// it returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns the method which best matches the given argument list, or /// nullptr if none could be found. ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures.
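/// (Editor's illustrative sketch, hypothetical use while typechecking a
/// message sent to 'id':)
/// \code
///   if (ObjCMethodDecl *M = LookupInstanceMethodInGlobalPool(Sel, R))
///     ResultTy = M->getReturnType();  // hypothetical use of the result
/// \endcode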
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr, CheckedScopeSpecifier WrittenCSS = CSS_None, SourceLocation CSSLoc = SourceLocation(), SourceLocation CSMLoc = SourceLocation()); private: CheckedScopeSpecifier CheckingKind; // Keep a stack of saved checked scope information. 
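// (Editor's sketch of the save/restore protocol supported by the members
// declared below, not from the original header; CSS_Bounds is a
// hypothetical specifier value:)
//   PushCheckedScopeInfo(Loc);        // save the current CheckingKind
//   SetCheckedScopeInfo(CSS_Bounds);  // enter the nested checked scope
//   ...                               // parse the scope's contents
//   if (PopCheckedScopeInfo())        // restore; true means stack was empty
//     ...;                            // unbalanced push/pop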
class SavedCheckedScope { public: SavedCheckedScope(CheckedScopeSpecifier S, SourceLocation L) : Loc(L), Saved(S) {} SourceLocation Loc; CheckedScopeSpecifier Saved; }; SmallVector<SavedCheckedScope, 8> CheckingKindStack; // can be empty public: CheckedScopeSpecifier GetCheckedScopeInfo() { return CheckingKind; } void SetCheckedScopeInfo(CheckedScopeSpecifier CSS) { CheckingKind = CSS; } void PushCheckedScopeInfo(SourceLocation Loc) { CheckingKindStack.push_back(SavedCheckedScope(CheckingKind, Loc)); } bool PopCheckedScopeInfo() { if (CheckingKindStack.size() > 0) { CheckingKind = CheckingKindStack.back().Saved; CheckingKindStack.pop_back(); return false; } else return true; } void DiagnoseUnterminatedCheckedScope(); bool IsCheckedScope() { return CheckingKind != CSS_Unchecked; } class CheckedScopeRAII { Sema &SemaRef; CheckedScopeSpecifier PrevCheckingKind; public: CheckedScopeRAII(Sema &SemaRef, CheckedScopeSpecifier CSS) : SemaRef(SemaRef), PrevCheckingKind(SemaRef.CheckingKind) { if (CSS != CSS_None) SemaRef.CheckingKind = CSS; } CheckedScopeRAII(Sema &S, DeclSpec &DS) : CheckedScopeRAII(S, DS.getCheckedScopeSpecifier()) { } ~CheckedScopeRAII() { SemaRef.CheckingKind = PrevCheckingKind; } }; /// An RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false, CheckedScopeSpecifier CSS = CSS_None): S(S), CheckedProperties(S, CSS) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; CheckedScopeRAII CheckedProperties; }; /// An RAII helper that pops a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult
CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned 
NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); enum CheckedScopeTypeLocation { CSTL_TopLevel, CSTL_Nested, CSTL_BoundsSafeInterface }; /// Returns true if Ty is allowed in a checked scope: /// - If Ty is a pointer or array type, it must be a checked pointer or /// array type or an unchecked pointer or array type with a bounds-safe /// interface. /// - This rule applies recursively to any types nested within Ty. /// - All other types are allowed in checked scopes. 
/// Return false if Ty is not allowed. bool AllowedInCheckedScope(QualType Ty, const InteropTypeExpr *InteropType, bool IsParam, CheckedScopeTypeLocation Loc, CheckedScopeTypeLocation &ProblemLoc, QualType &ProblemTy); // Enum for diagnostic message that describes the type of declaration // being checked. enum CheckedDeclKind { CDK_Parameter, CDK_FunctionReturn, CDK_LocalVariable, CDK_GlobalVariable, CDK_Member }; /// \param D - target declaration /// \param UseLoc - default invalid location at declaration /// it is valid only if it is regarded as use of variable /// \returns true if target declaration is valid checked decl bool DiagnoseCheckedDecl(const ValueDecl *D, SourceLocation UseLoc = SourceLocation()); bool DiagnoseTypeInCheckedScope(QualType Ty, SourceLocation Start, SourceLocation End); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. 
// These functions also contain the relevant logic for marking if a reference
// to a function or variable is an odr-use (in the C++11 sense). There are
// separate variants for expressions referring to a decl; these exist because
// odr-use marking needs to be delayed for some constant variables when we
// build one of the named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                            bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                       unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                        SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                        QualType &CaptureType, QualType &DeclRefType,
                        const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                        TryCaptureKind Kind = TryCapture_Implicit,
                        SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
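// Illustrative mapping (a sketch, not normative) from lambda capture syntax
// to the TryCaptureKind values above:
//
//   int n = 0;
//   auto a = [n]  { return n; };   // TryCapture_ExplicitByVal
//   auto b = [&n] { return n; };   // TryCapture_ExplicitByRef
//   auto c = [=]  { return n; };   // TryCapture_Implicit (by value)
//
// Roughly, CaptureType is the type of the field generated to store the
// capture, while DeclRefType is the type a use of 'n' has inside the lambda.
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.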
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr, bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
                                 SourceLocation LParenLoc,
                                 ArrayRef<Expr *> Arg,
                                 SourceLocation RParenLoc,
                                 Expr *Config = nullptr,
                                 bool IsExecConfig = false,
                                 ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D,
                         ParsedType &Ty, SourceLocation RParenLoc,
                         Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
                               SourceLocation RParenLoc, Expr *Op,
                               bool isCheckedScope = false);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
                                SourceLocation RParenLoc, Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc,
                                      bool GNUSyntax, ExprResult Init);
private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc, Expr *CondExpr,
                              Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc); // "({..})"
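// Illustrative sketch of the GNU extensions handled above:
//
//   int x = ({ int t = f(); t + 1; });  // statement expression: its value is
//                                       // that of the last statement, t + 1
//   int y = p ?: q;                     // conditional with omitted LHS,
//                                       // i.e. p ? p : q with p evaluated once
//
// Handle the final expression in a statement expression.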
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. 
/// ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
                              Scope *CurScope);
//===---------------------------- Checked C Extension ----------------------===//
private:
  QualType ValidateBoundsExprArgument(Expr *Arg);
public:
ExprResult ActOnNullaryBoundsExpr(SourceLocation BoundKWLoc,
                                  BoundsExpr::Kind Kind,
                                  SourceLocation RParenLoc);
ExprResult ActOnCountBoundsExpr(SourceLocation BoundsKWLoc,
                                BoundsExpr::Kind Kind, Expr *CountExpr,
                                SourceLocation RParenLoc);
ExprResult ActOnRangeBoundsExpr(SourceLocation BoundsKWLoc, Expr *LowerBound,
                                Expr *UpperBound, SourceLocation RParenLoc);
ExprResult CreateRangeBoundsExpr(SourceLocation BoundsKWLoc, Expr *LowerBound,
                                 Expr *UpperBound,
                                 RelativeBoundsClause *Relative,
                                 SourceLocation RParenLoc);
ExprResult ActOnBoundsInteropType(SourceLocation TypeKWLoc, ParsedType Ty,
                                  SourceLocation RParenLoc);
ExprResult CreateBoundsInteropTypeExpr(SourceLocation TypeKWLoc,
                                       TypeSourceInfo *TInfo,
                                       SourceLocation RParenLoc);
ExprResult CreatePositionalParameterExpr(unsigned Index, QualType QT);
RelativeBoundsClause *ActOnRelativeTypeBoundsClause(SourceLocation BoundsKWLoc,
                                                    ParsedType Ty,
                                                    SourceLocation RParenLoc);
RelativeBoundsClause *CreateRelativeTypeBoundsClause(SourceLocation BoundsKWLoc,
                                                     TypeSourceInfo *TyInfo,
                                                     SourceLocation RParenLoc);
RelativeBoundsClause *ActOnRelativeConstExprClause(Expr *ConstExpr,
                                                   SourceLocation BoundsKWLoc,
                                                   SourceLocation RParenLoc);
bool CheckBoundsCastBaseType(Expr *E1);
ExprResult ActOnBoundsCastExprBounds(
    Scope *S, SourceLocation OpLoc, tok::TokenKind Kind,
    SourceLocation LAngleBracketLoc, ParsedType D,
    SourceLocation RAngleBracketLoc, SourceLocation LParenLoc,
    SourceLocation RParenLoc, Expr *E1, BoundsExpr *ParsedBounds);
ExprResult ActOnBoundsCastExprSingle(
    Scope *S, SourceLocation OpLoc, tok::TokenKind Kind,
    SourceLocation LAngleBracketLoc, ParsedType D,
    SourceLocation RAngleBracketLoc, SourceLocation LParenLoc,
    SourceLocation RParenLoc, Expr *E1);
ExprResult BuildBoundsCastExpr(SourceLocation OpLoc, tok::TokenKind Kind,
                               TypeSourceInfo *CastTypeInfo,
                               SourceRange AngleBrackets, SourceRange Paren,
                               Expr *E1, BoundsExpr *bounds);
bool DiagnoseBoundsDeclType(QualType Ty, DeclaratorDecl *D,
                            BoundsAnnotations &BA, bool IsReturnAnnots);
/// \brief Update the ASTContext tracking that records, for a member, which
/// bounds declarations depend upon it. FD is the member whose bounds are
/// given by Bounds.
void TrackMemberBoundsDependences(FieldDecl *FD, BoundsExpr *Bounds);
void ActOnBoundsDecl(DeclaratorDecl *D, BoundsAnnotations Annots,
                     bool MergeDeferredBounds = false);
void ActOnEmptyBoundsDecl(DeclaratorDecl *D);
void ActOnInvalidBoundsDecl(DeclaratorDecl *D);
/// \brief Add default bounds/interop type expressions to Annots, if
/// appropriate.
void InferBoundsAnnots(QualType Ty, BoundsAnnotations &Annots, bool IsParam);
// #pragma CHECKED_SCOPE.
enum PragmaCheckedScopeKind {
  PCSK_On, PCSK_Off, PCSK_BoundsOnly, PCSK_Push, PCSK_Pop
};
void ActOnPragmaCheckedScope(PragmaCheckedScopeKind Kind, SourceLocation Loc);
void DiagnoseUnterminatedPragmaCheckedScopePush();
BoundsExpr *CreateInvalidBoundsExpr();
/// \brief Synthesize the interop type expression implied by the presence
/// of a bounds expression. Ty is the original unchecked type. Returns null
/// if none exists.
InteropTypeExpr *SynthesizeInteropTypeExpr(QualType Ty, bool IsParam);
BoundsExpr *CreateCountForArrayType(QualType QT);
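// Illustrative Checked C declarations (a sketch following the Checked C
// spec) that exercise the bounds-expression callbacks above:
//
//   _Array_ptr<int>  p : count(n);          // ActOnCountBoundsExpr
//   _Array_ptr<char> q : byte_count(len);   // ActOnCountBoundsExpr
//   _Array_ptr<int>  r : bounds(p, p + n);  // ActOnRangeBoundsExpr
//
// _Return_value in Checked C bounds expressions.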
ExprResult ActOnReturnValueExpr(SourceLocation Loc);
/// \brief When non-NULL, the type of the '_Return_value' expression.
QualType BoundsExprReturnValue;
/// \brief RAII object used to temporarily set the type of _Return_value.
class CheckedCReturnValueRAII {
  Sema &S;
  QualType OldReturnValue;
public:
  CheckedCReturnValueRAII(Sema &S, QualType ReturnVal) : S(S) {
    OldReturnValue = S.BoundsExprReturnValue;
    S.BoundsExprReturnValue = ReturnVal;
  }
  ~CheckedCReturnValueRAII() { S.BoundsExprReturnValue = OldReturnValue; }
};
typedef bool (*ParseDeferredBoundsCallBackFn)(
    void *P, std::unique_ptr<CachedTokens> Toks,
    ArrayRef<ParmVarDecl *> Params, BoundsAnnotations &Result,
    const Declarator &D);
void SetDeferredBoundsCallBack(void *OpaqueData,
                               ParseDeferredBoundsCallBackFn p);
ParseDeferredBoundsCallBackFn DeferredBoundsParser;
void *DeferredBoundsParserData;
// Represents the context where an expression must be non-modifying.
enum NonModifyingContext {
  NMC_Unknown,
  NMC_Dynamic_Check,
  NMC_Count,              // Bounds count expression.
  NMC_Byte_Count,         // Bounds byte count expression.
  NMC_Range,              // Bounds range expression.
  NMC_Function_Return,    // Argument for parameter used in function
                          // return bounds.
  NMC_Function_Parameter  // Argument for parameter used in function
                          // parameter bounds.
};
enum NonModifyingMessage { NMM_None, NMM_Error, NMM_Note };
/// \brief Checks whether an expression is non-modifying
/// (see Checked C Spec, 3.6.1). Returns true if the expression is
/// non-modifying, false otherwise.
bool CheckIsNonModifying(Expr *E,
                         NonModifyingContext Req =
                             NonModifyingContext::NMC_Unknown,
                         NonModifyingMessage = NMM_Error);
BoundsExpr *CheckNonModifyingBounds(BoundsExpr *Bounds, Expr *E);
ExprResult ActOnFunctionTypeApplication(ExprResult TypeFunc,
                                        SourceLocation Loc,
                                        ArrayRef<TypeArgument> Args);
RecordDecl *ActOnRecordTypeApplication(RecordDecl *Base,
                                       ArrayRef<TypeArgument> TypeArgs);
const ExistentialType *ActOnExistentialType(ASTContext &Context,
                                            const Type *TypeVar,
                                            QualType InnerType);
/// Complete a delayed type application by populating the record's fields
/// with the right types. Should only be called once per delayed 'RecordDecl'.
void CompleteTypeAppFields(RecordDecl *Incomplete);
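// Illustrative Checked C generic structs (a sketch, using the notation of
// the example below) that drive the type application machinery above:
//
//   struct List _For_any(T) {
//     T *head;
//     struct List<T> *tail;   // type application of List at T
//   };
//   struct List<int> *ints;   // ActOnRecordTypeApplication at int
//
// Determine whether the given 'RecordDecl' is part of an 'expanding cycle'.
// Generic records that form part of an expanding cycle can't be instantiated
// because they produce an infinite number of type applications (because we
// construct the transitive closure of type applications eagerly).
//
// Consider the graph of type parameter dependencies as defined below. An
// expanding cycle is a cycle in the graph that contains at least one
// expanding edge.
//
// We show how the graph is built via an example. Suppose we have three
// generic structs A<T>, B<U>, C<V>:
//
//   struct A _For_any(T) { struct A<T>* a; struct B<T> *b; }
//   struct B _For_any(U) { struct C<struct C<U> > *c; }
//   struct C _For_any(V) { struct A<V>* a; }
//
// The vertices of the graph are T, U, and V (the type parameter, alpha
// re-named if needed). There is an edge between nodes N1 and N2 if N2 is
// used in a field anywhere in the position of N1. If N2 appears at the
// "top-level" replacing N1, then the resulting edge is "non-expanding".
// Otherwise, if N2 appears nested within the argument that replaces N1,
// then the edge is "expanding".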
// In our example the edges are:
//
//   non-expanding: T -> T, T -> U, V -> T, U -> V
//   expanding: U => V
//
// T -> U, U => V, V -> T is an expanding cycle because it contains the
// expanding edge U => V.
//
// The cycle will be detected when C is processed (because C is defined
// last). If we tried to instantiate C, we would end up performing the
// following type applications:
//   A<V>, B<V>, C<C<V>>, A<C<V>>, B<C<V>>, C<C<C<V>>>, ...
//
// The definition of expanding cycle is adapted from the 'ECMA 335 Common
// Language Infrastructure (CLI) Partitions I to VI' standard. Specifically,
// Partition II, section II.9.2 'Generics and recursive inheritance graphs'.
bool DiagnoseExpandingCycles(RecordDecl *Base, SourceLocation Loc);
QualType SubstituteTypeArgs(QualType QT, ArrayRef<TypeArgument> TypeArgs);
std::vector<const TypedefNameDecl *> FindFreeVariableDecls(QualType T);
bool AbstractForFunctionType(BoundsAnnotations &BA,
                             ArrayRef<DeclaratorChunk::ParamInfo> Params);
/// \brief Take a bounds expression with positional parameters from a function
/// type and substitute DeclRefs to the corresponding parameters in Params.
BoundsExpr *ConcretizeFromFunctionType(BoundsExpr *Expr,
                                       ArrayRef<ParmVarDecl *> Params);
/// \brief Take a member bounds expression with member references and
/// replace the member references with member access expressions using
/// MemberBase as the base. Returns a nullptr if there is an error.
BoundsExpr *MakeMemberBoundsConcrete(Expr *MemberBase, bool IsArrow,
                                     BoundsExpr *Bounds);
BoundsExpr *ConcretizeFromFunctionTypeWithArgs(BoundsExpr *Bounds,
                                               ArrayRef<Expr *> Args,
                                               NonModifyingContext ErrorKind);
/// ConvertToFullyCheckedType: convert an expression E to a fully checked
/// type. This is used to retype declrefs and member exprs in checked scopes
/// with bounds-safe interfaces. The Checked C spec says that such uses in
/// checked scopes shall be treated as having "checked type".
ExprResult ConvertToFullyCheckedType(Expr *E, InteropTypeExpr *BA,
                                     bool IsParamUse, ExprValueKind VK);
/// GetArrayPtrDereference - determine if an lvalue expression is a
/// dereference of an _Array_ptr or _Nt_array_ptr (via '*' or an array
/// subscript operator). If it is, return the actual dereference expression
/// and set Result to the pointer type being dereferenced. Otherwise, return
/// null.
Expr *GetArrayPtrDereference(Expr *E, QualType &Result);
/// MakeAssignmentImplicitCastExplicit: E has had assignment conversion rules
/// applied to it. If an implicit cast has been introduced because of the
/// assignment conversion rules, replace it with an explicit cast.
/// This allows us to substitute E into other operator expressions without
/// worrying about the different implicit conversion rules between
/// assignments and other operators. Sema tree rewriting assumes that
/// semantic analysis will recreate implicit casts. That doesn't happen
/// properly if E is taken from an assignment expression and used in another
/// operator expression.
Expr *MakeAssignmentImplicitCastExplicit(Expr *E);
enum BoundsDeclarationCheck { BDC_Assignment, BDC_Initialization };
/// \brief Check that an address-of operation is not taking the
/// address of members used in bounds.
void CheckAddressTakenMembers(UnaryOperator *AddrOf);
/// \brief Check whether E contains a return value expression.
bool ContainsReturnValueExpr(Expr *E);
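// Illustrative use of _Return_value (a sketch following the Checked C spec):
// in a function's return bounds, _Return_value names the value being
// returned, so the declaration below says the returned pointer has cnt
// elements:
//
//   _Array_ptr<int> make_buf(int cnt)
//       : bounds(_Return_value, _Return_value + cnt);
//
/// \brief Wrap a call expression in a Checked C temporary binding
/// expression, if a temporary is needed to describe the bounds
/// of the result of the call expression.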
ExprResult CreateTemporaryForCallIfNeeded(ExprResult R); /// CheckFunctionBodyBoundsDecls - check bounds declarations within a function /// body. void CheckFunctionBodyBoundsDecls(FunctionDecl *FD, Stmt *Body); /// CheckTopLevelBoundsDecls - check bounds declarations for variable declarations /// not within a function body. void CheckTopLevelBoundsDecls(VarDecl *VD); // WarnDynamicCheckAlwaysFails - Adds a warning if an explicit dynamic check // will always fail. void WarnDynamicCheckAlwaysFails(const Expr *Condition); // If the VarDecl D has a byte_count or count bounds expression, // NormalizeBounds expands it to a range bounds expression. The expanded // range bounds are attached to the VarDecl D to avoid recomputing the // normalized bounds for D. BoundsExpr *NormalizeBounds(const VarDecl *D); // This is wrapper around CheckBoundsDeclaration::ExpandToRange. This // provides an easy way to invoke this function from outside the class. Given // a byte_count or count bounds expression for the VarDecl D, ExpandToRange // will expand it to a range bounds expression. BoundsExpr *ExpandBoundsToRange(const VarDecl *D, const BoundsExpr *B); // // Track variables that in-scope bounds declarations depend upon. // TODO: generalize this to other lvalue expressions. class BoundsDependencyTracker { public: typedef SmallVector<VarDecl *, 2> VarBoundsDecls; typedef VarBoundsDecls::iterator VarBoundsIterator; typedef llvm::iterator_range<VarBoundsIterator> VarBoundsIteratorRange; // mapping from variables to bounds that depend upon the variables. typedef std::map<VarDecl *, VarBoundsDecls> DependentMap; private: // Map variables to the bounds declarations that are // in scope and depend upon them. DependentMap Map; // Track the bounds that are in scope so that we can remove them from the // dependent map when the scope is exited. std::vector<VarDecl *> BoundsInScope; public: BoundsDependencyTracker() {} // Call these when entering/exiting scopes so that we can track when // variables go out of scope. EnterScope returns an integer // that should be passed to the corresponding ExitScope call. unsigned EnterScope(); void ExitScope(unsigned scopeBegin); // If D has a bounds declaration, add its dependencies to the existing // scope. void Add(VarDecl *D); VarBoundsIteratorRange DependentBoundsDecls(VarDecl *D) { auto Iter = Map.find(D); if (Iter == Map.end()) return VarBoundsIteratorRange(nullptr, nullptr); return VarBoundsIteratorRange(Iter->second.begin(),Iter->second.end()); } void Dump(raw_ostream &OS); }; BoundsDependencyTracker BoundsDependencies; // Map expressions that modify lvalues (assignments and pre/post // increment/decrement operations) to bounds that may depend on the modified // lvalues. We check the validity of bounds declarations after // expression statements using data flow analysis. During the analysis, // we need to know whether an expression modifies an lvalue involved in a // bounds invariant. The AST traversal order for determining this is lexical // and conflicts with preferred orderings for dataflow analysis, so we // precompute this information before analyzing a function body. class ModifiedBoundsDependencies { public: // A C lvalue expression with bounds on values stored in the lvalue. // It is either a variable or a member expression. struct LValueWithBounds { LValueWithBounds(llvm::PointerUnion<VarDecl *, MemberExpr *> Target, BoundsExpr *Bounds) : Target(Target), Bounds(Bounds) {} llvm::PointerUnion<VarDecl *, MemberExpr *> Target; BoundsExpr *Bounds; // Bounds for target. 
};
typedef SmallVector<LValueWithBounds, 2> LValuesWithBounds;
// Map assignments or pre/post increment/decrement expressions to bounds
// that depend upon the lvalue modified by the expressions.
typedef std::map<Expr *, LValuesWithBounds> DependentBounds;
void Add(Expr *E, llvm::PointerUnion<VarDecl *, MemberExpr *> LValue,
         BoundsExpr *Bounds);
void Dump(raw_ostream &OS);
ModifiedBoundsDependencies() {}
DependentBounds Tracker;
};
/// \brief Compute a mapping from statements that modify lvalues to
/// in-scope bounds declarations that depend on those lvalues.
/// FD is the function being declared and Body is the body of the
/// function. They are passed in separately because Body hasn't
/// been attached to FD yet.
void ComputeBoundsDependencies(ModifiedBoundsDependencies &Tracker,
                               FunctionDecl *FD, Stmt *Body);
/// \brief RAII class used to indicate that we are substituting an expression
/// into another expression during bounds checking. We need to suppress
/// diagnostics emission during this. We are doing type-preserving
/// substitutions, so we don't expect semantic errors during substitution.
/// There could be warnings, which would confuse users. The warnings could
/// also be escalated to errors, which would cause compilation failures.
class ExprSubstitutionScope {
  Sema &SemaRef;
  bool PrevDisableSubstitionDiagnostics;
public:
  explicit ExprSubstitutionScope(Sema &SemaRef, bool DisableDiagnostics = true)
      : SemaRef(SemaRef),
        PrevDisableSubstitionDiagnostics(
            SemaRef.DisableSubstitionDiagnostics) {
    SemaRef.DisableSubstitionDiagnostics = DisableDiagnostics;
  }
  ~ExprSubstitutionScope() {
    SemaRef.DisableSubstitionDiagnostics = PrevDisableSubstitionDiagnostics;
  }
};
bool DisableSubstitionDiagnostics;
ExprResult ActOnPackExpression(Expr *PackedExpr, QualType ExistType,
                               TypeArgument SubstArg, SourceLocation StartLoc,
                               SourceLocation EndLoc);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                             SourceLocation NamespaceLoc,
                             SourceLocation IdentLoc, IdentifierInfo *Ident,
                             SourceLocation LBrace,
                             const ParsedAttributesView &AttrList,
                             UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
  // A cache representing if we've fully checked the various comparison
  // category types stored in ASTContext. The bit-index corresponds to the
  // integer value of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;
  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);
public:
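// Illustrative C++20 source (a sketch) that triggers the comparison-category
// checking declared below: the builtin <=> on int yields
// std::strong_ordering, which must be found and validated in the standard
// library's <compare>:
//
//   #include <compare>
//   auto cmp(int a, int b) { return a <=> b; }  // std::strong_ordering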
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                     SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                          SourceLocation NamespcLoc, CXXScopeSpec &SS,
                          SourceLocation IdentLoc,
                          IdentifierInfo *NamespcName,
                          const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                             SourceLocation AliasLoc, IdentifierInfo *Alias,
                             CXXScopeSpec &SS, SourceLocation IdentLoc,
                             IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                          const LookupResult &PreviousDecls,
                          UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                      NamedDecl *Target,
                                      UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                 bool HasTypenameKeyword,
                                 const CXXScopeSpec &SS,
                                 SourceLocation NameLoc,
                                 const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                             const CXXScopeSpec &SS,
                             const DeclarationNameInfo &NameInfo,
                             SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
    Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
    bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                              ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                          ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                            SourceLocation UsingLoc,
                            SourceLocation TypenameLoc, CXXScopeSpec &SS,
                            UnqualifiedId &Name, SourceLocation EllipsisLoc,
                            const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                            MultiTemplateParamsArg TemplateParams,
                            SourceLocation UsingLoc, UnqualifiedId &Name,
                            const ParsedAttributesView &AttrList,
                            TypeResult Type, Decl *DeclFromDeclSpec);
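// Illustrative initializations (a sketch) distinguished by the functions
// above:
//
//   std::vector<int> v{1, 2, 3};  // list-initialization; the init-list
//                                 // constructor is selected, guided by
//                                 // isStdInitializerList and
//                                 // isInitListConstructor
//   std::vector<int> w(3, 7);     // direct-initialization; an ordinary
//                                 // constructor is selected
//
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.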
/// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// Overwrite an EPI's exception specification with this /// computed exception specification. 
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
  FunctionProtoType::ExceptionSpecInfo ESI;
  ESI.Type = getExceptionSpecType();
  if (ESI.Type == EST_Dynamic) {
    ESI.Exceptions = Exceptions;
  } else if (ESI.Type == EST_None) {
    /// C++11 [except.spec]p14:
    /// The exception-specification is noexcept(false) if the set of
    /// potential exceptions of the special member function contains "any"
    ESI.Type = EST_NoexceptFalse;
    ESI.NoexceptExpr =
        Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
  }
  return ESI;
}
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
                                   CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
                             ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
                                 ExceptionSpecificationType EST,
                                 ArrayRef<ParsedType> DynamicExceptions,
                                 ArrayRef<SourceRange> DynamicExceptionRanges,
                                 Expr *NoexceptExpr,
                                 SmallVectorImpl<QualType> &Exceptions,
                                 FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
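// Illustrative source (a sketch): the implicit exception specification of
// Widget's defaulted default constructor below is computed by aggregating
// the specifications of its subobjects' constructors, as the
// Compute*ExceptionSpec functions above do:
//
//   struct Member { Member() noexcept; };
//   struct Widget {
//     Member m;
//     Widget() = default;  // implicitly noexcept, since Member() is noexcept
//   };
//
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.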
void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. 
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                             MultiExprArg ArgsPtr, SourceLocation Loc,
                             SmallVectorImpl<Expr *> &ConvertedArgs,
                             bool AllowExplicit = false,
                             bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                        SourceLocation NameLoc,
                                        IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
                              Scope *S, CXXScopeSpec &SS,
                              bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II,
                             SourceLocation NameLoc, Scope *S,
                             CXXScopeSpec &SS, ParsedType ObjectType,
                             bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                        ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                    bool IsDereference, SourceRange Range);
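// Illustrative casts (a sketch) routed through the named-cast handling below:
//
//   double d = 3.14;
//   int i = static_cast<int>(d);               // checked value conversion
//   const int *cp = &i;
//   int *p = const_cast<int *>(cp);            // cast away constness
//   unsigned char *bytes =
//       reinterpret_cast<unsigned char *>(&d); // reinterpret object bytes
//
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.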
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list.
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns true if the capture failed, false otherwise. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class.
AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
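/// For example, in the sketch below the temporary std::string must be
/// destroyed at the end of the full-expression, so the call is wrapped in
/// an ExprWithCleanups:
/// \code
/// void take(const std::string &);
/// void g() { take(std::string("tmp")); } // cleanup runs after the call
/// \endcode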
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
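/// For example, when the parser annotates the qualifier 'N::M::' in
/// \code
/// N::M::type x;
/// \endcode
/// the pointer produced by \c SaveNestedNameSpecifierAnnotation() is later
/// handed back here to rebuild the CXXScopeSpec.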
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. 
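/// For example, each lambda in this sketch introduces one init-capture
/// that ends up in the enclosing lambda scope:
/// \code
/// int n = 1;
/// auto byValue = [m = n + 1] { return m; };
/// auto byRef = [&r = n] { return r; };
/// \endcode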
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
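/// For example (a sketch, valid in Objective-C++ with blocks enabled), the
/// initialization below goes through this conversion:
/// \code
/// void (^blk)(void) = [] { /* ... */ };
/// \endcode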
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void 
ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. 
The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier. CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void
ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
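/// For example, the override in this sketch is rejected because it
/// overrides a virtual function marked 'final':
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f() override; }; // error: B::f is 'final'
/// \endcode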
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
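/// For example, in the sketch below the second use of 'pair' names a
/// deduction guide rather than the class template itself:
/// \code
/// template<typename T> struct pair { pair(T, T); };
/// template<typename T> pair(T, T) -> pair<T>;
/// \endcode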
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
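/// For example, some restrictions depend on this context: default template
/// arguments are fine on a class template but not in a friend class
/// template declaration, as this sketch shows:
/// \code
/// template<typename T = int> struct A;              // OK
/// struct S {
///   template<typename T = int> friend struct F;    // error
/// };
/// \endcode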
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, 
unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
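/// For example, this sketch uses the construct handled here:
/// \code
/// template<typename MetaFun, typename T1, typename T2>
/// using apply_t = typename MetaFun::template apply<T1, T2>;
/// \endcode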
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded The set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
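/// For example, in the following (illustrative) snippet the
/// nested-name-specifier 'Ts::' names an unexpanded parameter pack:
/// \code
/// template<typename ...Ts> struct S {
///   typename Ts::type member; // error: 'Ts' is unexpanded here
/// };
/// \endcode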
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
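/// For example, the following (illustrative) call fails deduction with
/// inconsistent deduced values for 'T' (TDK_Inconsistent below):
/// \code
/// template<typename T> void f(T, T);
/// void g() { f(1, 2.0); } // T deduced as both 'int' and 'double'
/// \endcode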
enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// A function argument from which we performed template argument /// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
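/// For example, the following (illustrative) declaration relies on an
/// implicitly declared deduction guide:
/// \code
/// template<typename T> struct Wrapper { Wrapper(T); };
/// Wrapper w(42); // deduces Wrapper<int> via the implicit guide
/// \endcode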
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts.
/// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information.
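/// A minimal usage sketch (hypothetical caller; 'SemaRef' and 'I' are
/// assumed names):
/// \code
/// {
///   Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
///   // substitutions here use the I-th element of each expanded pack
/// } // previous index restored on scope exit
/// \endcode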
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. 
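/// A minimal usage sketch (hypothetical caller code):
/// \code
/// if (Optional<sema::TemplateDeductionInfo *> Info =
///         SemaRef.isSFINAEContext())
///   ; // in a SFINAE context: capture diagnostics instead of emitting them
/// \endcode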
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation); /// however, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up.
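/// A minimal usage sketch (hypothetical caller; 'EPI' is assumed to be a
/// FunctionProtoType::ExtProtoInfo being assembled, 'NumParams' the
/// parameter count):
/// \code
/// Sema::ExtParameterInfoBuilder Builder;
/// Builder.set(0, FunctionProtoType::ExtParameterInfo()); // parameter 0
/// EPI.ExtParameterInfos = Builder.getPointerOrNull(NumParams);
/// \endcode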
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
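/// For example, in the following (illustrative) snippet the argument list
/// 'ts...' is a pack expansion that substitution must expand:
/// \code
/// void f(...);
/// template<typename ...Ts> void g(Ts ...ts) { f(ts...); }
/// \endcode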
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const 
ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . 
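// Illustrative sketch (names are hypothetical): the source forms behind the
// pragma callbacks above. Each '#pragma pack' below reaches Sema through
// ActOnPragmaPack, with a push or pop stack action and an alignment operand.
//
//   #pragma pack(push, 1)        // push current alignment, then pack to 1
//   struct WireHeader {
//     char tag;                  // offset 0
//     int  length;               // offset 1: pack(1) removes the padding
//   };
//   #pragma pack(pop)            // restore the previous alignment
//   static_assert(sizeof(WireHeader) == 5, "1-byte packing applied");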
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
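// Illustrative sketch (function names are hypothetical): the source-level
// attributes that AddAlignedAttr, AddAssumeAlignedAttr and AddAllocAlignAttr
// model when Sema attaches them to declarations.
//
//   // Callers may assume the result is 64-byte aligned (assume_aligned):
//   __attribute__((assume_aligned(64))) void *grab_block(void);
//   // The result is aligned to the value of parameter 1 (alloc_align):
//   __attribute__((alloc_align(1))) void *grab_aligned(unsigned align);
//   // A plain alignment attribute on a variable (aligned):
//   static int scratch[16] __attribute__((aligned(32)));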
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. 
If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Tries to capture a lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a D should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in a 'private' clause. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed.
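// Illustrative sketch of the data-sharing queries above (hypothetical names;
// omp_get_thread_num is from <omp.h>). Under 'private(x)' each thread gets a
// fresh, uninitialized copy, which is what isOpenMPCapturedDecl and the
// private-clause checks track:
//
//   int x = 42;
//   #pragma omp parallel private(x)
//   {
//     x = omp_get_thread_num();  // writes the per-thread copy only
//   }
//   // the original x still holds 42 here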
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on a correct id-expression from '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on the 'requires' directive. OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'.
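// Illustrative sketch of the user-defined reduction these hooks parse (the
// identifier 'maxmag' and the data are hypothetical; fabs is from <math.h>):
//
//   #pragma omp declare reduction(maxmag : double :                    \
//       omp_out = fabs(omp_in) > fabs(omp_out) ? omp_in : omp_out)     \
//       initializer(omp_priv = 0.0)
//
//   double m = 0.0;
//   #pragma omp parallel for reduction(maxmag : m)
//   for (int i = 0; i < n; ++i)
//     if (fabs(data[i]) > fabs(m)) m = data[i];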
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of a declare-target region, i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of a declare-target region, i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Called on a correct id-expression from '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OMPDeclareTargetDeclAttr::MapTypeTy MT, NamedDeclSetType &SameDirectiveDecls); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement.
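// Illustrative sketch of a declare-target region (hypothetical names): the
// first pragma fires ActOnStartOpenMPDeclareTargetDirective, the last one
// ActOnFinishOpenMPDeclareTargetDirective, and everything in between is
// compiled for the device as well as the host.
//
//   #pragma omp declare target
//   int bias;                                       // a device copy exists
//   int scale_on_device(int v) { return v + bias; }
//   #pragma omp end declare target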
StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. 
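// Illustrative sketch of the worksharing directives above inside one parallel
// region (helper names are hypothetical):
//
//   #pragma omp parallel
//   {
//   #pragma omp for schedule(static)       // ActOnOpenMPForDirective
//     for (int i = 0; i < n; ++i) body(i);
//   #pragma omp single                     // ActOnOpenMPSingleDirective
//     flush_logs();                        // runs on exactly one thread
//   #pragma omp critical(io)               // named: ActOnOpenMPCriticalDirective
//     log_done();                          // mutual exclusion on 'io'
//   }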
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. 
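// Illustrative sketch combining the device directives above (hypothetical
// names): 'target data' keeps 'sum' resident on the device, 'target' offloads
// the region, 'atomic' serializes the update, and 'target update' refreshes
// the host copy mid-region.
//
//   double sum = 0.0;
//   #pragma omp target data map(tofrom : sum)
//   {
//   #pragma omp target
//     {
//   #pragma omp atomic update
//       sum += 1.0;
//     }
//   #pragma omp target update from(sum)
//     report_partial(sum);
//   }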
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. 
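// Illustrative sketch of a '#pragma omp declare simd' annotation (the function
// is hypothetical): uniform arguments are invariant across SIMD lanes, the
// linear argument advances by 1 per lane, and simdlen fixes the vector length.
//
//   #pragma omp declare simd uniform(scale, a) linear(i : 1) simdlen(8)
//   float axpy_at(float scale, const float *a, int i);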
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause.
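// Illustrative sketch of the clause callbacks above (hypothetical names):
// 'schedule(dynamic, 4)' and 'nowait' reach ActOnOpenMPScheduleClause and
// ActOnOpenMPNowaitClause; 'atomic read' exercises ActOnOpenMPReadClause.
//
//   #pragma omp for schedule(dynamic, 4) nowait
//   for (int i = 0; i < n; ++i) work(i);
//
//   int snapshot;
//   #pragma omp atomic read
//   snapshot = shared_counter;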
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. 
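// Illustrative sketch of the variable-list clauses above on one loop
// (hypothetical names): 'sum' is combined across threads with '+', and each
// thread's 'offset' starts from the value it had before the region.
//
//   double sum = 0.0;
//   int offset = 10;
//   #pragma omp parallel for reduction(+ : sum) firstprivate(offset)
//   for (int i = 0; i < n; ++i)
//     sum += data[i] + offset;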
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. 
CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion, bool isBoundsSafeInterfaceCast = false); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for various /// forms of call prototypes.
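// Illustrative sketch of the standard conversions these entry points perform
// (the names are hypothetical):
//
//   void logv(const char *fmt, ...);
//   float f = 1.5f; short s = 3; int a[4];
//   logv("fs", f, s);  // DefaultVariadicArgumentPromotion: f -> double and
//                      // s -> int, per DefaultArgumentPromotion (C99 6.5.2.2p6)
//   int *p = a;        // DefaultFunctionArrayConversion: a decays to &a[0]
//   if (p) { }         // lvalue-to-rvalue plus pointer-to-boolean conversion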
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointer types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this.
IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// IncompatibleCheckedCVoid - Assignments to/from void pointers to pointers /// to data containing checked pointers are not allowed in regular checked /// scopes. They are allowed only in unchecked and checked bounds_only scopes. IncompatibleCheckedCVoid, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true, QualType LHSInteropType = QualType()); public: /// \brief Given a value with type Ty that has a bounds declaration, /// compute the bounds-safe interface type. Returns a null QualType /// if none exists.
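// Illustrative sketch of assignments that map to the AssignConvertType values
// above (hypothetical names; in C each draws a warning rather than an error):
//
//   int n = 7; int *ip; void (*fp)(void); void *vp;
//   ip = n;            // IntToPointer: accepted as an extension
//   n = ip;            // PointerToInt: accepted as an extension
//   vp = fp;           // FunctionVoidPointer: beyond the standard, accepted
//   const char **cpp; char **cp;
//   cpp = cp;          // IncompatibleNestedPointerQualifiers
//                      // (char ** -> const char **), accepted as an extension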
QualType SynthesizeInteropType(QualType Ty, bool isParam); /// Rewrite function types with bounds-safe interfaces on unchecked /// types to use the checked types specified by the interfaces. Recursively /// apply the rewrite to function types nested within the type. QualType RewriteBoundsSafeInterfaceTypes(QualType Ty); /// \brief Get the bounds-safe interface type for LHS. /// Returns a null QualType if there isn't one. QualType GetCheckedCLValueInteropType(ExprResult LHS); /// \brief Get the bounds-safe interface type for RHS. /// Returns a null QualType if there isn't one. QualType GetCheckedCRValueInteropType(ExprResult RHS); /// \brief If T is an array type, create a checked array type version of T. /// This includes propagating the checked property to nested array types. If /// a valid checked array type cannot be constructed and Diagnose is true, /// print a diagnostic message for the problem. QualType MakeCheckedArrayType(QualType T, bool Diagnose = false, SourceLocation Loc = SourceLocation()); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
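/// A typical call pattern inside CreateBuiltinBinOp (a sketch, not the /// verbatim implementation): /// /// QualType ResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, BO_Add); /// if (ResultTy.isNull() || LHS.isInvalid() || RHS.isInvalid()) /// return ExprError();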
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
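/// For example (a sketch), the operands below are routed through /// CheckVectorOperands, which either reconciles the two vector types or /// returns a null QualType: /// /// typedef int v4i __attribute__((vector_size(16))); /// v4i a, b; /// v4i c = a + b; // element-wise add, checked here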
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak.
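/// Example of a conversion this rejects under ARC unless bridged (a /// sketch): /// /// id obj = ...; /// void *bad = obj; // error: requires a bridged cast /// void *ok = (__bridge void *)obj; // ACR_okay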
ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. 
}; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success.
/// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP device errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Indicate that this function (and thus everything it transitively calls) /// will be codegen'ed, and emit any deferred diagnostics on this function and /// its (transitive) callees. void markKnownEmitted( Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee, SourceLocation OrigLoc, const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. 
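/// /// Example usage (a sketch of a call-site check, not verbatim Sema code): /// /// if (getLangOpts().CUDA && !CheckCUDACall(Loc, Callee)) /// return ExprError();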
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error, emits an appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure the kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
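/// For example (a sketch): while completing inside the argument list of /// foo(1, <cursor>), the parser can call /// /// QualType Preferred = ProduceCallSignatureHelp(S, Fn, Args, OpenParLoc); /// /// and use a non-null result to rank candidate completions.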
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void 
CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
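// A sketch of the builtin that SemaBuiltinShuffleVector checks and rebuilds // (kept public so template instantiation can re-check it): // // typedef float f4 __attribute__((ext_vector_type(4))); // f4 r = __builtin_shufflevector(a, b, 0, 1, 4, 5); // lanes 4,5 come from b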
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
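/// For example (a sketch), the -Wshadow-field warning is produced here: /// /// struct A { int x; }; /// struct B : A { int x; }; // 'x' shadows the inherited A::x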
void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains a 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether the receiver is a mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// a function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view it as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the set of potentially /// misaligned members and if it is converted to some pointer type T with lower /// or equal alignment requirements.
If so, it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; /// \brief RAII object that handles state changes for processing member /// bounds expressions. class EnterMemberBoundsExprRAII { Sema &S; bool SavedMemberBounds; public: EnterMemberBoundsExprRAII(Sema &S) : S(S), SavedMemberBounds(S.IsMemberBoundsExpr) { S.IsMemberBoundsExpr = true; } ~EnterMemberBoundsExprRAII() { S.IsMemberBoundsExpr = SavedMemberBounds; } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation.
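// A sketch of the intended use: deferred "bad call" diagnostics are // deduplicated per (declaration, call site) pair, so both fields feed the // hash, e.g. // llvm::DenseSet<clang::Sema::FunctionDeclAndLoc> Seen; // Seen.insert({FD, CallLoc}); // same FD at a new location is a new entry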
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
GB_binop__min_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_01__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__min_uint8) // A*D function (colscale): GB (_AxD__min_uint8) // D*A function (rowscale): GB (_DxB__min_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__min_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__min_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_uint8) // C=scalar+B GB (_bind1st__min_uint8) // C=scalar+B' GB (_bind1st_tran__min_uint8) // C=A+scalar GB (_bind2nd__min_uint8) // C=A'+scalar GB (_bind2nd_tran__min_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = GB_IMIN (aij, bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMIN (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_UINT8 || GxB_NO_MIN_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
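// As a sketch (not the literal template), with this operator the dense // C += A+B kernel below reduces, for each entry p, to: // // uint8_t aij = GBX (Ax, p, A_iso) ; // uint8_t bij = GBX (Bx, p, B_iso) ; // uint8_t t ; // GB_BINOP (t, aij, bij, 0, 0) ; // t = GB_IMIN (aij, bij) // GB_BINOP (Cx [p], Cx [p], t, 0, 0) ; // Cx [p] = GB_IMIN (Cx [p], t)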
void GB (_Cdense_ewise3_accum__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__min_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__min_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__min_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__min_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__min_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__min_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__min_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__min_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__min_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE 
return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMIN (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__min_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMIN (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (x, aij) ; \ } GrB_Info GB (_bind1st_tran__min_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
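The bind1st kernel above is the simplest of these workers, so it is worth a standalone illustration. The following is a minimal sketch, not SuiteSparse's actual API: imin_u8 stands in for GB_IMIN (assumed to be the usual ((x) < (y)) ? (x) : (y) macro), and the GBB/GBX bitmap and iso-value indirection is reduced to a plain NULL check and array read.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-in for GB_IMIN, assumed to be the usual integer-min macro.
static inline uint8_t imin_u8 (uint8_t a, uint8_t b) { return (a < b) ? a : b ; }

// What GB (_bind1st__min_uint8) computes: Cx [p] = min (x, Bx [p]) for every
// entry present in B. A NULL bitmap means all bnz entries are present.
static void bind1st_min_u8 (uint8_t *Cx, uint8_t x, const uint8_t *Bx,
                            const int8_t *Bb, int64_t bnz)
{
    #pragma omp parallel for schedule(static)
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        if (Bb != NULL && !Bb [p]) continue ;   // skip entries absent from B
        Cx [p] = imin_u8 (x, Bx [p]) ;
    }
}

int main (void)
{
    uint8_t Bx [4] = { 3, 200, 7, 100 }, Cx [4] = { 0, 0, 0, 0 } ;
    bind1st_min_u8 (Cx, 50, Bx, NULL, 4) ;
    for (int p = 0 ; p < 4 ; p++) printf ("%d ", Cx [p]) ;   // prints: 3 50 7 50
    printf ("\n") ;
    return 0 ;
}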
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_01__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__min_uint8) // A*D function (colscale): GB (_AxD__min_uint8) // D*A function (rowscale): GB (_DxB__min_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__min_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__min_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_uint8) // C=scalar+B GB (_bind1st__min_uint8) // C=scalar+B' GB (_bind1st_tran__min_uint8) // C=A+scalar GB (_bind2nd__min_uint8) // C=A'+scalar GB (_bind2nd_tran__min_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = GB_IMIN (aij, bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMIN (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_UINT8 || GxB_NO_MIN_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__min_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__min_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__min_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__min_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__min_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__min_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__min_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__min_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__min_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE 
return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMIN (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__min_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMIN (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (x, aij) ; \ } GrB_Info GB (_bind1st_tran__min_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
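For the colscale worker GB (_AxD__min_uint8), the comment "C = A*D, column scale with diagonal D matrix" means that column j of A is combined entrywise with the diagonal entry D(j,j) through the binop, here cij = GB_IMIN (aij, djj). Below is a dense, hypothetical sketch of that semantics; the real kernel iterates over sparse/hyper A via the included template, and the helper name and layout here are assumptions.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Dense stand-in for GB (_AxD__min_uint8): C = A*D with D diagonal, i.e.
// cij = GB_IMIN (aij, djj). A and C are m-by-n, column-major; d is diag (D).
static void colscale_min_u8 (uint8_t *C, const uint8_t *A, const uint8_t *d,
                             int m, int n)
{
    for (int j = 0 ; j < n ; j++)
    {
        for (int i = 0 ; i < m ; i++)
        {
            uint8_t aij = A [i + (size_t) j * m] ;
            C [i + (size_t) j * m] = (aij < d [j]) ? aij : d [j] ;
        }
    }
}

int main (void)
{
    uint8_t A [4] = { 9, 1, 8, 2 } ;   // 2-by-2, column-major
    uint8_t d [2] = { 5, 3 } ;         // diagonal of D
    uint8_t C [4] ;
    colscale_min_u8 (C, A, d, 2, 2) ;
    printf ("%d %d %d %d\n", C [0], C [1], C [2], C [3]) ;   // prints: 5 1 3 2
    return 0 ;
}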
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_01__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__min_uint8) // A*D function (colscale): GB (_AxD__min_uint8) // D*A function (rowscale): GB (_DxB__min_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__min_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__min_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_uint8) // C=scalar+B GB (_bind1st__min_uint8) // C=scalar+B' GB (_bind1st_tran__min_uint8) // C=A+scalar GB (_bind2nd__min_uint8) // C=A'+scalar GB (_bind2nd_tran__min_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = GB_IMIN (aij, bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMIN (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_UINT8 || GxB_NO_MIN_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__min_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__min_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__min_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__min_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__min_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__min_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__min_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__min_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__min_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE 
return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMIN (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__min_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMIN (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (x, aij) ; \ } GrB_Info GB (_bind1st_tran__min_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
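One pattern repeated in every worker above is the GB_DISABLE stub: when GxB_NO_MIN, GxB_NO_UINT8, or GxB_NO_MIN_UINT8 is set, each kernel compiles down to return (GrB_NO_VALUE), and the caller is expected to fall back to a generic implementation. A toy sketch of that dispatch convention, with all names hypothetical:

#include <cstdio>

// Toy version of the GB_DISABLE convention. The real code tests GxB_NO_MIN,
// GxB_NO_UINT8, and GxB_NO_MIN_UINT8; here a single flag stands in for them.
#define DISABLE 0

enum Info { SUCCESS, NO_VALUE } ;

static Info specialized_kernel (void)
{
#if DISABLE
    return NO_VALUE ;                    // compiled out: defer to generic code
#else
    puts ("hard-coded min_uint8 kernel") ;
    return SUCCESS ;
#endif
}

static void generic_kernel (void) { puts ("generic binop kernel") ; }

int main (void)
{
    // The caller treats NO_VALUE (GrB_NO_VALUE in the library) as
    // "not handled here" and falls back to the generic worker.
    if (specialized_kernel () == NO_VALUE) generic_kernel () ;
    return 0 ;
}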
tree.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_TREE_H_ #define LIGHTGBM_TREE_H_ #include <LightGBM/dataset.h> #include <LightGBM/meta.h> #include <string> #include <map> #include <memory> #include <unordered_map> #include <vector> namespace LightGBM { #define kCategoricalMask (1) #define kDefaultLeftMask (2) /*! * \brief Tree model */ class Tree { public: /*! * \brief Constructor * \param max_leaves The number of max leaves * \param track_branch_features Whether to keep track of ancestors of leaf nodes */ explicit Tree(int max_leaves, bool track_branch_features); /*! * \brief Constructor, from a string * \param str Model string * \param used_len used count of str */ Tree(const char* str, size_t* used_len); ~Tree() noexcept = default; /*! * \brief Performing a split on tree leaves. * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split * \param threshold_double Threshold on feature value * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param left_weight Weight of left child * \param right_weight Weight of right child * \param gain Split gain * \param missing_type missing type * \param default_left default direction for missing value * \return The index of new leaf. */ int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin, double threshold_double, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type, bool default_left); /*! * \brief Performing a split on tree leaves, with categorical feature * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split, use bitset to represent * \param num_threshold_bin size of threshold_bin * \param threshold Thresholds of real feature value, use bitset to represent * \param num_threshold size of threshold * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param left_weight Weight of left child * \param right_weight Weight of right child * \param gain Split gain * \return The index of new leaf. */ int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin, const uint32_t* threshold, int num_threshold, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type); /*! \brief Get the output of one leaf */ inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; } /*! \brief Set the output of one leaf */ inline void SetLeafOutput(int leaf, double output) { leaf_value_[leaf] = MaybeRoundToZero(output); } /*! 
* \brief Adding prediction value of this tree model to scores * \param data The dataset * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, data_size_t num_data, double* score) const; /*! * \brief Adding prediction value of this tree model to scores * \param data The dataset * \param used_data_indices Indices of used data * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, const data_size_t* used_data_indices, data_size_t num_data, double* score) const; /*! * \brief Get upper bound leaf value of this tree model */ double GetUpperBoundValue() const; /*! * \brief Get lower bound leaf value of this tree model */ double GetLowerBoundValue() const; /*! * \brief Prediction on one record * \param feature_values Feature value of this record * \return Prediction result */ inline double Predict(const double* feature_values) const; inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const; inline int PredictLeafIndex(const double* feature_values) const; inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const; inline void PredictContrib(const double* feature_values, int num_features, double* output); inline void PredictContribByMap(const std::unordered_map<int, double>& feature_values, int num_features, std::unordered_map<int, double>* output); /*! \brief Get Number of leaves*/ inline int num_leaves() const { return num_leaves_; } /*! \brief Get depth of specific leaf*/ inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; } /*! \brief Get parent of specific leaf*/ inline int leaf_parent(int leaf_idx) const {return leaf_parent_[leaf_idx]; } /*! \brief Get feature of specific split*/ inline int split_feature(int split_idx) const { return split_feature_[split_idx]; } /*! \brief Get features on leaf's branch*/ inline std::vector<int> branch_features(int leaf) const { return branch_features_[leaf]; } inline double split_gain(int split_idx) const { return split_gain_[split_idx]; } inline double internal_value(int node_idx) const { return internal_value_[node_idx]; } inline bool IsNumericalSplit(int node_idx) const { return !GetDecisionType(decision_type_[node_idx], kCategoricalMask); } inline int left_child(int node_idx) const { return left_child_[node_idx]; } inline int right_child(int node_idx) const { return right_child_[node_idx]; } inline int split_feature_inner(int node_idx) const { return split_feature_inner_[node_idx]; } inline uint32_t threshold_in_bin(int node_idx) const { return threshold_in_bin_[node_idx]; } /*! \brief Get the number of data points that fall at or below this node*/ inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; } /*! 
* \brief Shrinkage for the tree's output * shrinkage rate (a.k.a. learning rate) is used to tune the training process * \param rate The factor of shrinkage */ inline void Shrinkage(double rate) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_ - 1; ++i) { leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] * rate); internal_value_[i] = MaybeRoundToZero(internal_value_[i] * rate); } leaf_value_[num_leaves_ - 1] = MaybeRoundToZero(leaf_value_[num_leaves_ - 1] * rate); shrinkage_ *= rate; } inline double shrinkage() const { return shrinkage_; } inline void AddBias(double val) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_ - 1; ++i) { leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] + val); internal_value_[i] = MaybeRoundToZero(internal_value_[i] + val); } leaf_value_[num_leaves_ - 1] = MaybeRoundToZero(leaf_value_[num_leaves_ - 1] + val); // force to 1.0 shrinkage_ = 1.0f; } inline void AsConstantTree(double val) { num_leaves_ = 1; shrinkage_ = 1.0f; leaf_value_[0] = val; } /*! \brief Serialize this object to string*/ std::string ToString() const; /*! \brief Serialize this object to json*/ std::string ToJSON() const; /*! \brief Serialize this object to if-else statement*/ std::string ToIfElse(int index, bool predict_leaf_index) const; inline static bool IsZero(double fval) { return (fval >= -kZeroThreshold && fval <= kZeroThreshold); } inline static double MaybeRoundToZero(double fval) { return IsZero(fval) ? 0 : fval; } inline static bool GetDecisionType(int8_t decision_type, int8_t mask) { return (decision_type & mask) > 0; } inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) { if (input) { (*decision_type) |= mask; } else { (*decision_type) &= (127 - mask); } } inline static int8_t GetMissingType(int8_t decision_type) { return (decision_type >> 2) & 3; } inline static void SetMissingType(int8_t* decision_type, int8_t input) { (*decision_type) &= 3; (*decision_type) |= (input << 2); } void RecomputeMaxDepth(); int NextLeafId() const { return num_leaves_; } private: std::string NumericalDecisionIfElse(int node) const; std::string CategoricalDecisionIfElse(int node) const; inline int NumericalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if (std::isnan(fval) && missing_type != MissingType::NaN) { fval = 0.0f; } if ((missing_type == MissingType::Zero && IsZero(fval)) || (missing_type == MissingType::NaN && std::isnan(fval))) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if ((missing_type == MissingType::Zero && fval == default_bin) || (missing_type == MissingType::NaN && fval == max_bin)) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_in_bin_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int CategoricalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); int int_fval = static_cast<int>(fval); if (int_fval < 0) { return right_child_[node]; } else if
(std::isnan(fval)) { // NaN is always in the right if (missing_type == MissingType::NaN) { return right_child_[node]; } int_fval = 0; } int cat_idx = static_cast<int>(threshold_[node]); if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx], cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) { return left_child_[node]; } return right_child_[node]; } inline int CategoricalDecisionInner(uint32_t fval, int node) const { int cat_idx = static_cast<int>(threshold_in_bin_[node]); if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx], cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) { return left_child_[node]; } return right_child_[node]; } inline int Decision(double fval, int node) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecision(fval, node); } else { return NumericalDecision(fval, node); } } inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecisionInner(fval, node); } else { return NumericalDecisionInner(fval, node, default_bin, max_bin); } } inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain); /*! * \brief Find the index of the leaf a record belongs to, based on its feature values * \param feature_values Feature value of this record * \return Leaf index */ inline int GetLeaf(const double* feature_values) const; inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const; /*! \brief Serialize one node to json*/ std::string NodeToJSON(int index) const; /*! \brief Serialize one node to if-else statement*/ std::string NodeToIfElse(int index, bool predict_leaf_index) const; std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const; double ExpectedValue() const; /*! \brief This is used to fill in leaf_depth_ after reloading a model*/ inline void RecomputeLeafDepths(int node = 0, int depth = 0); /*! * \brief Used by TreeSHAP for data we keep about our decision path */ struct PathElement { int feature_index; double zero_fraction; double one_fraction; // note that pweight is included for convenience and is not tied with the other attributes, // the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them double pweight; PathElement() {} PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {} }; /*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/ void TreeSHAP(const double *feature_values, double *phi, int node, int unique_depth, PathElement *parent_unique_path, double parent_zero_fraction, double parent_one_fraction, int parent_feature_index) const; void TreeSHAPByMap(const std::unordered_map<int, double>& feature_values, std::unordered_map<int, double>* phi, int node, int unique_depth, PathElement *parent_unique_path, double parent_zero_fraction, double parent_one_fraction, int parent_feature_index) const; /*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/ static void ExtendPath(PathElement *unique_path, int unique_depth, double zero_fraction, double one_fraction, int feature_index); /*!
\brief Undo a previous extension of the decision path for TreeSHAP*/ static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index); /*! \brief Determine what the total permutation weight would be if we unwound a previous extension in the decision path*/ static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index); /*! \brief Number of max leaves*/ int max_leaves_; /*! \brief Number of current leaves*/ int num_leaves_; // following values used for non-leaf node /*! \brief A non-leaf node's left child */ std::vector<int> left_child_; /*! \brief A non-leaf node's right child */ std::vector<int> right_child_; /*! \brief A non-leaf node's split feature */ std::vector<int> split_feature_inner_; /*! \brief A non-leaf node's split feature, the original index */ std::vector<int> split_feature_; /*! \brief A non-leaf node's split threshold in bin */ std::vector<uint32_t> threshold_in_bin_; /*! \brief A non-leaf node's split threshold in feature value */ std::vector<double> threshold_; int num_cat_; std::vector<int> cat_boundaries_inner_; std::vector<uint32_t> cat_threshold_inner_; std::vector<int> cat_boundaries_; std::vector<uint32_t> cat_threshold_; /*! \brief Store the information for categorical feature handle and missing value handle. */ std::vector<int8_t> decision_type_; /*! \brief A non-leaf node's split gain */ std::vector<float> split_gain_; // used for leaf node /*! \brief The parent of leaf */ std::vector<int> leaf_parent_; /*! \brief Output of leaves */ std::vector<double> leaf_value_; /*! \brief weight of leaves */ std::vector<double> leaf_weight_; /*! \brief DataCount of leaves */ std::vector<int> leaf_count_; /*! \brief Output of non-leaf nodes */ std::vector<double> internal_value_; /*! \brief weight of non-leaf nodes */ std::vector<double> internal_weight_; /*! \brief DataCount of non-leaf nodes */ std::vector<int> internal_count_; /*! \brief Depth for leaves */ std::vector<int> leaf_depth_; /*! \brief whether to keep track of ancestor nodes for each leaf (only needed when feature interactions are restricted) */ bool track_branch_features_; /*! \brief Features on leaf's branch, original index */ std::vector<std::vector<int>> branch_features_; double shrinkage_; int max_depth_; }; inline void Tree::Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain) { int new_node_idx = num_leaves_ - 1; // update parent info int parent = leaf_parent_[leaf]; if (parent >= 0) { // if cur node is left child if (left_child_[parent] == ~leaf) { left_child_[parent] = new_node_idx; } else { right_child_[parent] = new_node_idx; } } // add new node split_feature_inner_[new_node_idx] = feature; split_feature_[new_node_idx] = real_feature; split_gain_[new_node_idx] = gain; // add two new leaves left_child_[new_node_idx] = ~leaf; right_child_[new_node_idx] = ~num_leaves_; // update new leaves leaf_parent_[leaf] = new_node_idx; leaf_parent_[num_leaves_] = new_node_idx; // save current leaf value to internal node before change internal_weight_[new_node_idx] = leaf_weight_[leaf]; internal_value_[new_node_idx] = leaf_value_[leaf]; internal_count_[new_node_idx] = left_cnt + right_cnt; leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value; leaf_weight_[leaf] = left_weight; leaf_count_[leaf] = left_cnt; leaf_value_[num_leaves_] = std::isnan(right_value) ?
0.0f : right_value; leaf_weight_[num_leaves_] = right_weight; leaf_count_[num_leaves_] = right_cnt; // update leaf depth leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1; leaf_depth_[leaf]++; if (track_branch_features_) { branch_features_[num_leaves_] = branch_features_[leaf]; branch_features_[num_leaves_].push_back(split_feature_[new_node_idx]); branch_features_[leaf].push_back(split_feature_[new_node_idx]); } } inline double Tree::Predict(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline int Tree::PredictLeafIndex(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return leaf; } else { return 0; } } inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return leaf; } else { return 0; } } inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) { output[num_features] += ExpectedValue(); // Run the recursion with preallocated space for the unique path data if (num_leaves_ > 1) { CHECK_GE(max_depth_, 0); const int max_path_len = max_depth_ + 1; std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2); TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1); } } inline void Tree::PredictContribByMap(const std::unordered_map<int, double>& feature_values, int num_features, std::unordered_map<int, double>* output) { (*output)[num_features] += ExpectedValue(); // Run the recursion with preallocated space for the unique path data if (num_leaves_ > 1) { CHECK_GE(max_depth_, 0); const int max_path_len = max_depth_ + 1; std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2); TreeSHAPByMap(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1); } } inline void Tree::RecomputeLeafDepths(int node, int depth) { if (node == 0) leaf_depth_.resize(num_leaves()); if (node < 0) { leaf_depth_[~node] = depth; } else { RecomputeLeafDepths(left_child_[node], depth + 1); RecomputeLeafDepths(right_child_[node], depth + 1); } } inline int Tree::GetLeaf(const double* feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values[split_feature_[node]], node); } } else { while (node >= 0) { node = NumericalDecision(feature_values[split_feature_[node]], node); } } return ~node; } inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } else { while (node >= 0) { node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } return ~node; } } // namespace LightGBM #endif // LightGBM_TREE_H_
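The encoding that ties Tree::Split, GetLeaf, and data_count together is that child links >= 0 name internal nodes, while a leaf k is stored as its bitwise complement ~k, which is always negative; traversal therefore loops while (node >= 0) and decodes the result with ~node. A minimal sketch of just that convention (the missing-value and categorical handling of the real NumericalDecision/Decision is omitted, and TinyTree is a hypothetical stand-in, not the library's class):

#include <cstdio>
#include <vector>

struct TinyTree
{
    std::vector<int>    left, right ;     // child links; ~k (negative) = leaf k
    std::vector<int>    feature ;         // split feature per internal node
    std::vector<double> threshold ;       // split threshold per internal node
    std::vector<double> leaf_value ;

    int GetLeaf (const double *x) const
    {
        int node = 0 ;                    // root is internal node 0
        while (node >= 0)                 // negative means we reached a leaf
        {
            node = (x [feature [node]] <= threshold [node])
                   ? left [node] : right [node] ;
        }
        return ~node ;                    // decode the complement into a leaf id
    }
} ;

int main ()
{
    // node 0: x[0] <= 1.5 ? leaf 0 : node 1;  node 1: x[1] <= 0.5 ? leaf 1 : leaf 2
    TinyTree t { { ~0, ~1 }, { 1, ~2 }, { 0, 1 }, { 1.5, 0.5 }, { 10.0, 20.0, 30.0 } } ;
    double x [2] = { 2.0, 0.7 } ;
    int leaf = t.GetLeaf (x) ;
    printf ("leaf %d, value %g\n", leaf, t.leaf_value [leaf]) ;   // leaf 2, value 30
    return 0 ;
}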
#ifndef LIGHTGBM_TREE_H_ #define LIGHTGBM_TREE_H_ #include <LightGBM/dataset.h> #include <LightGBM/meta.h> #include <string> #include <map> #include <memory> #include <unordered_map> #include <vector> namespace LightGBM { #define kCategoricalMask (1) #define kDefaultLeftMask (2) /*! * \brief Tree model */ class Tree { public: /*! * \brief Constructor * \param max_leaves The number of max leaves * \param track_branch_features Whether to keep track of ancestors of leaf nodes */ explicit Tree(int max_leaves, bool track_branch_features); /*! * \brief Constructor, from a string * \param str Model string * \param used_len used count of str */ Tree(const char* str, size_t* used_len); ~Tree() noexcept = default; /*! * \brief Performing a split on tree leaves. * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split * \param threshold_double Threshold on feature value * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param left_weight Weight of left child * \param right_weight Weight of right child * \param gain Split gain * \param missing_type missing type * \param default_left default direction for missing value * \return The index of new leaf. */ int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin, double threshold_double, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type, bool default_left); /*! * \brief Performing a split on tree leaves, with categorical feature * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split, use bitset to represent * \param num_threshold_bin size of threshold_bin * \param threshold Thresholds of real feature value, use bitset to represent * \param num_threshold size of threshold * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param left_weight Weight of left child * \param right_weight Weight of right child * \param gain Split gain * \return The index of new leaf. */ int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin, const uint32_t* threshold, int num_threshold, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type); /*! \brief Get the output of one leaf */ inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; } /*! \brief Set the output of one leaf */ inline void SetLeafOutput(int leaf, double output) { leaf_value_[leaf] = MaybeRoundToZero(output); } /*! * \brief Adding prediction value of this tree model to scores * \param data The dataset * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, data_size_t num_data, double* score) const; /*! 
* \brief Adding prediction value of this tree model to scores * \param data The dataset * \param used_data_indices Indices of used data * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, const data_size_t* used_data_indices, data_size_t num_data, double* score) const; /*! * \brief Get upper bound leaf value of this tree model */ double GetUpperBoundValue() const; /*! * \brief Get lower bound leaf value of this tree model */ double GetLowerBoundValue() const; /*! * \brief Prediction on one record * \param feature_values Feature value of this record * \return Prediction result */ inline double Predict(const double* feature_values) const; inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const; inline int PredictLeafIndex(const double* feature_values) const; inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const; inline void PredictContrib(const double* feature_values, int num_features, double* output); inline void PredictContribByMap(const std::unordered_map<int, double>& feature_values, int num_features, std::unordered_map<int, double>* output); /*! \brief Get Number of leaves*/ inline int num_leaves() const { return num_leaves_; } /*! \brief Get depth of specific leaf*/ inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; } /*! \brief Get parent of specific leaf*/ inline int leaf_parent(int leaf_idx) const {return leaf_parent_[leaf_idx]; } /*! \brief Get feature of specific split*/ inline int split_feature(int split_idx) const { return split_feature_[split_idx]; } /*! \brief Get features on leaf's branch*/ inline std::vector<int> branch_features(int leaf) const { return branch_features_[leaf]; } inline double split_gain(int split_idx) const { return split_gain_[split_idx]; } inline double internal_value(int node_idx) const { return internal_value_[node_idx]; } inline bool IsNumericalSplit(int node_idx) const { return !GetDecisionType(decision_type_[node_idx], kCategoricalMask); } inline int left_child(int node_idx) const { return left_child_[node_idx]; } inline int right_child(int node_idx) const { return right_child_[node_idx]; } inline int split_feature_inner(int node_idx) const { return split_feature_inner_[node_idx]; } inline uint32_t threshold_in_bin(int node_idx) const { return threshold_in_bin_[node_idx]; } /*! \brief Get the number of data points that fall at or below this node*/ inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; } /*! 
* \brief Shrinkage for the tree's output * shrinkage rate (a.k.a. learning rate) is used to tune the training process * \param rate The factor of shrinkage */ inline void Shrinkage(double rate) { for (int i = 0; i < num_leaves_ - 1; ++i) { leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] * rate); internal_value_[i] = MaybeRoundToZero(internal_value_[i] * rate); } leaf_value_[num_leaves_ - 1] = MaybeRoundToZero(leaf_value_[num_leaves_ - 1] * rate); shrinkage_ *= rate; } inline double shrinkage() const { return shrinkage_; } inline void AddBias(double val) { for (int i = 0; i < num_leaves_ - 1; ++i) { leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] + val); internal_value_[i] = MaybeRoundToZero(internal_value_[i] + val); } leaf_value_[num_leaves_ - 1] = MaybeRoundToZero(leaf_value_[num_leaves_ - 1] + val); // force to 1.0 shrinkage_ = 1.0f; } inline void AsConstantTree(double val) { num_leaves_ = 1; shrinkage_ = 1.0f; leaf_value_[0] = val; } /*! \brief Serialize this object to string*/ std::string ToString() const; /*! \brief Serialize this object to json*/ std::string ToJSON() const; /*! \brief Serialize this object to if-else statement*/ std::string ToIfElse(int index, bool predict_leaf_index) const; inline static bool IsZero(double fval) { return (fval >= -kZeroThreshold && fval <= kZeroThreshold); } inline static double MaybeRoundToZero(double fval) { return IsZero(fval) ? 0 : fval; } inline static bool GetDecisionType(int8_t decision_type, int8_t mask) { return (decision_type & mask) > 0; } inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) { if (input) { (*decision_type) |= mask; } else { (*decision_type) &= (127 - mask); } } inline static int8_t GetMissingType(int8_t decision_type) { return (decision_type >> 2) & 3; } inline static void SetMissingType(int8_t* decision_type, int8_t input) { (*decision_type) &= 3; (*decision_type) |= (input << 2); } void RecomputeMaxDepth(); int NextLeafId() const { return num_leaves_; } private: std::string NumericalDecisionIfElse(int node) const; std::string CategoricalDecisionIfElse(int node) const; inline int NumericalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if (std::isnan(fval) && missing_type != MissingType::NaN) { fval = 0.0f; } if ((missing_type == MissingType::Zero && IsZero(fval)) || (missing_type == MissingType::NaN && std::isnan(fval))) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if ((missing_type == MissingType::Zero && fval == default_bin) || (missing_type == MissingType::NaN && fval == max_bin)) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_in_bin_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int CategoricalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); int int_fval = static_cast<int>(fval); if (int_fval < 0) { return right_child_[node]; } else if (std::isnan(fval)) { // NaN is always in the right if (missing_type == MissingType::NaN) { return right_child_[node]; } int_fval = 0; } int cat_idx = 
static_cast<int>(threshold_[node]); if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx], cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) { return left_child_[node]; } return right_child_[node]; } inline int CategoricalDecisionInner(uint32_t fval, int node) const { int cat_idx = static_cast<int>(threshold_in_bin_[node]); if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx], cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) { return left_child_[node]; } return right_child_[node]; } inline int Decision(double fval, int node) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecision(fval, node); } else { return NumericalDecision(fval, node); } } inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecisionInner(fval, node); } else { return NumericalDecisionInner(fval, node, default_bin, max_bin); } } inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain); /*! * \brief Find the index of the leaf that a record falls into, given its feature values * \param feature_values Feature value of this record * \return Leaf index */ inline int GetLeaf(const double* feature_values) const; inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const; /*! \brief Serialize one node to json*/ std::string NodeToJSON(int index) const; /*! \brief Serialize one node to if-else statement*/ std::string NodeToIfElse(int index, bool predict_leaf_index) const; std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const; double ExpectedValue() const; /*! \brief This is used to fill in leaf_depth_ after reloading a model*/ inline void RecomputeLeafDepths(int node = 0, int depth = 0); /*! * \brief Used by TreeSHAP for data we keep about our decision path */ struct PathElement { int feature_index; double zero_fraction; double one_fraction; // note that pweight is included for convenience and is not tied with the other attributes, // the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them double pweight; PathElement() {} PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {} }; /*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/ void TreeSHAP(const double *feature_values, double *phi, int node, int unique_depth, PathElement *parent_unique_path, double parent_zero_fraction, double parent_one_fraction, int parent_feature_index) const; void TreeSHAPByMap(const std::unordered_map<int, double>& feature_values, std::unordered_map<int, double>* phi, int node, int unique_depth, PathElement *parent_unique_path, double parent_zero_fraction, double parent_one_fraction, int parent_feature_index) const; /*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/ static void ExtendPath(PathElement *unique_path, int unique_depth, double zero_fraction, double one_fraction, int feature_index); /*! \brief Undo a previous extension of the decision path for TreeSHAP*/ static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index); /*! 
determine what the total permutation weight would be if we unwound a previous extension in the decision path*/ static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index); /*! \brief Number of max leaves*/ int max_leaves_; /*! \brief Number of current leaves*/ int num_leaves_; // following values used for non-leaf node /*! \brief A non-leaf node's left child */ std::vector<int> left_child_; /*! \brief A non-leaf node's right child */ std::vector<int> right_child_; /*! \brief A non-leaf node's split feature */ std::vector<int> split_feature_inner_; /*! \brief A non-leaf node's split feature, the original index */ std::vector<int> split_feature_; /*! \brief A non-leaf node's split threshold in bin */ std::vector<uint32_t> threshold_in_bin_; /*! \brief A non-leaf node's split threshold in feature value */ std::vector<double> threshold_; int num_cat_; std::vector<int> cat_boundaries_inner_; std::vector<uint32_t> cat_threshold_inner_; std::vector<int> cat_boundaries_; std::vector<uint32_t> cat_threshold_; /*! \brief Store the information for categorical feature handle and missing value handle. */ std::vector<int8_t> decision_type_; /*! \brief A non-leaf node's split gain */ std::vector<float> split_gain_; // used for leaf node /*! \brief The parent of leaf */ std::vector<int> leaf_parent_; /*! \brief Output of leaves */ std::vector<double> leaf_value_; /*! \brief weight of leaves */ std::vector<double> leaf_weight_; /*! \brief DataCount of leaves */ std::vector<int> leaf_count_; /*! \brief Output of non-leaf nodes */ std::vector<double> internal_value_; /*! \brief weight of non-leaf nodes */ std::vector<double> internal_weight_; /*! \brief DataCount of non-leaf nodes */ std::vector<int> internal_count_; /*! \brief Depth for leaves */ std::vector<int> leaf_depth_; /*! \brief whether to keep track of ancestor nodes for each leaf (only needed when feature interactions are restricted) */ bool track_branch_features_; /*! \brief Features on leaf's branch, original index */ std::vector<std::vector<int>> branch_features_; double shrinkage_; int max_depth_; }; inline void Tree::Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain) { int new_node_idx = num_leaves_ - 1; // update parent info int parent = leaf_parent_[leaf]; if (parent >= 0) { // if cur node is left child if (left_child_[parent] == ~leaf) { left_child_[parent] = new_node_idx; } else { right_child_[parent] = new_node_idx; } } // add new node split_feature_inner_[new_node_idx] = feature; split_feature_[new_node_idx] = real_feature; split_gain_[new_node_idx] = gain; // add two new leaves left_child_[new_node_idx] = ~leaf; right_child_[new_node_idx] = ~num_leaves_; // update new leaves leaf_parent_[leaf] = new_node_idx; leaf_parent_[num_leaves_] = new_node_idx; // save current leaf value to internal node before change internal_weight_[new_node_idx] = leaf_weight_[leaf]; internal_value_[new_node_idx] = leaf_value_[leaf]; internal_count_[new_node_idx] = left_cnt + right_cnt; leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value; leaf_weight_[leaf] = left_weight; leaf_count_[leaf] = left_cnt; leaf_value_[num_leaves_] = std::isnan(right_value) ? 
0.0f : right_value; leaf_weight_[num_leaves_] = right_weight; leaf_count_[num_leaves_] = right_cnt; // update leaf depth leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1; leaf_depth_[leaf]++; if (track_branch_features_) { branch_features_[num_leaves_] = branch_features_[leaf]; branch_features_[num_leaves_].push_back(split_feature_[new_node_idx]); branch_features_[leaf].push_back(split_feature_[new_node_idx]); } } inline double Tree::Predict(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline int Tree::PredictLeafIndex(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return leaf; } else { return 0; } } inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return leaf; } else { return 0; } } inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) { output[num_features] += ExpectedValue(); // Run the recursion with preallocated space for the unique path data if (num_leaves_ > 1) { CHECK_GE(max_depth_, 0); const int max_path_len = max_depth_ + 1; std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2); TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1); } } inline void Tree::PredictContribByMap(const std::unordered_map<int, double>& feature_values, int num_features, std::unordered_map<int, double>* output) { (*output)[num_features] += ExpectedValue(); // Run the recursion with preallocated space for the unique path data if (num_leaves_ > 1) { CHECK_GE(max_depth_, 0); const int max_path_len = max_depth_ + 1; std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2); TreeSHAPByMap(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1); } } inline void Tree::RecomputeLeafDepths(int node, int depth) { if (node == 0) leaf_depth_.resize(num_leaves()); if (node < 0) { leaf_depth_[~node] = depth; } else { RecomputeLeafDepths(left_child_[node], depth + 1); RecomputeLeafDepths(right_child_[node], depth + 1); } } inline int Tree::GetLeaf(const double* feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values[split_feature_[node]], node); } } else { while (node >= 0) { node = NumericalDecision(feature_values[split_feature_[node]], node); } } return ~node; } inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } else { while (node >= 0) { node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } return ~node; } } // namespace LightGBM #endif // LightGBM_TREE_H_
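Editor's note: the header above packs three flags into each node's decision_type_ byte (bit 0: categorical split, bit 1: default-left, bits 2-3: missing-value type) and names leaves with bitwise-complemented indices, so a single int child pointer encodes either an internal node (>= 0) or a leaf (< 0, recovered with operator~). The following standalone sketch illustrates that convention; it is not part of the dataset row, the *Demo names are hypothetical, and MissingType::NaN == 2 is an assumption about the enum ordering.

#include <cstdint>
#include <cstdio>

// Mirrors kCategoricalMask / kDefaultLeftMask from the header above.
constexpr int8_t kCategoricalMaskDemo = 1;
constexpr int8_t kDefaultLeftMaskDemo = 2;

int main() {
  int8_t decision_type = 0;
  decision_type |= kDefaultLeftMaskDemo;  // SetDecisionType(..., true, kDefaultLeftMask)
  // SetMissingType(..., 2): keep bits 0-1, store the missing type in bits 2-3.
  decision_type = static_cast<int8_t>((decision_type & 3) | (2 << 2));
  std::printf("categorical=%d default_left=%d missing=%d\n",
              (decision_type & kCategoricalMaskDemo) > 0,  // GetDecisionType(..., kCategoricalMask)
              (decision_type & kDefaultLeftMaskDemo) > 0,  // GetDecisionType(..., kDefaultLeftMask)
              (decision_type >> 2) & 3);                   // GetMissingType(...)
  // Leaf addressing: negative child indices are leaves, as in GetLeaf()'s `return ~node;`.
  int child = ~4;  // encodes leaf 4
  std::printf("is_leaf=%d leaf_idx=%d\n", child < 0, ~child);
  return 0;
}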
#ifndef LIGHTGBM_TREE_H_ #define LIGHTGBM_TREE_H_ #include <LightGBM/dataset.h> #include <LightGBM/meta.h> #include <string> #include <map> #include <memory> #include <unordered_map> #include <vector> namespace LightGBM { #define kCategoricalMask (1) #define kDefaultLeftMask (2) /*! * \brief Tree model */ class Tree { public: /*! * \brief Constructor * \param max_leaves The number of max leaves * \param track_branch_features Whether to keep track of ancestors of leaf nodes */ explicit Tree(int max_leaves, bool track_branch_features); /*! * \brief Constructor, from a string * \param str Model string * \param used_len used count of str */ Tree(const char* str, size_t* used_len); ~Tree() noexcept = default; /*! * \brief Performing a split on tree leaves. * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split * \param threshold_double Threshold on feature value * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param left_weight Weight of left child * \param right_weight Weight of right child * \param gain Split gain * \param missing_type missing type * \param default_left default direction for missing value * \return The index of new leaf. */ int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin, double threshold_double, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type, bool default_left); /*! * \brief Performing a split on tree leaves, with categorical feature * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split, use bitset to represent * \param num_threshold_bin size of threshold_bin * \param threshold Thresholds of real feature value, use bitset to represent * \param num_threshold size of threshold * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param left_weight Weight of left child * \param right_weight Weight of right child * \param gain Split gain * \return The index of new leaf. */ int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin, const uint32_t* threshold, int num_threshold, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type); /*! \brief Get the output of one leaf */ inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; } /*! \brief Set the output of one leaf */ inline void SetLeafOutput(int leaf, double output) { leaf_value_[leaf] = MaybeRoundToZero(output); } /*! * \brief Adding prediction value of this tree model to scores * \param data The dataset * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, data_size_t num_data, double* score) const; /*! 
* \brief Adding prediction value of this tree model to scores * \param data The dataset * \param used_data_indices Indices of used data * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, const data_size_t* used_data_indices, data_size_t num_data, double* score) const; /*! * \brief Get upper bound leaf value of this tree model */ double GetUpperBoundValue() const; /*! * \brief Get lower bound leaf value of this tree model */ double GetLowerBoundValue() const; /*! * \brief Prediction on one record * \param feature_values Feature value of this record * \return Prediction result */ inline double Predict(const double* feature_values) const; inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const; inline int PredictLeafIndex(const double* feature_values) const; inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const; inline void PredictContrib(const double* feature_values, int num_features, double* output); inline void PredictContribByMap(const std::unordered_map<int, double>& feature_values, int num_features, std::unordered_map<int, double>* output); /*! \brief Get Number of leaves*/ inline int num_leaves() const { return num_leaves_; } /*! \brief Get depth of specific leaf*/ inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; } /*! \brief Get parent of specific leaf*/ inline int leaf_parent(int leaf_idx) const {return leaf_parent_[leaf_idx]; } /*! \brief Get feature of specific split*/ inline int split_feature(int split_idx) const { return split_feature_[split_idx]; } /*! \brief Get features on leaf's branch*/ inline std::vector<int> branch_features(int leaf) const { return branch_features_[leaf]; } inline double split_gain(int split_idx) const { return split_gain_[split_idx]; } inline double internal_value(int node_idx) const { return internal_value_[node_idx]; } inline bool IsNumericalSplit(int node_idx) const { return !GetDecisionType(decision_type_[node_idx], kCategoricalMask); } inline int left_child(int node_idx) const { return left_child_[node_idx]; } inline int right_child(int node_idx) const { return right_child_[node_idx]; } inline int split_feature_inner(int node_idx) const { return split_feature_inner_[node_idx]; } inline uint32_t threshold_in_bin(int node_idx) const { return threshold_in_bin_[node_idx]; } /*! \brief Get the number of data points that fall at or below this node*/ inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; } /*! 
* \brief Shrinkage for the tree's output * shrinkage rate (a.k.a. learning rate) is used to tune the training process * \param rate The factor of shrinkage */ inline void Shrinkage(double rate) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_ - 1; ++i) { leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] * rate); internal_value_[i] = MaybeRoundToZero(internal_value_[i] * rate); } leaf_value_[num_leaves_ - 1] = MaybeRoundToZero(leaf_value_[num_leaves_ - 1] * rate); shrinkage_ *= rate; } inline double shrinkage() const { return shrinkage_; } inline void AddBias(double val) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_ - 1; ++i) { leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] + val); internal_value_[i] = MaybeRoundToZero(internal_value_[i] + val); } leaf_value_[num_leaves_ - 1] = MaybeRoundToZero(leaf_value_[num_leaves_ - 1] + val); // force to 1.0 shrinkage_ = 1.0f; } inline void AsConstantTree(double val) { num_leaves_ = 1; shrinkage_ = 1.0f; leaf_value_[0] = val; } /*! \brief Serialize this object to string*/ std::string ToString() const; /*! \brief Serialize this object to json*/ std::string ToJSON() const; /*! \brief Serialize this object to if-else statement*/ std::string ToIfElse(int index, bool predict_leaf_index) const; inline static bool IsZero(double fval) { return (fval >= -kZeroThreshold && fval <= kZeroThreshold); } inline static double MaybeRoundToZero(double fval) { return IsZero(fval) ? 0 : fval; } inline static bool GetDecisionType(int8_t decision_type, int8_t mask) { return (decision_type & mask) > 0; } inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) { if (input) { (*decision_type) |= mask; } else { (*decision_type) &= (127 - mask); } } inline static int8_t GetMissingType(int8_t decision_type) { return (decision_type >> 2) & 3; } inline static void SetMissingType(int8_t* decision_type, int8_t input) { (*decision_type) &= 3; (*decision_type) |= (input << 2); } void RecomputeMaxDepth(); int NextLeafId() const { return num_leaves_; } private: std::string NumericalDecisionIfElse(int node) const; std::string CategoricalDecisionIfElse(int node) const; inline int NumericalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if (std::isnan(fval) && missing_type != MissingType::NaN) { fval = 0.0f; } if ((missing_type == MissingType::Zero && IsZero(fval)) || (missing_type == MissingType::NaN && std::isnan(fval))) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if ((missing_type == MissingType::Zero && fval == default_bin) || (missing_type == MissingType::NaN && fval == max_bin)) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_in_bin_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int CategoricalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); int int_fval = static_cast<int>(fval); if (int_fval < 0) { return right_child_[node]; } else if 
(std::isnan(fval)) { // NaN is always in the right if (missing_type == MissingType::NaN) { return right_child_[node]; } int_fval = 0; } int cat_idx = static_cast<int>(threshold_[node]); if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx], cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) { return left_child_[node]; } return right_child_[node]; } inline int CategoricalDecisionInner(uint32_t fval, int node) const { int cat_idx = static_cast<int>(threshold_in_bin_[node]); if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx], cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) { return left_child_[node]; } return right_child_[node]; } inline int Decision(double fval, int node) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecision(fval, node); } else { return NumericalDecision(fval, node); } } inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecisionInner(fval, node); } else { return NumericalDecisionInner(fval, node, default_bin, max_bin); } } inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain); /*! * \brief Find the index of the leaf that a record falls into, given its feature values * \param feature_values Feature value of this record * \return Leaf index */ inline int GetLeaf(const double* feature_values) const; inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const; /*! \brief Serialize one node to json*/ std::string NodeToJSON(int index) const; /*! \brief Serialize one node to if-else statement*/ std::string NodeToIfElse(int index, bool predict_leaf_index) const; std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const; double ExpectedValue() const; /*! \brief This is used to fill in leaf_depth_ after reloading a model*/ inline void RecomputeLeafDepths(int node = 0, int depth = 0); /*! * \brief Used by TreeSHAP for data we keep about our decision path */ struct PathElement { int feature_index; double zero_fraction; double one_fraction; // note that pweight is included for convenience and is not tied with the other attributes, // the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them double pweight; PathElement() {} PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {} }; /*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/ void TreeSHAP(const double *feature_values, double *phi, int node, int unique_depth, PathElement *parent_unique_path, double parent_zero_fraction, double parent_one_fraction, int parent_feature_index) const; void TreeSHAPByMap(const std::unordered_map<int, double>& feature_values, std::unordered_map<int, double>* phi, int node, int unique_depth, PathElement *parent_unique_path, double parent_zero_fraction, double parent_one_fraction, int parent_feature_index) const; /*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/ static void ExtendPath(PathElement *unique_path, int unique_depth, double zero_fraction, double one_fraction, int feature_index); /*! 
\brief Undo a previous extension of the decision path for TreeSHAP*/ static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index); /*! determine what the total permutation weight would be if we unwound a previous extension in the decision path*/ static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index); /*! \brief Number of max leaves*/ int max_leaves_; /*! \brief Number of current leaves*/ int num_leaves_; // following values used for non-leaf node /*! \brief A non-leaf node's left child */ std::vector<int> left_child_; /*! \brief A non-leaf node's right child */ std::vector<int> right_child_; /*! \brief A non-leaf node's split feature */ std::vector<int> split_feature_inner_; /*! \brief A non-leaf node's split feature, the original index */ std::vector<int> split_feature_; /*! \brief A non-leaf node's split threshold in bin */ std::vector<uint32_t> threshold_in_bin_; /*! \brief A non-leaf node's split threshold in feature value */ std::vector<double> threshold_; int num_cat_; std::vector<int> cat_boundaries_inner_; std::vector<uint32_t> cat_threshold_inner_; std::vector<int> cat_boundaries_; std::vector<uint32_t> cat_threshold_; /*! \brief Store the information for categorical feature handle and missing value handle. */ std::vector<int8_t> decision_type_; /*! \brief A non-leaf node's split gain */ std::vector<float> split_gain_; // used for leaf node /*! \brief The parent of leaf */ std::vector<int> leaf_parent_; /*! \brief Output of leaves */ std::vector<double> leaf_value_; /*! \brief weight of leaves */ std::vector<double> leaf_weight_; /*! \brief DataCount of leaves */ std::vector<int> leaf_count_; /*! \brief Output of non-leaf nodes */ std::vector<double> internal_value_; /*! \brief weight of non-leaf nodes */ std::vector<double> internal_weight_; /*! \brief DataCount of non-leaf nodes */ std::vector<int> internal_count_; /*! \brief Depth for leaves */ std::vector<int> leaf_depth_; /*! \brief whether to keep track of ancestor nodes for each leaf (only needed when feature interactions are restricted) */ bool track_branch_features_; /*! \brief Features on leaf's branch, original index */ std::vector<std::vector<int>> branch_features_; double shrinkage_; int max_depth_; }; inline void Tree::Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain) { int new_node_idx = num_leaves_ - 1; // update parent info int parent = leaf_parent_[leaf]; if (parent >= 0) { // if cur node is left child if (left_child_[parent] == ~leaf) { left_child_[parent] = new_node_idx; } else { right_child_[parent] = new_node_idx; } } // add new node split_feature_inner_[new_node_idx] = feature; split_feature_[new_node_idx] = real_feature; split_gain_[new_node_idx] = gain; // add two new leaves left_child_[new_node_idx] = ~leaf; right_child_[new_node_idx] = ~num_leaves_; // update new leaves leaf_parent_[leaf] = new_node_idx; leaf_parent_[num_leaves_] = new_node_idx; // save current leaf value to internal node before change internal_weight_[new_node_idx] = leaf_weight_[leaf]; internal_value_[new_node_idx] = leaf_value_[leaf]; internal_count_[new_node_idx] = left_cnt + right_cnt; leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value; leaf_weight_[leaf] = left_weight; leaf_count_[leaf] = left_cnt; leaf_value_[num_leaves_] = std::isnan(right_value) ? 
0.0f : right_value; leaf_weight_[num_leaves_] = right_weight; leaf_count_[num_leaves_] = right_cnt; // update leaf depth leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1; leaf_depth_[leaf]++; if (track_branch_features_) { branch_features_[num_leaves_] = branch_features_[leaf]; branch_features_[num_leaves_].push_back(split_feature_[new_node_idx]); branch_features_[leaf].push_back(split_feature_[new_node_idx]); } } inline double Tree::Predict(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline int Tree::PredictLeafIndex(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return leaf; } else { return 0; } } inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return leaf; } else { return 0; } } inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) { output[num_features] += ExpectedValue(); // Run the recursion with preallocated space for the unique path data if (num_leaves_ > 1) { CHECK_GE(max_depth_, 0); const int max_path_len = max_depth_ + 1; std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2); TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1); } } inline void Tree::PredictContribByMap(const std::unordered_map<int, double>& feature_values, int num_features, std::unordered_map<int, double>* output) { (*output)[num_features] += ExpectedValue(); // Run the recursion with preallocated space for the unique path data if (num_leaves_ > 1) { CHECK_GE(max_depth_, 0); const int max_path_len = max_depth_ + 1; std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2); TreeSHAPByMap(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1); } } inline void Tree::RecomputeLeafDepths(int node, int depth) { if (node == 0) leaf_depth_.resize(num_leaves()); if (node < 0) { leaf_depth_[~node] = depth; } else { RecomputeLeafDepths(left_child_[node], depth + 1); RecomputeLeafDepths(right_child_[node], depth + 1); } } inline int Tree::GetLeaf(const double* feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values[split_feature_[node]], node); } } else { while (node >= 0) { node = NumericalDecision(feature_values[split_feature_[node]], node); } } return ~node; } inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } else { while (node >= 0) { node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } return ~node; } } // namespace LightGBM #endif // LightGBM_TREE_H_
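Editor's note: the only difference between the two tree.h variants above is the guarded OpenMP pragma on the Shrinkage and AddBias loops. Below is a minimal compilable sketch of that pattern (build with -fopenmp): the if() clause keeps short loops serial so thread start-up cost is paid only when the element count clears the threshold, and schedule(static, 1024) hands each thread contiguous 1024-element chunks, matching the chunking used by Tree::Shrinkage. The function name is hypothetical; the threshold mirrors the header, but the snippet itself is illustrative, not dataset content.

#include <vector>

// Scales every entry of `values`, in parallel only when the vector is large.
void ScaleValuesDemo(std::vector<double>* values, double rate) {
  const int n = static_cast<int>(values->size());
#pragma omp parallel for schedule(static, 1024) if (n >= 2048)
  for (int i = 0; i < n; ++i) {
    (*values)[i] *= rate;
  }
}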
broadcast_reduce-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015-2017 by Contributors * \file broadcast_reduce-inl.h * \brief CPU-specific Function definition of broadcast and reduce operators */ #ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_ #define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_ #include <mxnet/operator_util.h> #include <algorithm> #include <vector> #include <string> #include <utility> #include "../mshadow_op.h" #include "../mxnet_op.h" #include "../operator_common.h" namespace mxnet { namespace op { namespace mxnet_op { template<int ndim, typename OP> struct binary_broadcast_kernel { /*! \brief Map function for binary_broadcast_kernel */ template<typename IType, typename DType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, IType *lhs, IType *rhs, DType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ template<typename LType, typename RType, typename OType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, LType *lhs, RType *rhs, OType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! 
\brief Map function for binary_broadcast_kernel */ template<typename IType, typename DType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, IType lhs, IType *rhs, DType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ /* used for mixed type binary ops */ template<typename IType, typename DType, typename std::enable_if<!std::is_same<IType, DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, IType *lhs, DType *rhs, DType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ /* used for mixed type binary ops */ template<typename IType, typename DType, typename std::enable_if<!std::is_same<IType, DType>::value && !std::is_pointer<IType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, IType lhs, DType *rhs, DType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx])); } } }; template<int req, typename OP, bool col_vec> struct csr_dns_csr_broadcast_kernel { /*! 
* \brief Map function for broadcast between csr and 1D vector * \param row global thread id/assigned row id * \param csr_data ptr to data buffer of csr matrix * \param csr_indices ptr to indices buffer of csr matrix * \param csr_indptr ptr to indptr buffer of csr matrix * \param dns ptr to data buffer of the dense vector * \param out ptr to the data buffer of the result csr matrix */ template<typename DType, typename CType, typename RType> MSHADOW_XINLINE static void Map(index_t row, const DType *csr_data, const CType *csr_indices, const RType *csr_indptr, const DType *dns, DType *out) { const nnvm::dim_t curr_row_i = csr_indptr[row]; const nnvm::dim_t next_row_i = csr_indptr[row + 1]; for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) { KERNEL_ASSIGN(out[iter], req, OP::Map(csr_data[iter], (col_vec)? dns[row] : dns[csr_indices[iter]])); } } /*! * \brief Map function for broadcast between csr and a scalar * \param i global thread id * \param csr_data ptr to data buffer of csr matrix * \param scalar_ptr ptr to data buffer of the scalar tensor, only the 0-th element is used * \param out ptr to the data buffer of output csr matrix * \param nnz number of non-zero elements in input csr matrix */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, const DType *csr_data, const DType* scalar_ptr, DType *out, const nnvm::dim_t nnz) { const DType scale = scalar_ptr[0]; if (i < nnz) { KERNEL_ASSIGN(out[i], req, OP::Map(csr_data[i], scale)); } } }; template<int req, typename OP, bool reverse = false> struct csr_dns_map_kernel { template <typename DType, typename CType, typename RType> MSHADOW_XINLINE static void Map(index_t row, const DType *csr_data, const CType *csr_indices, const RType *csr_indptr, DType *out, const nnvm::dim_t num_rows, const nnvm::dim_t num_cols) { if (row < num_rows) { const nnvm::dim_t curr_row_i = csr_indptr[row]; const nnvm::dim_t next_row_i = csr_indptr[row + 1]; for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) { const nnvm::dim_t target = row * num_cols + csr_indices[iter]; KERNEL_ASSIGN(out[target], req, reverse ? 
OP::Map(out[target], csr_data[iter]) : OP::Map(csr_data[iter], out[target])); } } } }; } // namespace mxnet_op namespace broadcast { using namespace mshadow; const int MAX_DIM = 5; template<int ndim> MSHADOW_XINLINE void unravel_dot(const index_t idx, const Shape<ndim>& shape, const Shape<ndim>& stridej, const Shape<ndim>& stridek, index_t* j, index_t* k) { *j = 0; *k = 0; #pragma unroll for (index_t i = ndim-1, idx_t = idx; i >=0; --i) { const auto tmp = idx_t / shape[i]; const auto coord = idx_t - tmp*shape[i]; *j += coord*stridej[i]; *k += coord*stridek[i]; idx_t = tmp; } } template<int ndim> MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big, Shape<ndim>* dims, Shape<ndim>* stride) { int mdim = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { mdim += small[i] != big[i]; (*dims)[i] = (*stride)[i] = 1; } index_t s = 1; #pragma unroll for (int i = ndim - 1, j = mdim; i >= 0; --i) { if (small[i] != big[i]) { --j; (*stride)[j] = s; (*dims)[j] = big[i]; } s *= big[i]; } return mdim; } template<typename DType> MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) { if (addto) { *dst += src; } else { *dst = src; } } template<int ndim, typename DType, typename OP> MSHADOW_XINLINE void binary_broadcast_assign(const index_t idx, const bool addto, const DType* __restrict lhs, const DType* __restrict rhs, DType* out, const Shape<ndim>& lshape, const Shape<ndim>& rshape, const Shape<ndim>& oshape) { const Shape<ndim> coord = mxnet_op::unravel(idx, oshape); const index_t j = mxnet_op::ravel(coord, lshape); const index_t k = mxnet_op::ravel(coord, rshape); assign(&out[idx], addto, OP::Map(lhs[j], rhs[k])); } template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP> MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, OType *small, const Shape<ndim>& bshape, const Shape<ndim>& sshape, const Shape<ndim>& rshape, const Shape<ndim>& rstride) { Shape<ndim> coord = mxnet_op::unravel(idx, sshape); index_t j = mxnet_op::ravel(coord, bshape); AType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { coord = mxnet_op::unravel(k, rshape); Reducer::Reduce(val, AType(OP::Map(big[j + mxnet_op::dot(coord, rstride)])), residual); } Reducer::Finalize(val, residual); assign(&small[idx], addto, OType(val)); } namespace { // Returns the stride with which the fastest dimension is moving. // Used to detect memory access scatter. inline int fastest_stride(const TShape &small, const TShape &big, const TShape &big_stride) { const int ndim = small.ndim(); for (int i = ndim-1; i >= 0; --i) { if (big[i] != 1) { return (small[i] == big[i]) ? 
1 : big_stride[i]; } } return 1; } } // namespace template<int ndim, typename DType, typename OP> void BinaryBroadcastComputeImpl(Stream<cpu> *s, const OpReqType req, const TBlob& lhs, const TBlob& rhs, const TBlob& out) { mshadow::Shape<ndim> oshape = out.shape_.get<ndim>(); mshadow::Shape<ndim> lstride = mxnet_op::calc_stride(lhs.shape_.get<ndim>()); mshadow::Shape<ndim> rstride = mxnet_op::calc_stride(rhs.shape_.get<ndim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<ndim, OP>, cpu>:: template LaunchEx(s, out.shape_.Size(), req, lstride, rstride, oshape, lhs.dptr<DType>(), rhs.dptr<DType>(), out.dptr<DType>()); } template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP> void seq_reduce_compute(const size_t N, const size_t M, const bool addto, const DType *big, OType *small, const Shape<ndim> bshape, const Shape<ndim> sshape, const Shape<ndim> rshape, const Shape<ndim> rstride) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP>(idx, M, addto, big, small, bshape, sshape, rshape, rstride); } } template <typename Reducer, int ndim, typename DType, typename OP> void seq_reduce_compute_extra_mem(const size_t N, const size_t M, const bool addto, const DType* big, DType* small, const Shape<ndim> bshape, const Shape<ndim> sshape, const Shape<ndim> rshape, const Shape<ndim> rstride, const index_t* ws_dptr) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { Shape<ndim> coord = mxnet_op::unravel(idx, sshape); index_t j = mxnet_op::ravel(coord, bshape); DType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { Reducer::Reduce(val, OP::Map(big[j + ws_dptr[k]]), residual); } assign(&small[idx], addto, val); } } template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false> void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(), M = rshape.Size(); if (!safe_acc) { seq_reduce_compute<Reducer, ndim, DType, DType, DType, OP>( N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); } else { MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, { typedef typename std::conditional<safe_acc, AType, DataType>::type AccType; MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, { typedef typename std::conditional<safe_acc, OType, DataType>::type OutType; seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>( N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); }); }); } } template <typename Reducer, int ndim, typename DType, typename OP> void ReduceBool(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(), M = rshape.Size(); seq_reduce_compute<Reducer, ndim, bool, DType, bool, OP>( N, M, req == kAddTo, 
big.dptr<DType>(), small.dptr<bool>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); } template <typename Reducer, int ndim, typename DType, typename OP> void ReduceWithExtraMem(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { using namespace mxnet_op; if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); index_t* ws_dptr = reinterpret_cast<index_t*>(workspace.dptr_); size_t N = small.shape_.Size(), M = rshape.Size(); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t k = 0; k < static_cast<index_t>(M); k++) { Shape<ndim> coord = mxnet_op::unravel(k, rshape); ws_dptr[k] = mxnet_op::dot(coord, rstride); } seq_reduce_compute_extra_mem<Reducer, ndim, DType, OP>( N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, ws_dptr); } inline size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big, const int type_size) { return 0; } inline size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big, const mxnet::TShape& lhs, const mxnet::TShape& rhs, const int type_size) { return 0; } #if MXNET_USE_CUDA namespace { constexpr int warpSize = 32; constexpr int unroll_reduce = 2; // Returns a/b integer division rounded up template<typename Type> Type ceil_idiv(const Type a, const Type b) { return (a + b - 1)/b; } uint64_t calc_num_load(const int X, const int Y, const int* strides) { // Number of full warps uint64_t num_full_warp = X / warpSize; // Length of the partial warp i.e. 
number of threads that are performing loads uint64_t len_part_warp = X % warpSize; uint64_t num_load_full = (std::min(warpSize, strides[0]) + std::min(warpSize, strides[1]) + std::min(warpSize, strides[2]))*num_full_warp; uint64_t num_load_part = (std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp*strides[0], warpSize)) + std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp*strides[1], warpSize)) + std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp*strides[2], warpSize)))* (len_part_warp != 0); uint64_t num_load = (num_load_full + num_load_part)*(uint64_t)Y; return num_load; } inline int diff(const TShape& small, const TShape& big, TShape* dims, TShape* stride) { int ndim = small.ndim(); int mdim = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { mdim += small[i] != big[i]; (*dims)[i] = (*stride)[i] = 1; } index_t s = 1; #pragma unroll for (int i = ndim - 1, j = mdim; i >= 0; --i) { if (small[i] != big[i]) { --j; (*stride)[j] = s; (*dims)[j] = big[i]; } s *= big[i]; } return mdim; } constexpr int nthread_reduce = 512; constexpr index_t kBaseGridNum = 1024; } // namespace // Configuration for ReduceImpl() struct ReduceImplConfig { index_t N; index_t M; index_t Mnext; struct { dim3 blockDim; dim3 gridDim; int shMemSize; bool do_transpose; } kernel_1; struct { int blockSize; int gridSize; } kernel_2; size_t workspace_size; TShape rshape, rstride; TShape lhs_shape, lhs_stride; TShape rhs_shape, rhs_stride; inline ReduceImplConfig(const ::mxnet::TShape& small, const ::mxnet::TShape& big, const ::mxnet::TShape* lhs, const ::mxnet::TShape* rhs, const size_t type_size) : rshape(small.ndim(), 1), rstride(small.ndim(), 1), lhs_shape(small.ndim(), 1), lhs_stride(small.ndim(), 1), rhs_shape(small.ndim(), 1), rhs_stride(small.ndim(), 1) { constexpr int maxLoopPerTB = 64; int ndim = small.ndim(); diff(small, big, &rshape, &rstride); N = small.Size(); M = rshape[0]; for (int i = 1; i < ndim; ++i) { M *= rshape[i]; } bool multiOp = false; if (lhs != nullptr) { CHECK_NOTNULL(rhs); diff(small, *lhs, &lhs_shape, &lhs_stride); diff(small, *rhs, &rhs_shape, &rhs_stride); multiOp = true; } workspace_size = 0; kernel_1.shMemSize = 0; kernel_1.do_transpose = false; if (M == 1) { kernel_1.blockDim.x = nthread_reduce; kernel_1.gridDim.x = std::min(kBaseGridNum, static_cast<index_t>((N + kernel_1.blockDim.x - 1)/kernel_1.blockDim.x)); } else { int reduce_strides[3]; reduce_strides[0] = fastest_stride(small, big, big); reduce_strides[1] = (multiOp) ? fastest_stride(small, *lhs, *lhs) : 1; reduce_strides[2] = (multiOp) ? fastest_stride(small, *rhs, *rhs) : 1; int reduce_strides_transp[3]; reduce_strides_transp[0] = fastest_stride(small, rshape, rstride); reduce_strides_transp[1] = (multiOp) ? fastest_stride(small, lhs_shape, lhs_stride) : 1; reduce_strides_transp[2] = (multiOp) ? 
fastest_stride(small, rhs_shape, rhs_stride) : 1; uint64_t num_load = calc_num_load(N, M, reduce_strides); uint64_t num_load_transp = calc_num_load(M, N, reduce_strides_transp); Mnext = 1; kernel_1.do_transpose = (num_load > num_load_transp); kernel_1.blockDim.x = 0; kernel_1.blockDim.y = 0; if (kernel_1.do_transpose) { // Fastest thread ID goes through M // Loop over N has step size kernel_1.blockDim.y if (N < 8) { kernel_1.blockDim.y = 1; } else if (N < 256) { kernel_1.blockDim.y = 4; } else { if (M < 8) { kernel_1.blockDim.x = 1; } else if (M < 256) { kernel_1.blockDim.x = 4; } else { kernel_1.blockDim.x = warpSize; } } } else { // Fastest thread ID goes through N // Loop over M has step size kernel_1.blockDim.y if (M < 8) { kernel_1.blockDim.y = 1; } else if (M < 256) { kernel_1.blockDim.y = 4; } else { if (N < 8) { kernel_1.blockDim.x = 1; } else if (N < 256) { kernel_1.blockDim.x = 4; } else { kernel_1.blockDim.x = warpSize; } } } if (kernel_1.blockDim.x == 0 && kernel_1.blockDim.y == 0) { LOG(FATAL) << "Unable to set blockDim"; } else if (kernel_1.blockDim.x == 0) { kernel_1.blockDim.x = nthread_reduce / kernel_1.blockDim.y; } else if (kernel_1.blockDim.y == 0) { kernel_1.blockDim.y = nthread_reduce / kernel_1.blockDim.x; } if (kernel_1.do_transpose) { // Fastest thread ID goes through M kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum, ceil_idiv<unsigned int>(N, kernel_1.blockDim.y)); kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); int by = kernel_1.blockDim.y; if (kernel_1.blockDim.y % warpSize == 0) { // Fix shared memory bank conflict by++; } kernel_1.shMemSize = (kernel_1.blockDim.x > 1) ? kernel_1.blockDim.x*by*type_size * 2 : 0; // Maximum number of times we want TB to loop in M // Max size of M-block each TB can handle int maxMblock = kernel_1.blockDim.x*maxLoopPerTB; Mnext = (M + maxMblock - 1) / maxMblock; } else { // Fastest thread ID goes through N kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum, ceil_idiv<unsigned int>(N, kernel_1.blockDim.x)); kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); kernel_1.shMemSize = (kernel_1.blockDim.y > 1) ? 
kernel_1.blockDim.x*kernel_1.blockDim.y*type_size * 2 : 0; // Maximum number of times we want TB to loop in M // Max size of M-block each TB can handle int maxMblock = kernel_1.blockDim.y*maxLoopPerTB; Mnext = (M + maxMblock - 1) / maxMblock; } if (Mnext > 1) { // small_dptr[] is N*Mnext*type_size bytes workspace_size += N*Mnext*sizeof(double); // Set gridDim.y to Mnext kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); } if (Mnext > 1) { kernel_2.blockSize = nthread_reduce; kernel_2.gridSize = std::min(kBaseGridNum, static_cast<index_t>((N + kernel_2.blockSize - 1)/kernel_2.blockSize)); } } } }; inline size_t ReduceWorkspaceSize(Stream<gpu> *s, const ::mxnet::TShape& small, const OpReqType req, const ::mxnet::TShape& big, const int type_size) { if (req == kNullOp) return 0; ReduceImplConfig config(small, big, nullptr, nullptr, type_size); return config.workspace_size; } inline size_t ReduceWorkspaceSize(Stream<gpu> *s, const ::mxnet::TShape& small, const OpReqType req, const ::mxnet::TShape& big, const ::mxnet::TShape& lhs, const ::mxnet::TShape& rhs, const int type_size) { if (req == kNullOp) return 0; ReduceImplConfig config(small, big, &lhs, &rhs, type_size); return config.workspace_size; } #ifdef __CUDACC__ #include "broadcast_reduce-inl.cuh" #endif #endif // MXNET_USE_CUDA template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2> MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, const DType* __restrict lhs, const DType* __restrict rhs, DType *small, const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0, const Shape<ndim>& small_shape, const Shape<ndim>& rshape, const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape, const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride, const Shape<ndim>& rhs_stride) { Shape<ndim> coord = mxnet_op::unravel(idx, small_shape); const index_t idx_big0 = mxnet_op::ravel(coord, big_shape); const index_t idx_lhs0 = mxnet_op::ravel(coord, lhs_shape0); const index_t idx_rhs0 = mxnet_op::ravel(coord, rhs_shape0); DType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { Shape<ndim> coord_big = mxnet_op::unravel(k, rshape); index_t idx_big = idx_big0 + mxnet_op::dot(coord_big, rstride); Shape<ndim> coord_lhs = mxnet_op::unravel(k, lhs_shape); index_t idx_lhs = idx_lhs0 + mxnet_op::dot(coord_lhs, lhs_stride); Shape<ndim> coord_rhs = mxnet_op::unravel(k, rhs_shape); index_t idx_rhs = idx_rhs0 + mxnet_op::dot(coord_rhs, rhs_stride); Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual); } Reducer::Finalize(val, residual); assign(&small[idx], addto, val); } template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2> void seq_reduce_compute(const size_t N, const size_t M, const bool addto, const DType *big, const DType *lhs, const DType *rhs, DType *small, const Shape<ndim> big_shape, const Shape<ndim> small_shape, const Shape<ndim> rshape, const Shape<ndim> rstride, const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride, const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small, big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, 
lhs_shape, rhs_shape, rstride, lhs_stride, rhs_stride); } } template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2> void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs, const TBlob& rhs) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(); size_t M = rshape.Size(); Shape<ndim> lhs_shape, lhs_stride; diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride); Shape<ndim> rhs_shape, rhs_stride; diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride); seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>( N, M, req == kAddTo, big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, lhs_shape, lhs_stride, rhs_shape, rhs_stride, lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>()); } #if MXNET_USE_CUDA void RTCReduce(const OpContext& ctx, const TBlob& small, const OpReqType req, const Tensor<gpu, 1, char>& workspace, const TBlob& big, const std::string& reducer, int ndim, const std::string& OP); void RTCReduce(const OpContext& ctx, const TBlob& small, const OpReqType req, const Tensor<gpu, 1, char>& workspace, const TBlob& big, const TBlob &lhs, const TBlob &rhs, const std::string& reducer, int ndim, const std::string& OP1, const std::string& OP2); #endif } // namespace broadcast } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
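A minimal stand-alone sketch of the indexing scheme that diff() and seq_reduce_assign implement above, assuming plain C arrays and an identity OP in place of mshadow's Shape, Reducer, and TBlob machinery (all names below are local stand-ins, not the MXNet API): diff() reports the reduced extents (rshape) and their strides inside the big tensor (rstride), and each output idx folds together the M elements at big[ravel(idx) + dot(k, rstride)].

#include <cstdio>

int main() {
  // Reduce big[2][3] over axis 1 into small[2]; for this case diff() would
  // report rshape = [3] (reduced extent) and rstride = [1] (its stride in big).
  const int big[2][3] = {{1, 2, 3}, {4, 5, 6}};
  const int N = 2;        // small.Size(): number of outputs
  const int M = 3;        // rshape.Size(): elements folded into each output
  const int rstride = 1;  // stride of the reduced axis inside big
  for (int idx = 0; idx < N; ++idx) {  // outer loop of seq_reduce_compute
    int val = 0;                       // Reducer::SetInitValue
    const int *base = &big[idx][0];    // idx_big0 = ravel(coord, big_shape)
    for (int k = 0; k < M; ++k) {      // inner loop of seq_reduce_assign
      val += base[k * rstride];        // Reducer::Reduce with an identity OP
    }
    std::printf("small[%d] = %d\n", idx, val);  // assign(&small[idx], addto, val)
  }
  return 0;
}

With sum as the reducer this prints small[0] = 6 and small[1] = 15, i.e. a row-wise reduction of the 2x3 input.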
/*! * Copyright (c) 2015-2017 by Contributors * \file broadcast_reduce-inl.h * \brief CPU-specific Function definition of broadcast and reduce operators */ #ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_ #define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_ #include <mxnet/operator_util.h> #include <algorithm> #include <vector> #include <string> #include <utility> #include "../mshadow_op.h" #include "../mxnet_op.h" #include "../operator_common.h" namespace mxnet { namespace op { namespace mxnet_op { template<int ndim, typename OP> struct binary_broadcast_kernel { /*! \brief Map function for binary_broadcast_kernel */ template<typename IType, typename DType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, IType *lhs, IType *rhs, DType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ template<typename LType, typename RType, typename OType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, LType *lhs, RType *rhs, OType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ template<typename IType, typename DType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, IType lhs, IType *rhs, DType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx])); } } /*! 
\brief Map function for binary_broadcast_kernel */ /* used for mixed type binary ops */ template<typename IType, typename DType, typename std::enable_if<!std::is_same<IType, DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, IType *lhs, DType *rhs, DType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ /* used for mixed type binary ops */ template<typename IType, typename DType, typename std::enable_if<!std::is_same<IType, DType>::value && !std::is_pointer<IType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, IType lhs, DType *rhs, DType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx])); } } }; template<int req, typename OP, bool col_vec> struct csr_dns_csr_broadcast_kernel { /*! * \brief Map function for broadcast between csr and 1D vector * \param row global thread id/assigned row id * \param csr_data ptr to data buffer of csr matrix * \param csr_indices ptr to indices buffer of csr matrix * \param csr_indptr ptr to indptr buffer of csr matrix * \param dns ptr to data buffer of the dense vector * \param out ptr to the data buffer of the result csr matrix */ template<typename DType, typename CType, typename RType> MSHADOW_XINLINE static void Map(index_t row, const DType *csr_data, const CType *csr_indices, const RType *csr_indptr, const DType *dns, DType *out) { const nnvm::dim_t curr_row_i = csr_indptr[row]; const nnvm::dim_t next_row_i = csr_indptr[row + 1]; for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) { KERNEL_ASSIGN(out[iter], req, OP::Map(csr_data[iter], (col_vec)? dns[row] : dns[csr_indices[iter]])); } } /*! 
* \brief Map function for broadcast between csr and a scalar * \param i global thread id * \param csr_data ptr to data buffer of csr matrix * \param scalar_ptr ptr to data buffer of the scalar tensor, only the 0-th element is used * \param out ptr to the data buffer of output csr matrix * \param nnz number of non-zero elements in input csr matrix */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, const DType *csr_data, const DType* scalar_ptr, DType *out, const nnvm::dim_t nnz) { const DType scale = scalar_ptr[0]; if (i < nnz) { KERNEL_ASSIGN(out[i], req, OP::Map(csr_data[i], scale)); } } }; template<int req, typename OP, bool reverse = false> struct csr_dns_map_kernel { template <typename DType, typename CType, typename RType> MSHADOW_XINLINE static void Map(index_t row, const DType *csr_data, const CType *csr_indices, const RType *csr_indptr, DType *out, const nnvm::dim_t num_rows, const nnvm::dim_t num_cols) { if (row < num_rows) { const nnvm::dim_t curr_row_i = csr_indptr[row]; const nnvm::dim_t next_row_i = csr_indptr[row + 1]; for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) { const nnvm::dim_t target = row * num_cols + csr_indices[iter]; KERNEL_ASSIGN(out[target], req, reverse ? OP::Map(out[target], csr_data[iter]) : OP::Map(csr_data[iter], out[target])); } } } }; } // namespace mxnet_op namespace broadcast { using namespace mshadow; const int MAX_DIM = 5; template<int ndim> MSHADOW_XINLINE void unravel_dot(const index_t idx, const Shape<ndim>& shape, const Shape<ndim>& stridej, const Shape<ndim>& stridek, index_t* j, index_t* k) { *j = 0; *k = 0; #pragma unroll for (index_t i = ndim-1, idx_t = idx; i >=0; --i) { const auto tmp = idx_t / shape[i]; const auto coord = idx_t - tmp*shape[i]; *j += coord*stridej[i]; *k += coord*stridek[i]; idx_t = tmp; } } template<int ndim> MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big, Shape<ndim>* dims, Shape<ndim>* stride) { int mdim = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { mdim += small[i] != big[i]; (*dims)[i] = (*stride)[i] = 1; } index_t s = 1; #pragma unroll for (int i = ndim - 1, j = mdim; i >= 0; --i) { if (small[i] != big[i]) { --j; (*stride)[j] = s; (*dims)[j] = big[i]; } s *= big[i]; } return mdim; } template<typename DType> MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) { if (addto) { *dst += src; } else { *dst = src; } } template<int ndim, typename DType, typename OP> MSHADOW_XINLINE void binary_broadcast_assign(const index_t idx, const bool addto, const DType* __restrict lhs, const DType* __restrict rhs, DType* out, const Shape<ndim>& lshape, const Shape<ndim>& rshape, const Shape<ndim>& oshape) { const Shape<ndim> coord = mxnet_op::unravel(idx, oshape); const index_t j = mxnet_op::ravel(coord, lshape); const index_t k = mxnet_op::ravel(coord, rshape); assign(&out[idx], addto, OP::Map(lhs[j], rhs[k])); } template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP> MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, OType *small, const Shape<ndim>& bshape, const Shape<ndim>& sshape, const Shape<ndim>& rshape, const Shape<ndim>& rstride) { Shape<ndim> coord = mxnet_op::unravel(idx, sshape); index_t j = mxnet_op::ravel(coord, bshape); AType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { coord = mxnet_op::unravel(k, rshape); Reducer::Reduce(val, AType(OP::Map(big[j + mxnet_op::dot(coord, 
rstride)])), residual); } Reducer::Finalize(val, residual); assign(&small[idx], addto, OType(val)); } namespace { // Returns the stride with which the fastest dimension is moving. // Used to detect memory access scatter. inline int fastest_stride(const TShape &small, const TShape &big, const TShape &big_stride) { const int ndim = small.ndim(); for (int i = ndim-1; i >= 0; --i) { if (big[i] != 1) { return (small[i] == big[i]) ? 1 : big_stride[i]; } } return 1; } } // namespace template<int ndim, typename DType, typename OP> void BinaryBroadcastComputeImpl(Stream<cpu> *s, const OpReqType req, const TBlob& lhs, const TBlob& rhs, const TBlob& out) { mshadow::Shape<ndim> oshape = out.shape_.get<ndim>(); mshadow::Shape<ndim> lstride = mxnet_op::calc_stride(lhs.shape_.get<ndim>()); mshadow::Shape<ndim> rstride = mxnet_op::calc_stride(rhs.shape_.get<ndim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<ndim, OP>, cpu>:: template LaunchEx(s, out.shape_.Size(), req, lstride, rstride, oshape, lhs.dptr<DType>(), rhs.dptr<DType>(), out.dptr<DType>()); } template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP> void seq_reduce_compute(const size_t N, const size_t M, const bool addto, const DType *big, OType *small, const Shape<ndim> bshape, const Shape<ndim> sshape, const Shape<ndim> rshape, const Shape<ndim> rstride) { for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP>(idx, M, addto, big, small, bshape, sshape, rshape, rstride); } } template <typename Reducer, int ndim, typename DType, typename OP> void seq_reduce_compute_extra_mem(const size_t N, const size_t M, const bool addto, const DType* big, DType* small, const Shape<ndim> bshape, const Shape<ndim> sshape, const Shape<ndim> rshape, const Shape<ndim> rstride, const index_t* ws_dptr) { for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { Shape<ndim> coord = mxnet_op::unravel(idx, sshape); index_t j = mxnet_op::ravel(coord, bshape); DType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { Reducer::Reduce(val, OP::Map(big[j + ws_dptr[k]]), residual); } assign(&small[idx], addto, val); } } template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false> void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(), M = rshape.Size(); if (!safe_acc) { seq_reduce_compute<Reducer, ndim, DType, DType, DType, OP>( N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); } else { MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, { typedef typename std::conditional<safe_acc, AType, DataType>::type AccType; MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, { typedef typename std::conditional<safe_acc, OType, DataType>::type OutType; seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>( N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); }); }); } } template <typename Reducer, int ndim, typename DType, typename OP> void ReduceBool(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { if (req == 
kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(), M = rshape.Size(); seq_reduce_compute<Reducer, ndim, bool, DType, bool, OP>( N, M, req == kAddTo, big.dptr<DType>(), small.dptr<bool>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); } template <typename Reducer, int ndim, typename DType, typename OP> void ReduceWithExtraMem(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { using namespace mxnet_op; if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); index_t* ws_dptr = reinterpret_cast<index_t*>(workspace.dptr_); size_t N = small.shape_.Size(), M = rshape.Size(); for (index_t k = 0; k < static_cast<index_t>(M); k++) { Shape<ndim> coord = mxnet_op::unravel(k, rshape); ws_dptr[k] = mxnet_op::dot(coord, rstride); } seq_reduce_compute_extra_mem<Reducer, ndim, DType, OP>( N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, ws_dptr); } inline size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big, const int type_size) { return 0; } inline size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big, const mxnet::TShape& lhs, const mxnet::TShape& rhs, const int type_size) { return 0; } #if MXNET_USE_CUDA namespace { constexpr int warpSize = 32; constexpr int unroll_reduce = 2; // Returns a/b integer division rounded up template<typename Type> Type ceil_idiv(const Type a, const Type b) { return (a + b - 1)/b; } uint64_t calc_num_load(const int X, const int Y, const int* strides) { // Number of full warps uint64_t num_full_warp = X / warpSize; // Length of the partial warp i.e. 
number of threads that are performing loads uint64_t len_part_warp = X % warpSize; uint64_t num_load_full = (std::min(warpSize, strides[0]) + std::min(warpSize, strides[1]) + std::min(warpSize, strides[2]))*num_full_warp; uint64_t num_load_part = (std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp*strides[0], warpSize)) + std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp*strides[1], warpSize)) + std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp*strides[2], warpSize)))* (len_part_warp != 0); uint64_t num_load = (num_load_full + num_load_part)*(uint64_t)Y; return num_load; } inline int diff(const TShape& small, const TShape& big, TShape* dims, TShape* stride) { int ndim = small.ndim(); int mdim = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { mdim += small[i] != big[i]; (*dims)[i] = (*stride)[i] = 1; } index_t s = 1; #pragma unroll for (int i = ndim - 1, j = mdim; i >= 0; --i) { if (small[i] != big[i]) { --j; (*stride)[j] = s; (*dims)[j] = big[i]; } s *= big[i]; } return mdim; } constexpr int nthread_reduce = 512; constexpr index_t kBaseGridNum = 1024; } // namespace // Configuration for ReduceImpl() struct ReduceImplConfig { index_t N; index_t M; index_t Mnext; struct { dim3 blockDim; dim3 gridDim; int shMemSize; bool do_transpose; } kernel_1; struct { int blockSize; int gridSize; } kernel_2; size_t workspace_size; TShape rshape, rstride; TShape lhs_shape, lhs_stride; TShape rhs_shape, rhs_stride; inline ReduceImplConfig(const ::mxnet::TShape& small, const ::mxnet::TShape& big, const ::mxnet::TShape* lhs, const ::mxnet::TShape* rhs, const size_t type_size) : rshape(small.ndim(), 1), rstride(small.ndim(), 1), lhs_shape(small.ndim(), 1), lhs_stride(small.ndim(), 1), rhs_shape(small.ndim(), 1), rhs_stride(small.ndim(), 1) { constexpr int maxLoopPerTB = 64; int ndim = small.ndim(); diff(small, big, &rshape, &rstride); N = small.Size(); M = rshape[0]; for (int i = 1; i < ndim; ++i) { M *= rshape[i]; } bool multiOp = false; if (lhs != nullptr) { CHECK_NOTNULL(rhs); diff(small, *lhs, &lhs_shape, &lhs_stride); diff(small, *rhs, &rhs_shape, &rhs_stride); multiOp = true; } workspace_size = 0; kernel_1.shMemSize = 0; kernel_1.do_transpose = false; if (M == 1) { kernel_1.blockDim.x = nthread_reduce; kernel_1.gridDim.x = std::min(kBaseGridNum, static_cast<index_t>((N + kernel_1.blockDim.x - 1)/kernel_1.blockDim.x)); } else { int reduce_strides[3]; reduce_strides[0] = fastest_stride(small, big, big); reduce_strides[1] = (multiOp) ? fastest_stride(small, *lhs, *lhs) : 1; reduce_strides[2] = (multiOp) ? fastest_stride(small, *rhs, *rhs) : 1; int reduce_strides_transp[3]; reduce_strides_transp[0] = fastest_stride(small, rshape, rstride); reduce_strides_transp[1] = (multiOp) ? fastest_stride(small, lhs_shape, lhs_stride) : 1; reduce_strides_transp[2] = (multiOp) ? 
fastest_stride(small, rhs_shape, rhs_stride) : 1; uint64_t num_load = calc_num_load(N, M, reduce_strides); uint64_t num_load_transp = calc_num_load(M, N, reduce_strides_transp); Mnext = 1; kernel_1.do_transpose = (num_load > num_load_transp); kernel_1.blockDim.x = 0; kernel_1.blockDim.y = 0; if (kernel_1.do_transpose) { // Fastest thread ID goes through M // Loop over N has step size kernel_1.blockDim.y if (N < 8) { kernel_1.blockDim.y = 1; } else if (N < 256) { kernel_1.blockDim.y = 4; } else { if (M < 8) { kernel_1.blockDim.x = 1; } else if (M < 256) { kernel_1.blockDim.x = 4; } else { kernel_1.blockDim.x = warpSize; } } } else { // Fastest thread ID goes through N // Loop over M has step size kernel_1.blockDim.y if (M < 8) { kernel_1.blockDim.y = 1; } else if (M < 256) { kernel_1.blockDim.y = 4; } else { if (N < 8) { kernel_1.blockDim.x = 1; } else if (N < 256) { kernel_1.blockDim.x = 4; } else { kernel_1.blockDim.x = warpSize; } } } if (kernel_1.blockDim.x == 0 && kernel_1.blockDim.y == 0) { LOG(FATAL) << "Unable to set blockDim"; } else if (kernel_1.blockDim.x == 0) { kernel_1.blockDim.x = nthread_reduce / kernel_1.blockDim.y; } else if (kernel_1.blockDim.y == 0) { kernel_1.blockDim.y = nthread_reduce / kernel_1.blockDim.x; } if (kernel_1.do_transpose) { // Fastest thread ID goes through M kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum, ceil_idiv<unsigned int>(N, kernel_1.blockDim.y)); kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); int by = kernel_1.blockDim.y; if (kernel_1.blockDim.y % warpSize == 0) { // Fix shared memory bank conflict by++; } kernel_1.shMemSize = (kernel_1.blockDim.x > 1) ? kernel_1.blockDim.x*by*type_size * 2 : 0; // Maximum number of times we want TB to loop in M // Max size of M-block each TB can handle int maxMblock = kernel_1.blockDim.x*maxLoopPerTB; Mnext = (M + maxMblock - 1) / maxMblock; } else { // Fastest thread ID goes through N kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum, ceil_idiv<unsigned int>(N, kernel_1.blockDim.x)); kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); kernel_1.shMemSize = (kernel_1.blockDim.y > 1) ? 
kernel_1.blockDim.x*kernel_1.blockDim.y*type_size * 2 : 0; // Maximum number of times we want TB to loop in M // Max size of M-block each TB can handle int maxMblock = kernel_1.blockDim.y*maxLoopPerTB; Mnext = (M + maxMblock - 1) / maxMblock; } if (Mnext > 1) { // small_dptr[] is N*Mnext*type_size bytes workspace_size += N*Mnext*sizeof(double); // Set gridDim.y to Mnext kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); } if (Mnext > 1) { kernel_2.blockSize = nthread_reduce; kernel_2.gridSize = std::min(kBaseGridNum, static_cast<index_t>((N + kernel_2.blockSize - 1)/kernel_2.blockSize)); } } } }; inline size_t ReduceWorkspaceSize(Stream<gpu> *s, const ::mxnet::TShape& small, const OpReqType req, const ::mxnet::TShape& big, const int type_size) { if (req == kNullOp) return 0; ReduceImplConfig config(small, big, nullptr, nullptr, type_size); return config.workspace_size; } inline size_t ReduceWorkspaceSize(Stream<gpu> *s, const ::mxnet::TShape& small, const OpReqType req, const ::mxnet::TShape& big, const ::mxnet::TShape& lhs, const ::mxnet::TShape& rhs, const int type_size) { if (req == kNullOp) return 0; ReduceImplConfig config(small, big, &lhs, &rhs, type_size); return config.workspace_size; } #ifdef __CUDACC__ #include "broadcast_reduce-inl.cuh" #endif #endif // MXNET_USE_CUDA template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2> MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, const DType* __restrict lhs, const DType* __restrict rhs, DType *small, const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0, const Shape<ndim>& small_shape, const Shape<ndim>& rshape, const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape, const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride, const Shape<ndim>& rhs_stride) { Shape<ndim> coord = mxnet_op::unravel(idx, small_shape); const index_t idx_big0 = mxnet_op::ravel(coord, big_shape); const index_t idx_lhs0 = mxnet_op::ravel(coord, lhs_shape0); const index_t idx_rhs0 = mxnet_op::ravel(coord, rhs_shape0); DType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { Shape<ndim> coord_big = mxnet_op::unravel(k, rshape); index_t idx_big = idx_big0 + mxnet_op::dot(coord_big, rstride); Shape<ndim> coord_lhs = mxnet_op::unravel(k, lhs_shape); index_t idx_lhs = idx_lhs0 + mxnet_op::dot(coord_lhs, lhs_stride); Shape<ndim> coord_rhs = mxnet_op::unravel(k, rhs_shape); index_t idx_rhs = idx_rhs0 + mxnet_op::dot(coord_rhs, rhs_stride); Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual); } Reducer::Finalize(val, residual); assign(&small[idx], addto, val); } template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2> void seq_reduce_compute(const size_t N, const size_t M, const bool addto, const DType *big, const DType *lhs, const DType *rhs, DType *small, const Shape<ndim> big_shape, const Shape<ndim> small_shape, const Shape<ndim> rshape, const Shape<ndim> rstride, const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride, const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) { for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small, big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride, lhs_stride, rhs_stride); } } template<typename Reducer, int 
ndim, typename DType, typename OP1, typename OP2> void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs, const TBlob& rhs) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(); size_t M = rshape.Size(); Shape<ndim> lhs_shape, lhs_stride; diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride); Shape<ndim> rhs_shape, rhs_stride; diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride); seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>( N, M, req == kAddTo, big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, lhs_shape, lhs_stride, rhs_shape, rhs_stride, lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>()); } #if MXNET_USE_CUDA void RTCReduce(const OpContext& ctx, const TBlob& small, const OpReqType req, const Tensor<gpu, 1, char>& workspace, const TBlob& big, const std::string& reducer, int ndim, const std::string& OP); void RTCReduce(const OpContext& ctx, const TBlob& small, const OpReqType req, const Tensor<gpu, 1, char>& workspace, const TBlob& big, const TBlob &lhs, const TBlob &rhs, const std::string& reducer, int ndim, const std::string& OP1, const std::string& OP2); #endif } // namespace broadcast } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
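The variant above also includes ReduceWithExtraMem, which trades workspace memory for speed: the M inner offsets dot(unravel(k, rshape), rstride) are computed once into ws_dptr, so the hot loop becomes a plain gather instead of an unravel per element. A toy sketch of that precomputation, assuming plain arrays and a hard-coded shape in place of TBlob/Shape (the values here are hypothetical):

#include <cstdio>
#include <vector>

int main() {
  // big has shape [2][3][2]; reducing the middle axis gives small shape
  // [2][1][2], so diff() would report rshape = [3] and rstride = [2].
  const int big[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
  const int M = 3;        // reduced extent
  const int rstride = 2;  // stride of the reduced axis in big
  std::vector<int> ws(M);  // plays the role of ws_dptr
  for (int k = 0; k < M; ++k) ws[k] = k * rstride;  // precompute offsets once
  const int N = 4;                    // small.Size()
  const int base[4] = {0, 1, 6, 7};   // ravel(coord, bshape) for each output
  for (int idx = 0; idx < N; ++idx) {
    int val = 0;
    for (int k = 0; k < M; ++k) val += big[base[idx] + ws[k]];  // pure gather
    std::printf("small[%d] = %d\n", idx, val);
  }
  return 0;
}

With a sum reducer this prints 6, 9, 24, 27: each output sums three elements spaced rstride apart in the flattened input.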
/*! * Copyright (c) 2015-2017 by Contributors * \file broadcast_reduce-inl.h * \brief CPU-specific Function definition of broadcast and reduce operators */ #ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_ #define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_ #include <mxnet/operator_util.h> #include <algorithm> #include <vector> #include <string> #include <utility> #include "../mshadow_op.h" #include "../mxnet_op.h" #include "../operator_common.h" namespace mxnet { namespace op { namespace mxnet_op { template<int ndim, typename OP> struct binary_broadcast_kernel { /*! \brief Map function for binary_broadcast_kernel */ template<typename IType, typename DType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, IType *lhs, IType *rhs, DType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ template<typename LType, typename RType, typename OType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, LType *lhs, RType *rhs, OType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ template<typename IType, typename DType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, IType lhs, IType *rhs, DType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx])); } } /*! 
\brief Map function for binary_broadcast_kernel */ /* used for mixed type binary ops */ template<typename IType, typename DType, typename std::enable_if<!std::is_same<IType, DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, IType *lhs, DType *rhs, DType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ /* used for mixed type binary ops */ template<typename IType, typename DType, typename std::enable_if<!std::is_same<IType, DType>::value && !std::is_pointer<IType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape <ndim> &lstride, const Shape <ndim> &rstride, const Shape <ndim> &oshape, IType lhs, DType *rhs, DType *out) { Shape <ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx])); } } }; template<int req, typename OP, bool col_vec> struct csr_dns_csr_broadcast_kernel { /*! * \brief Map function for broadcast between csr and 1D vector * \param row global thread id/assigned row id * \param csr_data ptr to data buffer of csr matrix * \param csr_indices ptr to indices buffer of csr matrix * \param csr_indptr ptr to indptr buffer of csr matrix * \param dns ptr to data buffer of the dense vector * \param out ptr to the data buffer of the result csr matrix */ template<typename DType, typename CType, typename RType> MSHADOW_XINLINE static void Map(index_t row, const DType *csr_data, const CType *csr_indices, const RType *csr_indptr, const DType *dns, DType *out) { const nnvm::dim_t curr_row_i = csr_indptr[row]; const nnvm::dim_t next_row_i = csr_indptr[row + 1]; for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) { KERNEL_ASSIGN(out[iter], req, OP::Map(csr_data[iter], (col_vec)? dns[row] : dns[csr_indices[iter]])); } } /*! 
* \brief Map function for broadcast between csr and a scalar * \param i global thread id * \param csr_data ptr to data buffer of csr matrix * \param scalar_ptr ptr to data buffer of the scalar tensor, only the 0-th element is used * \param out ptr to the data buffer of output csr matrix * \param nnz number of non-zero elements in input csr matrix */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, const DType *csr_data, const DType* scalar_ptr, DType *out, const nnvm::dim_t nnz) { const DType scale = scalar_ptr[0]; if (i < nnz) { KERNEL_ASSIGN(out[i], req, OP::Map(csr_data[i], scale)); } } }; template<int req, typename OP, bool reverse = false> struct csr_dns_map_kernel { template <typename DType, typename CType, typename RType> MSHADOW_XINLINE static void Map(index_t row, const DType *csr_data, const CType *csr_indices, const RType *csr_indptr, DType *out, const nnvm::dim_t num_rows, const nnvm::dim_t num_cols) { if (row < num_rows) { const nnvm::dim_t curr_row_i = csr_indptr[row]; const nnvm::dim_t next_row_i = csr_indptr[row + 1]; for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) { const nnvm::dim_t target = row * num_cols + csr_indices[iter]; KERNEL_ASSIGN(out[target], req, reverse ? OP::Map(out[target], csr_data[iter]) : OP::Map(csr_data[iter], out[target])); } } } }; } // namespace mxnet_op namespace broadcast { using namespace mshadow; const int MAX_DIM = 5; template<int ndim> MSHADOW_XINLINE void unravel_dot(const index_t idx, const Shape<ndim>& shape, const Shape<ndim>& stridej, const Shape<ndim>& stridek, index_t* j, index_t* k) { *j = 0; *k = 0; #pragma unroll for (index_t i = ndim-1, idx_t = idx; i >=0; --i) { const auto tmp = idx_t / shape[i]; const auto coord = idx_t - tmp*shape[i]; *j += coord*stridej[i]; *k += coord*stridek[i]; idx_t = tmp; } } template<int ndim> MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big, Shape<ndim>* dims, Shape<ndim>* stride) { int mdim = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { mdim += small[i] != big[i]; (*dims)[i] = (*stride)[i] = 1; } index_t s = 1; #pragma unroll for (int i = ndim - 1, j = mdim; i >= 0; --i) { if (small[i] != big[i]) { --j; (*stride)[j] = s; (*dims)[j] = big[i]; } s *= big[i]; } return mdim; } template<typename DType> MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) { if (addto) { *dst += src; } else { *dst = src; } } template<int ndim, typename DType, typename OP> MSHADOW_XINLINE void binary_broadcast_assign(const index_t idx, const bool addto, const DType* __restrict lhs, const DType* __restrict rhs, DType* out, const Shape<ndim>& lshape, const Shape<ndim>& rshape, const Shape<ndim>& oshape) { const Shape<ndim> coord = mxnet_op::unravel(idx, oshape); const index_t j = mxnet_op::ravel(coord, lshape); const index_t k = mxnet_op::ravel(coord, rshape); assign(&out[idx], addto, OP::Map(lhs[j], rhs[k])); } template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP> MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, OType *small, const Shape<ndim>& bshape, const Shape<ndim>& sshape, const Shape<ndim>& rshape, const Shape<ndim>& rstride) { Shape<ndim> coord = mxnet_op::unravel(idx, sshape); index_t j = mxnet_op::ravel(coord, bshape); AType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { coord = mxnet_op::unravel(k, rshape); Reducer::Reduce(val, AType(OP::Map(big[j + mxnet_op::dot(coord, 
rstride)])), residual); } Reducer::Finalize(val, residual); assign(&small[idx], addto, OType(val)); } namespace { // Returns the stride with which the fastest dimension is moving. // Used to detect memory access scatter. inline int fastest_stride(const TShape &small, const TShape &big, const TShape &big_stride) { const int ndim = small.ndim(); for (int i = ndim-1; i >= 0; --i) { if (big[i] != 1) { return (small[i] == big[i]) ? 1 : big_stride[i]; } } return 1; } } // namespace template<int ndim, typename DType, typename OP> void BinaryBroadcastComputeImpl(Stream<cpu> *s, const OpReqType req, const TBlob& lhs, const TBlob& rhs, const TBlob& out) { mshadow::Shape<ndim> oshape = out.shape_.get<ndim>(); mshadow::Shape<ndim> lstride = mxnet_op::calc_stride(lhs.shape_.get<ndim>()); mshadow::Shape<ndim> rstride = mxnet_op::calc_stride(rhs.shape_.get<ndim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<ndim, OP>, cpu>:: template LaunchEx(s, out.shape_.Size(), req, lstride, rstride, oshape, lhs.dptr<DType>(), rhs.dptr<DType>(), out.dptr<DType>()); } template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP> void seq_reduce_compute(const size_t N, const size_t M, const bool addto, const DType *big, OType *small, const Shape<ndim> bshape, const Shape<ndim> sshape, const Shape<ndim> rshape, const Shape<ndim> rstride) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP>(idx, M, addto, big, small, bshape, sshape, rshape, rstride); } } template <typename Reducer, int ndim, typename DType, typename OP> void seq_reduce_compute_extra_mem(const size_t N, const size_t M, const bool addto, const DType* big, DType* small, const Shape<ndim> bshape, const Shape<ndim> sshape, const Shape<ndim> rshape, const Shape<ndim> rstride, const index_t* ws_dptr) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { Shape<ndim> coord = mxnet_op::unravel(idx, sshape); index_t j = mxnet_op::ravel(coord, bshape); DType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { Reducer::Reduce(val, OP::Map(big[j + ws_dptr[k]]), residual); } assign(&small[idx], addto, val); } } template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false> void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(), M = rshape.Size(); if (!safe_acc) { seq_reduce_compute<Reducer, ndim, DType, DType, DType, OP>( N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); } else { MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, { typedef typename std::conditional<safe_acc, AType, DataType>::type AccType; MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, { typedef typename std::conditional<safe_acc, OType, DataType>::type OutType; seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>( N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); }); }); } } template <typename Reducer, int 
ndim, typename DType, typename OP> void ReduceBool(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(), M = rshape.Size(); seq_reduce_compute<Reducer, ndim, bool, DType, bool, OP>( N, M, req == kAddTo, big.dptr<DType>(), small.dptr<bool>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); } template <typename Reducer, int ndim, typename DType, typename OP> void ReduceWithExtraMem(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { using namespace mxnet_op; if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); index_t* ws_dptr = reinterpret_cast<index_t*>(workspace.dptr_); size_t N = small.shape_.Size(), M = rshape.Size(); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t k = 0; k < static_cast<index_t>(M); k++) { Shape<ndim> coord = mxnet_op::unravel(k, rshape); ws_dptr[k] = mxnet_op::dot(coord, rstride); } seq_reduce_compute_extra_mem<Reducer, ndim, DType, OP>( N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, ws_dptr); } inline size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big, const int type_size) { return 0; } inline size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big, const mxnet::TShape& lhs, const mxnet::TShape& rhs, const int type_size) { return 0; } #if MXNET_USE_CUDA namespace { constexpr int warpSize = 32; constexpr int unroll_reduce = 2; // Returns a/b integer division rounded up template<typename Type> Type ceil_idiv(const Type a, const Type b) { return (a + b - 1)/b; } uint64_t calc_num_load(const int X, const int Y, const int* strides) { // Number of full warps uint64_t num_full_warp = X / warpSize; // Length of the partial warp i.e. 
number of threads that are performing loads uint64_t len_part_warp = X % warpSize; uint64_t num_load_full = (std::min(warpSize, strides[0]) + std::min(warpSize, strides[1]) + std::min(warpSize, strides[2]))*num_full_warp; uint64_t num_load_part = (std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp*strides[0], warpSize)) + std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp*strides[1], warpSize)) + std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp*strides[2], warpSize)))* (len_part_warp != 0); uint64_t num_load = (num_load_full + num_load_part)*(uint64_t)Y; return num_load; } inline int diff(const TShape& small, const TShape& big, TShape* dims, TShape* stride) { int ndim = small.ndim(); int mdim = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { mdim += small[i] != big[i]; (*dims)[i] = (*stride)[i] = 1; } index_t s = 1; #pragma unroll for (int i = ndim - 1, j = mdim; i >= 0; --i) { if (small[i] != big[i]) { --j; (*stride)[j] = s; (*dims)[j] = big[i]; } s *= big[i]; } return mdim; } constexpr int nthread_reduce = 512; constexpr index_t kBaseGridNum = 1024; } // namespace // Configuration for ReduceImpl() struct ReduceImplConfig { index_t N; index_t M; index_t Mnext; struct { dim3 blockDim; dim3 gridDim; int shMemSize; bool do_transpose; } kernel_1; struct { int blockSize; int gridSize; } kernel_2; size_t workspace_size; TShape rshape, rstride; TShape lhs_shape, lhs_stride; TShape rhs_shape, rhs_stride; inline ReduceImplConfig(const ::mxnet::TShape& small, const ::mxnet::TShape& big, const ::mxnet::TShape* lhs, const ::mxnet::TShape* rhs, const size_t type_size) : rshape(small.ndim(), 1), rstride(small.ndim(), 1), lhs_shape(small.ndim(), 1), lhs_stride(small.ndim(), 1), rhs_shape(small.ndim(), 1), rhs_stride(small.ndim(), 1) { constexpr int maxLoopPerTB = 64; int ndim = small.ndim(); diff(small, big, &rshape, &rstride); N = small.Size(); M = rshape[0]; for (int i = 1; i < ndim; ++i) { M *= rshape[i]; } bool multiOp = false; if (lhs != nullptr) { CHECK_NOTNULL(rhs); diff(small, *lhs, &lhs_shape, &lhs_stride); diff(small, *rhs, &rhs_shape, &rhs_stride); multiOp = true; } workspace_size = 0; kernel_1.shMemSize = 0; kernel_1.do_transpose = false; if (M == 1) { kernel_1.blockDim.x = nthread_reduce; kernel_1.gridDim.x = std::min(kBaseGridNum, static_cast<index_t>((N + kernel_1.blockDim.x - 1)/kernel_1.blockDim.x)); } else { int reduce_strides[3]; reduce_strides[0] = fastest_stride(small, big, big); reduce_strides[1] = (multiOp) ? fastest_stride(small, *lhs, *lhs) : 1; reduce_strides[2] = (multiOp) ? fastest_stride(small, *rhs, *rhs) : 1; int reduce_strides_transp[3]; reduce_strides_transp[0] = fastest_stride(small, rshape, rstride); reduce_strides_transp[1] = (multiOp) ? fastest_stride(small, lhs_shape, lhs_stride) : 1; reduce_strides_transp[2] = (multiOp) ? 
fastest_stride(small, rhs_shape, rhs_stride) : 1; uint64_t num_load = calc_num_load(N, M, reduce_strides); uint64_t num_load_transp = calc_num_load(M, N, reduce_strides_transp); Mnext = 1; kernel_1.do_transpose = (num_load > num_load_transp); kernel_1.blockDim.x = 0; kernel_1.blockDim.y = 0; if (kernel_1.do_transpose) { // Fastest thread ID goes through M // Loop over N has step size kernel_1.blockDim.y if (N < 8) { kernel_1.blockDim.y = 1; } else if (N < 256) { kernel_1.blockDim.y = 4; } else { if (M < 8) { kernel_1.blockDim.x = 1; } else if (M < 256) { kernel_1.blockDim.x = 4; } else { kernel_1.blockDim.x = warpSize; } } } else { // Fastest thread ID goes through N // Loop over M has step size kernel_1.blockDim.y if (M < 8) { kernel_1.blockDim.y = 1; } else if (M < 256) { kernel_1.blockDim.y = 4; } else { if (N < 8) { kernel_1.blockDim.x = 1; } else if (N < 256) { kernel_1.blockDim.x = 4; } else { kernel_1.blockDim.x = warpSize; } } } if (kernel_1.blockDim.x == 0 && kernel_1.blockDim.y == 0) { LOG(FATAL) << "Unable to set blockDim"; } else if (kernel_1.blockDim.x == 0) { kernel_1.blockDim.x = nthread_reduce / kernel_1.blockDim.y; } else if (kernel_1.blockDim.y == 0) { kernel_1.blockDim.y = nthread_reduce / kernel_1.blockDim.x; } if (kernel_1.do_transpose) { // Fastest thread ID goes through M kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum, ceil_idiv<unsigned int>(N, kernel_1.blockDim.y)); kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); int by = kernel_1.blockDim.y; if (kernel_1.blockDim.y % warpSize == 0) { // Fix shared memory bank conflict by++; } kernel_1.shMemSize = (kernel_1.blockDim.x > 1) ? kernel_1.blockDim.x*by*type_size * 2 : 0; // Maximum number of times we want TB to loop in M // Max size of M-block each TB can handle int maxMblock = kernel_1.blockDim.x*maxLoopPerTB; Mnext = (M + maxMblock - 1) / maxMblock; } else { // Fastest thread ID goes through N kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum, ceil_idiv<unsigned int>(N, kernel_1.blockDim.x)); kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); kernel_1.shMemSize = (kernel_1.blockDim.y > 1) ? 
kernel_1.blockDim.x*kernel_1.blockDim.y*type_size * 2 : 0; // Maximum number of times we want TB to loop in M // Max size of M-block each TB can handle int maxMblock = kernel_1.blockDim.y*maxLoopPerTB; Mnext = (M + maxMblock - 1) / maxMblock; } if (Mnext > 1) { // small_dptr[] is N*Mnext*type_size bytes workspace_size += N*Mnext*sizeof(double); // Set gridDim.y to Mnext kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); } if (Mnext > 1) { kernel_2.blockSize = nthread_reduce; kernel_2.gridSize = std::min(kBaseGridNum, static_cast<index_t>((N + kernel_2.blockSize - 1)/kernel_2.blockSize)); } } } }; inline size_t ReduceWorkspaceSize(Stream<gpu> *s, const ::mxnet::TShape& small, const OpReqType req, const ::mxnet::TShape& big, const int type_size) { if (req == kNullOp) return 0; ReduceImplConfig config(small, big, nullptr, nullptr, type_size); return config.workspace_size; } inline size_t ReduceWorkspaceSize(Stream<gpu> *s, const ::mxnet::TShape& small, const OpReqType req, const ::mxnet::TShape& big, const ::mxnet::TShape& lhs, const ::mxnet::TShape& rhs, const int type_size) { if (req == kNullOp) return 0; ReduceImplConfig config(small, big, &lhs, &rhs, type_size); return config.workspace_size; } #ifdef __CUDACC__ #include "broadcast_reduce-inl.cuh" #endif #endif // MXNET_USE_CUDA template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2> MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, const DType* __restrict lhs, const DType* __restrict rhs, DType *small, const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0, const Shape<ndim>& small_shape, const Shape<ndim>& rshape, const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape, const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride, const Shape<ndim>& rhs_stride) { Shape<ndim> coord = mxnet_op::unravel(idx, small_shape); const index_t idx_big0 = mxnet_op::ravel(coord, big_shape); const index_t idx_lhs0 = mxnet_op::ravel(coord, lhs_shape0); const index_t idx_rhs0 = mxnet_op::ravel(coord, rhs_shape0); DType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { Shape<ndim> coord_big = mxnet_op::unravel(k, rshape); index_t idx_big = idx_big0 + mxnet_op::dot(coord_big, rstride); Shape<ndim> coord_lhs = mxnet_op::unravel(k, lhs_shape); index_t idx_lhs = idx_lhs0 + mxnet_op::dot(coord_lhs, lhs_stride); Shape<ndim> coord_rhs = mxnet_op::unravel(k, rhs_shape); index_t idx_rhs = idx_rhs0 + mxnet_op::dot(coord_rhs, rhs_stride); Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual); } Reducer::Finalize(val, residual); assign(&small[idx], addto, val); } template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2> void seq_reduce_compute(const size_t N, const size_t M, const bool addto, const DType *big, const DType *lhs, const DType *rhs, DType *small, const Shape<ndim> big_shape, const Shape<ndim> small_shape, const Shape<ndim> rshape, const Shape<ndim> rstride, const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride, const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small, big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, 
lhs_shape, rhs_shape, rstride, lhs_stride, rhs_stride); } } template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2> void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs, const TBlob& rhs) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(); size_t M = rshape.Size(); Shape<ndim> lhs_shape, lhs_stride; diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride); Shape<ndim> rhs_shape, rhs_stride; diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride); seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>( N, M, req == kAddTo, big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, lhs_shape, lhs_stride, rhs_shape, rhs_stride, lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>()); } #if MXNET_USE_CUDA void RTCReduce(const OpContext& ctx, const TBlob& small, const OpReqType req, const Tensor<gpu, 1, char>& workspace, const TBlob& big, const std::string& reducer, int ndim, const std::string& OP); void RTCReduce(const OpContext& ctx, const TBlob& small, const OpReqType req, const Tensor<gpu, 1, char>& workspace, const TBlob& big, const TBlob &lhs, const TBlob &rhs, const std::string& reducer, int ndim, const std::string& OP1, const std::string& OP2); #endif } // namespace broadcast } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
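The OpenMP variant above parallelizes only the outer loop over the N outputs: every iteration writes a distinct small[idx], so no synchronization is needed, and in MXNet the thread count comes from engine::OpenMP::Get()->GetRecommendedOMPThreadCount(). A self-contained sketch of the same pattern, assuming toy data instead of the engine API (compile with -fopenmp):

#include <cstdio>

int main() {
  const int N = 8, M = 1000;
  double small[8];
  // Safe to parallelize: iterations touch disjoint elements of small[].
  #pragma omp parallel for
  for (int idx = 0; idx < N; ++idx) {
    double val = 0.0;  // per-iteration (hence per-thread) accumulator
    for (int k = 0; k < M; ++k) {
      val += idx * 0.001 * k;  // stand-in for Reducer::Reduce over M inputs
    }
    small[idx] = val;  // exactly one writer per slot, no locking required
  }
  for (int idx = 0; idx < N; ++idx) {
    std::printf("small[%d] = %g\n", idx, small[idx]);
  }
  return 0;
}

Note the inner loop over M stays sequential; parallelizing it instead would turn the accumulation into a shared-write reduction and require an OpenMP reduction clause.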
cryptocontext.h
// @file cryptocontext.h -- Control for encryption operations. // @author TPOC: [email protected] // // @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT) // All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. THIS SOFTWARE IS // PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef SRC_PKE_CRYPTOCONTEXT_H_ #define SRC_PKE_CRYPTOCONTEXT_H_ #include <map> #include <memory> #include <string> #include <utility> #include <vector> #include "palisade.h" #include "scheme/allscheme.h" #include "cryptocontexthelper.h" #include "cryptotiming.h" #include "utils/caller_info.h" #include "utils/serial.h" #include "utils/serialize-binary.h" #include "utils/serialize-json.h" namespace lbcrypto { template <typename Element> class CryptoContextFactory; template <typename Element> class CryptoContextImpl; template <typename Element> using CryptoContext = shared_ptr<CryptoContextImpl<Element>>; /** * @brief CryptoContextImpl * * A CryptoContextImpl is the object used to access the PALISADE library * * All PALISADE functionality is accessed by way of an instance of a * CryptoContextImpl; we say that various objects are "created in" a context, * and can only be used in the context in which they were created * * All PALISADE methods are accessed through CryptoContextImpl methods. 
Guards * are implemented to make certain that only valid objects that have been * created in the context are used * * Contexts are created using the CryptoContextFactory, and can be serialized * and recovered from a serialization */ template <typename Element> class CryptoContextImpl : public Serializable { using IntType = typename Element::Integer; using ParmType = typename Element::Params; friend class CryptoContextFactory<Element>; protected: // crypto parameters used for this context shared_ptr<LPCryptoParameters<Element>> params; // algorithm used; accesses all crypto methods shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme; static std::map<string, std::vector<LPEvalKey<Element>>>& evalMultKeyMap() { // cached evalmult keys, by secret key UID static std::map<string, std::vector<LPEvalKey<Element>>> s_evalMultKeyMap; return s_evalMultKeyMap; } static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>& evalSumKeyMap() { // cached evalsum keys, by secret key UID static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> s_evalSumKeyMap; return s_evalSumKeyMap; } static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>& evalAutomorphismKeyMap() { // cached evalautomorphism keys, by secret key UID static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> s_evalAutomorphismKeyMap; return s_evalAutomorphismKeyMap; } bool doTiming; vector<TimingInfo>* timeSamples; string m_schemeId; size_t m_keyGenLevel; /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * @param a * @param b */ void TypeCheck(ConstCiphertext<Element> a, ConstCiphertext<Element> b, CALLER_INFO_ARGS_HDR) const { if (a == nullptr || b == nullptr) { std::string errorMsg(std::string("Null Ciphertext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext() != b->GetCryptoContext()) { std::string errorMsg( std::string( "Ciphertexts were not created in the same CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetKeyTag() != b->GetKeyTag()) { std::string errorMsg( std::string("Ciphertexts were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "Ciphertext encoding types " << a->GetEncodingType(); ss << " and " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * This is intended for mutable methods, hence inputs are Ciphertext instead * of ConstCiphertext. 
* * @param a * @param b */ /* void TypeCheck(Ciphertext<Element> a, Ciphertext<Element> b, CALLER_INFO_ARGS_HDR) const { if (a == nullptr || b == nullptr) { std::string errorMsg(std::string("Null Ciphertext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext() != b->GetCryptoContext()) { std::string errorMsg( std::string("Ciphertexts were not created in the same CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetKeyTag() != b->GetKeyTag()) { std::string errorMsg( std::string("Ciphertexts were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "Ciphertext encoding types " << a->GetEncodingType(); ss << " and " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } */ /** * TypeCheck makes sure that an operation between a ciphertext and a plaintext * is permitted * @param a * @param b */ void TypeCheck(ConstCiphertext<Element> a, ConstPlaintext b, CALLER_INFO_ARGS_HDR) const { if (a == nullptr) { std::string errorMsg(std::string("Null Ciphertext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (b == nullptr) { std::string errorMsg(std::string("Null Plaintext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "Ciphertext encoding type " << a->GetEncodingType(); ss << " and Plaintext encoding type " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * @param a * @param b */ void TypeCheck(const RationalCiphertext<Element>& a, const RationalCiphertext<Element>& b, CALLER_INFO_ARGS_HDR) const { if (a.GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetCryptoContext() != b.GetCryptoContext()) { std::string errorMsg( std::string( "Ciphertexts were not created in the same CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetKeyTag() != b.GetKeyTag()) { std::string errorMsg( std::string("Ciphertexts were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetNumerator()->GetEncodingType() != b.GetNumerator()->GetEncodingType()) { std::stringstream ss; ss << "RationalCiphertext encoding types " << a.GetNumerator()->GetEncodingType(); ss << " and " << b.GetNumerator()->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } /** * TypeCheck makes sure that an operation between a ciphertext and a plaintext * is permitted * @param a * @param b */ void TypeCheck(const RationalCiphertext<Element>& a, ConstPlaintext b, CALLER_INFO_ARGS_HDR) const { if (b == nullptr) { std::string errorMsg(std::string("Null Plaintext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetCryptoContext().get() != this) { std::string 
errorMsg( std::string("Ciphertext was not created in this CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetNumerator()->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "RationalCiphertext encoding type " << a.GetNumerator()->GetEncodingType(); ss << " and Plaintext encoding type " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } bool Mismatched(const CryptoContext<Element> a) const { if (a.get() != this) { return true; } return false; } public: LPPrivateKey<Element> privateKey; /** * This stores the private key in the crypto context. * This is only intended for debugging and should not be * used in production systems. Please define DEBUG_KEY in * palisade.h to enable this. * * If used, one can create a key pair and store the secret * key in th crypto context like this: * * auto keys = cc->KeyGen(); * cc->SetPrivateKey(keys.secretKey); * * After that, anyone in the code, one can access the * secret key by getting the crypto context and doing the * following: * * auto sk = cc->GetPrivateKey(); * * This key can be used for decrypting any intermediate * ciphertexts for debugging purposes. * * @param sk the secret key * */ void SetPrivateKey(const LPPrivateKey<Element> sk) { #ifdef DEBUG_KEY cerr << "Warning - SetPrivateKey is only intended to be used for debugging " "purposes - not for production systems." << endl; this->privateKey = sk; #else PALISADE_THROW( not_available_error, "SetPrivateKey is only allowed if DEBUG_KEY is set in palisade.h"); #endif } /** * This gets the private key from the crypto context. * This is only intended for debugging and should not be * used in production systems. Please define DEBUG_KEY in * palisade.h to enable this. * * If used, one can create a key pair and store the secret * key in th crypto context like this: * * auto keys = cc->KeyGen(); * cc->SetPrivateKey(keys.secretKey); * * After that, anyone in the code, one can access the * secret key by getting the crypto context and doing the * following: * * auto sk = cc->GetPrivateKey(); * * This key can be used for decrypting any intermediate * ciphertexts for debugging purposes. 
* * @return the secret key * */ const LPPrivateKey<Element> GetPrivateKey() { #ifdef DEBUG_KEY return this->privateKey; #else PALISADE_THROW( not_available_error, "GetPrivateKey is only allowed if DEBUG_KEY is set in palisade.h"); #endif } void setSchemeId(string schemeTag) { this->m_schemeId = schemeTag; } string getSchemeId() const { return this->m_schemeId; } /** * CryptoContextImpl constructor from pointers to parameters and scheme * @param params - pointer to CryptoParameters * @param scheme - pointer to Crypto Scheme */ CryptoContextImpl(LPCryptoParameters<Element>* params = nullptr, LPPublicKeyEncryptionScheme<Element>* scheme = nullptr, const string& schemeId = "Not") { this->params.reset(params); this->scheme.reset(scheme); this->doTiming = false; this->timeSamples = 0; this->m_keyGenLevel = 0; this->m_schemeId = schemeId; } /** * CryptoContextImpl constructor from shared pointers to parameters and scheme * @param params - shared pointer to CryptoParameters * @param scheme - shared pointer to Crypto Scheme */ CryptoContextImpl(shared_ptr<LPCryptoParameters<Element>> params, shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme, const string& schemeId = "Not") { this->params = params; this->scheme = scheme; this->doTiming = false; this->timeSamples = 0; this->m_keyGenLevel = 0; this->m_schemeId = schemeId; } /** * Copy constructor * @param c - source */ CryptoContextImpl(const CryptoContextImpl<Element>& c) { params = c.params; scheme = c.scheme; doTiming = c.doTiming; timeSamples = c.timeSamples; this->m_keyGenLevel = 0; this->m_schemeId = c.m_schemeId; } /** * Assignment * @param rhs - assigning from * @return this */ CryptoContextImpl<Element>& operator=(const CryptoContextImpl<Element>& rhs) { params = rhs.params; scheme = rhs.scheme; doTiming = rhs.doTiming; timeSamples = rhs.timeSamples; m_keyGenLevel = rhs.m_keyGenLevel; m_schemeId = rhs.m_schemeId; return *this; } /** * A CryptoContextImpl is only valid if the shared pointers are both valid */ operator bool() const { return params && scheme; } /** * Private methods to compare two contexts; this is only used internally and * is not generally available * @param a - operand 1 * @param b - operand 2 * @return true if the implementations have identical params and scheme */ friend bool operator==(const CryptoContextImpl<Element>& a, const CryptoContextImpl<Element>& b) { // Identical if the parameters and the schemes are identical...
the exact // same object, OR the same type and the same values if (a.params.get() == b.params.get()) { return true; } else { if (typeid(*a.params.get()) != typeid(*b.params.get())) { return false; } if (*a.params.get() != *b.params.get()) return false; } if (a.scheme.get() == b.scheme.get()) { return true; } else { if (typeid(*a.scheme.get()) != typeid(*b.scheme.get())) { return false; } if (*a.scheme.get() != *b.scheme.get()) return false; } return true; } friend bool operator!=(const CryptoContextImpl<Element>& a, const CryptoContextImpl<Element>& b) { return !(a == b); } // TIMING METHODS /** * StartTiming method activates timing of CryptoMethods * * @param timeSamples points to a vector in which timing samples will be * stored */ void StartTiming(vector<TimingInfo>* timeSamples) { this->timeSamples = timeSamples; doTiming = true; } /* * StopTiming - turns off timing */ void StopTiming() { doTiming = false; } /** * ResumeTiming - re-enables timing with existing TimingInfo vector */ void ResumeTiming() { doTiming = true; } /** * ResetTiming - erases measurements */ void ResetTiming() { this->timeSamples->clear(); } static bool SerializeEvalMultKey(Serialized* serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalMultKey(Serialized* serObj, const string& id) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalMultKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool DeserializeEvalMultKey(Serialized* serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * SerializeEvalMultKey for a single EvalMult key or all EvalMult keys * * @param ser - stream to serialize to * @param sertype - type of serialization * @param id for key to serialize - if empty string, serialize them all * @return true on success */ template <typename ST> static bool SerializeEvalMultKey(std::ostream& ser, const ST& sertype, string id = ""); /** * SerializeEvalMultKey for all EvalMultKeys made in a given context * * @param cc whose keys should be serialized * @param ser - stream to serialize to * @param sertype - type of serialization * @return true on success (false on failure or no keys found) */ template <typename ST> static bool SerializeEvalMultKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) { std::map<string, std::vector<LPEvalKey<Element>>> omap; for (const auto& k : GetAllEvalMultKeys()) { if (k.second[0]->GetCryptoContext() == cc) { omap[k.first] = k.second; } } if (omap.size() == 0) return false; Serial::Serialize(omap, ser, sertype); return true; } /** * DeserializeEvalMultKey deserialize all keys in the serialization * deserialized keys silently replace any existing matching keys * deserialization will create CryptoContextImpl if necessary * * @param serObj - stream with a serialization * @return true on success */ template <typename ST> static bool DeserializeEvalMultKey(std::istream& ser, const ST& sertype) { std::map<string, std::vector<LPEvalKey<Element>>> evalMultKeys; Serial::Deserialize(evalMultKeys, ser, sertype); // The deserialize call created any contexts that needed to be created....
// so all we need to do is put the keys into the maps for their context for (const auto& k : evalMultKeys) { GetAllEvalMultKeys()[k.first] = k.second; } return true; } /** * ClearEvalMultKeys - flush EvalMultKey cache */ static void ClearEvalMultKeys(); /** * ClearEvalMultKeys - flush EvalMultKey cache for a given id * @param id */ static void ClearEvalMultKeys(const string& id); /** * ClearEvalMultKeys - flush EvalMultKey cache for a given context * @param cc */ static void ClearEvalMultKeys(const CryptoContext<Element> cc); /** * InsertEvalMultKey - add the given vector of keys to the map, replacing the * existing vector if already present * @param vectorToInsert */ static void InsertEvalMultKey( const std::vector<LPEvalKey<Element>>& vectorToInsert); static bool SerializeEvalSumKey(Serialized* serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalSumKey(Serialized* serObj, const string& id) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalSumKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool DeserializeEvalSumKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * SerializeEvalSumKey for a single EvalSum key or all of the EvalSum keys * * @param ser - stream to serialize to * @param sertype - type of serialization * @param id - key to serialize; empty string means all keys * @return true on success */ template <typename ST> static bool SerializeEvalSumKey(std::ostream& ser, const ST& sertype, string id = "") { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>* smap; std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; if (id.length() == 0) { smap = &GetAllEvalSumKeys(); } else { auto k = GetAllEvalSumKeys().find(id); if (k == GetAllEvalSumKeys().end()) return false; // no such id smap = &omap; omap[k->first] = k->second; } Serial::Serialize(*smap, ser, sertype); return true; } /** * SerializeEvalSumKey for all of the EvalSum keys for a context * * @param ser - stream to serialize to * @param sertype - type of serialization * @param cc - context * @return true on success */ template <typename ST> static bool SerializeEvalSumKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; for (const auto& k : GetAllEvalSumKeys()) { if (k.second->begin()->second->GetCryptoContext() == cc) { omap[k.first] = k.second; } } if (omap.size() == 0) return false; Serial::Serialize(omap, ser, sertype); return true; } /** * DeserializeEvalSumKey deserialize all keys in the serialization * deserialized keys silently replace any existing matching keys * deserialization will create CryptoContextImpl if necessary * * @param ser - stream to serialize from * @param sertype - type of serialization * @return true on success */ template <typename ST> static bool DeserializeEvalSumKey(std::istream& ser, const ST& sertype) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> evalSumKeys; Serial::Deserialize(evalSumKeys, ser, sertype); // The deserialize call created any contexts that needed to be created....
// so all we need to do is put the keys into the maps for their context for (auto k : evalSumKeys) { GetAllEvalSumKeys()[k.first] = k.second; } return true; } /** * ClearEvalSumKeys - flush EvalSumKey cache */ static void ClearEvalSumKeys(); /** * ClearEvalSumKeys - flush EvalSumKey cache for a given id * @param id */ static void ClearEvalSumKeys(const string& id); /** * ClearEvalSumKeys - flush EvalSumKey cache for a given context * @param cc */ static void ClearEvalSumKeys(const CryptoContext<Element> cc); /** * InsertEvalSumKey - add the given map of keys to the map, replacing the * existing map if already present * @param mapToInsert */ static void InsertEvalSumKey( const shared_ptr<std::map<usint, LPEvalKey<Element>>> mapToInsert); static bool SerializeEvalAutomorphismKey(Serialized* serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalAutomorphismKey(Serialized* serObj, const string& id) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalAutomorphismKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool DeserializeEvalAutomorphismKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * SerializeEvalAutomorphismKey for a single EvalAuto key or all of the * EvalAuto keys * * @param ser - stream to serialize to * @param sertype - type of serialization * @param id - key to serialize; empty string means all keys * @return true on success */ template <typename ST> static bool SerializeEvalAutomorphismKey(std::ostream& ser, const ST& sertype, string id = "") { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>* smap; std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; if (id.length() == 0) { smap = &GetAllEvalAutomorphismKeys(); } else { auto k = GetAllEvalAutomorphismKeys().find(id); if (k == GetAllEvalAutomorphismKeys().end()) return false; // no such id smap = &omap; omap[k->first] = k->second; } Serial::Serialize(*smap, ser, sertype); return true; } /** * SerializeEvalAutomorphismKey for all of the EvalAuto keys for a context * * @param ser - stream to serialize to * @param sertype - type of serialization * @param cc - context * @return true on success */ template <typename ST> static bool SerializeEvalAutomorphismKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; for (const auto& k : GetAllEvalAutomorphismKeys()) { if (k.second->begin()->second->GetCryptoContext() == cc) { omap[k.first] = k.second; } } if (omap.size() == 0) return false; Serial::Serialize(omap, ser, sertype); return true; } /** * DeserializeEvalAutomorphismKey deserialize all keys in the serialization * deserialized keys silently replace any existing matching keys * deserialization will create CryptoContextImpl if necessary * * @param ser - stream to serialize from * @param sertype - type of serialization * @return true on success */ template <typename ST> static bool DeserializeEvalAutomorphismKey(std::istream& ser, const ST& sertype) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> evalAutomorphismKeys; Serial::Deserialize(evalAutomorphismKeys, ser, sertype); // The deserialize call created any contexts that needed to be created....
// so all we need to do is put the keys into the maps for their context for (const auto& k : evalAutomorphismKeys) { GetAllEvalAutomorphismKeys()[k.first] = k.second; } return true; } /** * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache */ static void ClearEvalAutomorphismKeys(); /** * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache for a given id * @param id */ static void ClearEvalAutomorphismKeys(const string& id); /** * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache for a given * context * @param cc */ static void ClearEvalAutomorphismKeys(const CryptoContext<Element> cc); /** * InsertEvalAutomorphismKey - add the given map of keys to the map, replacing * the existing map if already present * @param mapToInsert */ static void InsertEvalAutomorphismKey( const shared_ptr<std::map<usint, LPEvalKey<Element>>> mapToInsert); // TURN FEATURES ON /** * Enable a particular feature for use with this CryptoContextImpl * @param feature - the feature that should be enabled */ void Enable(PKESchemeFeature feature) { scheme->Enable(feature); } /** * Enable several features at once * @param featureMask - bitwise or of several PKESchemeFeatures */ void Enable(usint featureMask) { scheme->Enable(featureMask); } // GETTERS /** * Getter for Scheme * @return scheme */ const shared_ptr<LPPublicKeyEncryptionScheme<Element>> GetEncryptionAlgorithm() const { return scheme; } /** * Getter for CryptoParams * @return params */ const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return params; } size_t GetKeyGenLevel() const { return m_keyGenLevel; } void SetKeyGenLevel(size_t level) { m_keyGenLevel = level; } /** * Getter for element params * @return */ const shared_ptr<ParmType> GetElementParams() const { return params->GetElementParams(); } /** * Getter for encoding params * @return */ const EncodingParams GetEncodingParams() const { return params->GetEncodingParams(); } /** * Get the cyclotomic order used for this context * * @return */ usint GetCyclotomicOrder() const { return params->GetElementParams()->GetCyclotomicOrder(); } /** * Get the ring dimension used for this context * * @return */ usint GetRingDimension() const { return params->GetElementParams()->GetRingDimension(); } /** * Get the ciphertext modulus used for this context * * @return */ const IntType& GetModulus() const { return params->GetElementParams()->GetModulus(); } /** * Get the root of unity used for this context * * @return */ const IntType& GetRootOfUnity() const { return params->GetElementParams()->GetRootOfUnity(); } /** * KeyGen generates a key pair using this algorithm's KeyGen method * @return a public/secret key pair */ LPKeyPair<Element> KeyGen() { TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->KeyGen( CryptoContextFactory<Element>::GetContextForPointer(this), false); if (doTiming) { timeSamples->push_back(TimingInfo(OpKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generation of a public key derived * from a previous joined public key (for prior secret shares) and the secret * key share of the current party. * * @param pk joined public key from prior parties. * @param makeSparse set to true if ring reduction by a factor of 2 is to be * used. NOT SUPPORTED BY ANY SCHEME ANYMORE.
* * @param fresh set to true if proxy re-encryption is used in the multi-party * protocol or star topology is used * @return key pair including the secret share for the current party and * joined public key */ LPKeyPair<Element> MultipartyKeyGen(const LPPublicKey<Element> pk, bool makeSparse = false, bool fresh = false) { if (!pk) PALISADE_THROW(config_error, "Input public key is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen( CryptoContextFactory<Element>::GetContextForPointer(this), pk, makeSparse, fresh); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiPartyKeyGenKey, TOC_US(t))); } return r; } /** * Threshold FHE: Generates a public key from a vector of secret shares. * ONLY FOR DEBUGGING PURPOSES. SHOULD NOT BE USED IN PRODUCTION. * * @param secretKeys secret key shares. * @return key pair including the private key for the current party and joined * public key */ LPKeyPair<Element> MultipartyKeyGen( const vector<LPPrivateKey<Element>>& secretKeys) { if (!secretKeys.size()) PALISADE_THROW(config_error, "Input private key vector is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen( CryptoContextFactory<Element>::GetContextForPointer(this), secretKeys, false); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiPartyKeyGenKeyvec, TOC_US(t))); } return r; } /** * Threshold FHE: Method for decryption operation run by the lead decryption * client * * @param privateKey secret key share used for decryption. * @param ciphertext ciphertext to be decrypted. */ vector<Ciphertext<Element>> MultipartyDecryptLead( const LPPrivateKey<Element> privateKey, const vector<Ciphertext<Element>>& ciphertext) const { if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to MultipartyDecryptLead was not " "generated with this crypto context"); vector<Ciphertext<Element>> newCiphertext; TimeVar t; if (doTiming) TIC(t); for (size_t i = 0; i < ciphertext.size(); i++) { if (ciphertext[i] == nullptr || Mismatched(ciphertext[i]->GetCryptoContext())) PALISADE_THROW(config_error, "A ciphertext passed to MultipartyDecryptLead was not " "generated with this crypto context"); newCiphertext.push_back(GetEncryptionAlgorithm()->MultipartyDecryptLead( privateKey, ciphertext[i])); } if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiPartyDecryptLead, TOC_US(t))); } return newCiphertext; } /** * Threshold FHE: "Partial" decryption computed by all parties except for the * lead one * * @param privateKey secret key share used for decryption. * @param ciphertext ciphertext that is being decrypted.
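* * A hedged usage sketch of the full threshold decryption flow; cc, kp1, kp2, * and ct are placeholder names, and Element = DCRTPoly is assumed: * * vector<Ciphertext<DCRTPoly>> cts{ct}; * auto leadShares = cc->MultipartyDecryptLead(kp1.secretKey, cts); * auto mainShares = cc->MultipartyDecryptMain(kp2.secretKey, cts); * vector<Ciphertext<DCRTPoly>> partials{leadShares[0], mainShares[0]}; * Plaintext result; * cc->MultipartyDecryptFusion(partials, &result);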
*/ vector<Ciphertext<Element>> MultipartyDecryptMain( const LPPrivateKey<Element> privateKey, const vector<Ciphertext<Element>>& ciphertext) const { if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to MultipartyDecryptMain was not " "generated with this crypto context"); vector<Ciphertext<Element>> newCiphertext; TimeVar t; if (doTiming) TIC(t); for (size_t i = 0; i < ciphertext.size(); i++) { if (ciphertext[i] == nullptr || Mismatched(ciphertext[i]->GetCryptoContext())) PALISADE_THROW(config_error, "A ciphertext passed to MultipartyDecryptMain was not " "generated with this crypto context"); newCiphertext.push_back(GetEncryptionAlgorithm()->MultipartyDecryptMain( privateKey, ciphertext[i])); } if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiPartyDecryptMain, TOC_US(t))); } return newCiphertext; } /** * Threshold FHE: Method for combining the partially decrypted ciphertexts * and getting the final decryption in the clear. * * @param &partialCiphertextVec vector of "partial" decryptions. * @param *plaintext the plaintext output. * @return the decoding result. */ DecryptResult MultipartyDecryptFusion( const vector<Ciphertext<Element>>& partialCiphertextVec, Plaintext* plaintext) const; /** * Threshold FHE: Generates a joined evaluation key * from the current secret share and a prior joined * evaluation key * * @param originalPrivateKey secret key transformed from. * @param newPrivateKey secret key transformed to. * @param ek the prior joined evaluation key. * @return the new joined evaluation key. */ LPEvalKey<Element> MultiKeySwitchGen( const LPPrivateKey<Element> originalPrivateKey, const LPPrivateKey<Element> newPrivateKey, const LPEvalKey<Element> ek) const { if (!originalPrivateKey) PALISADE_THROW(config_error, "Input first private key is nullptr"); if (!newPrivateKey) PALISADE_THROW(config_error, "Input second private key is nullptr"); if (!ek) PALISADE_THROW(config_error, "Input evaluation key is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiKeySwitchGen(originalPrivateKey, newPrivateKey, ek); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiKeySwitchGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generates joined automorphism keys * from the current secret share and prior joined * automorphism keys * * @param privateKey secret key share. * @param eAuto a dictionary with prior joined automorphism keys. * @param &indexList a vector of automorphism indices. * @param keyId - new key identifier used for the resulting evaluation key * @return a dictionary with new joined automorphism keys. 
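* * A hedged sketch of chaining shares across two parties (placeholder names; * assumes the context also exposes EvalAutomorphismKeyGen for the first * party's share): * * auto autoKeys1 = cc->EvalAutomorphismKeyGen(kp1.secretKey, indexList); * auto autoKeys12 = cc->MultiEvalAutomorphismKeyGen( * kp2.secretKey, autoKeys1, indexList, kp2.publicKey->GetKeyTag());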
*/ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiEvalAutomorphismKeyGen( const LPPrivateKey<Element> privateKey, const shared_ptr<std::map<usint, LPEvalKey<Element>>> eAuto, const std::vector<usint>& indexList, const std::string& keyId = "") { if (!privateKey) PALISADE_THROW(config_error, "Input private key is nullptr"); if (!eAuto) PALISADE_THROW(config_error, "Input evaluation key map is nullptr"); if (!indexList.size()) PALISADE_THROW(config_error, "Input index vector is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiEvalAutomorphismKeyGen( privateKey, eAuto, indexList, keyId); if (doTiming) { timeSamples->push_back( TimingInfo(OpMultiEvalAutomorphismKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generates joined rotation keys * from the current secret share and prior joined * rotation keys * * @param privateKey secret key share. * @param eAuto a dictionary with prior joined rotation keys. * @param &indexList a vector of rotation indices. * @param keyId - new key identifier used for the resulting evaluation key * @return a dictionary with new joined rotation keys. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiEvalAtIndexKeyGen( const LPPrivateKey<Element> privateKey, const shared_ptr<std::map<usint, LPEvalKey<Element>>> eAuto, const std::vector<int32_t>& indexList, const std::string& keyId = "") { if (!privateKey) PALISADE_THROW(config_error, "Input private key is nullptr"); if (!eAuto) PALISADE_THROW(config_error, "Input evaluation key map is nullptr"); if (!indexList.size()) PALISADE_THROW(config_error, "Input index vector is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiEvalAtIndexKeyGen(privateKey, eAuto, indexList, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiEvalAtIndexKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generates joined summation evaluation keys * from the current secret share and prior joined * summation keys * * @param privateKey secret key share. * @param eSum a dictionary with prior joined summation keys. * @param keyId - new key identifier used for the resulting evaluation key * @return new joined summation keys. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiEvalSumKeyGen( const LPPrivateKey<Element> privateKey, const shared_ptr<std::map<usint, LPEvalKey<Element>>> eSum, const std::string& keyId = "") { if (!privateKey) PALISADE_THROW(config_error, "Input private key is nullptr"); if (!eSum) PALISADE_THROW(config_error, "Input evaluation key map is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiEvalSumKeyGen(privateKey, eSum, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiEvalSumKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Adds two prior evaluation keys * * @param a first evaluation key. * @param b second evaluation key. * @param keyId - new key identifier used for the resulting evaluation key * @return the new joined key. 
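* * Illustrative sketch (placeholder names): * * auto joined = cc->MultiAddEvalKeys(evalKey1, evalKey2, keyId);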
*/ LPEvalKey<Element> MultiAddEvalKeys(LPEvalKey<Element> a, LPEvalKey<Element> b, const std::string& keyId = "") { if (!a) PALISADE_THROW(config_error, "Input first evaluation key is nullptr"); if (!b) PALISADE_THROW(config_error, "Input second evaluation key is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiAddEvalKeys(a, b, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiAddEvalKeys, TOC_US(t))); } return r; } /** * Threshold FHE: Generates a partial evaluation key for homomorphic * multiplication based on the current secret share and an existing partial * evaluation key * * @param evalKey prior evaluation key. * @param sk current secret share. * @param keyId - new key identifier used for the resulting evaluation key * @return the new joined key. */ LPEvalKey<Element> MultiMultEvalKey(LPEvalKey<Element> evalKey, LPPrivateKey<Element> sk, const std::string& keyId = "") { if (!evalKey) PALISADE_THROW(config_error, "Input evaluation key is nullptr"); if (!sk) PALISADE_THROW(config_error, "Input private key is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiMultEvalKey(evalKey, sk, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiMultEvalKey, TOC_US(t))); } return r; } /** * Threshold FHE: Adds two prior evaluation key sets for summation * * @param es1 first summation key set. * @param es2 second summation key set. * @param keyId - new key identifier used for the resulting evaluation key * @return the new joined key set for summation. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiAddEvalSumKeys( const shared_ptr<std::map<usint, LPEvalKey<Element>>> es1, const shared_ptr<std::map<usint, LPEvalKey<Element>>> es2, const std::string& keyId = "") { if (!es1) PALISADE_THROW(config_error, "Input first evaluation key map is nullptr"); if (!es2) PALISADE_THROW(config_error, "Input second evaluation key map is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiAddEvalSumKeys(es1, es2, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiAddEvalSumKeys, TOC_US(t))); } return r; } /** * Threshold FHE: Adds two prior evaluation key sets for automorphisms * * @param es1 first automorphism key set. * @param es2 second automorphism key set. * @param keyId - new key identifier used for the resulting evaluation key. * @return the new joined key set for automorphisms. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiAddEvalAutomorphismKeys( const shared_ptr<std::map<usint, LPEvalKey<Element>>> es1, const shared_ptr<std::map<usint, LPEvalKey<Element>>> es2, const std::string& keyId = "") { if (!es1) PALISADE_THROW(config_error, "Input first evaluation key map is nullptr"); if (!es2) PALISADE_THROW(config_error, "Input second evaluation key map is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiAddEvalAutomorphismKeys(es1, es2, keyId); if (doTiming) { timeSamples->push_back( TimingInfo(OpMultiAddEvalAutomorphismKeys, TOC_US(t))); } return r; } /** * Threshold FHE: Adds two partial public keys * * @param pubKey1 first public key. * @param pubKey2 second public key. * @param keyId - new key identifier used for the resulting evaluation key. * @return the new joined key.
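* * Illustrative sketch (placeholder names): * * auto jointPub = cc->MultiAddPubKeys(kp1.publicKey, kp2.publicKey, keyId);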
*/ LPPublicKey<Element> MultiAddPubKeys(LPPublicKey<Element> pubKey1, LPPublicKey<Element> pubKey2, const std::string& keyId = "") { if (!pubKey1) PALISADE_THROW(config_error, "Input first public key is nullptr"); if (!pubKey2) PALISADE_THROW(config_error, "Input second public key is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiAddPubKeys(pubKey1, pubKey2, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiAddPubKeys, TOC_US(t))); } return r; } /** * Threshold FHE: Adds two partial evaluation keys for multiplication * * @param evalKey1 first evaluation key. * @param evalKey2 second evaluation key. * @param keyId - new key identifier used for the resulting evaluation key. * @return the new joined key. */ LPEvalKey<Element> MultiAddEvalMultKeys(LPEvalKey<Element> evalKey1, LPEvalKey<Element> evalKey2, const std::string& keyId = "") { if (!evalKey1) PALISADE_THROW(config_error, "Input first evaluation key is nullptr"); if (!evalKey2) PALISADE_THROW(config_error, "Input second evaluation key is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiAddEvalMultKeys(evalKey1, evalKey2, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiAddEvalMultKeys, TOC_US(t))); } return r; } /** * SparseKeyGen generates a key pair with special structure, and without full * entropy, for use in special cases like Ring Reduction * @return a public/secret key pair */ LPKeyPair<Element> SparseKeyGen() { TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->KeyGen( CryptoContextFactory<Element>::GetContextForPointer(this), true); if (doTiming) { timeSamples->push_back(TimingInfo(OpSparseKeyGen, TOC_US(t))); } return r; } /** * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption * @param newKey (public) * @param oldKey (private) * @return new evaluation key */ LPEvalKey<Element> ReKeyGen(const LPPublicKey<Element> newKey, const LPPrivateKey<Element> oldKey) const { if (newKey == nullptr || oldKey == nullptr || Mismatched(newKey->GetCryptoContext()) || Mismatched(oldKey->GetCryptoContext())) PALISADE_THROW(config_error, "Keys passed to ReKeyGen were not generated with this " "crypto context"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->ReKeyGen(newKey, oldKey); if (doTiming) { timeSamples->push_back(TimingInfo(OpReKeyGenPubPri, TOC_US(t))); } return r; } /** * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption * NOTE this functionality has been completely removed from PALISADE * @param newKey (private) * @param oldKey (private) * @return new evaluation key */ LPEvalKey<Element> ReKeyGen(const LPPrivateKey<Element> newKey, const LPPrivateKey<Element> oldKey) const __attribute__((deprecated("functionality removed from PALISADE"))); /** * EvalMultKeyGen creates a key that can be used with the PALISADE EvalMult * operator * the new evaluation key is stored in cryptocontext * @param key */ void EvalMultKeyGen(const LPPrivateKey<Element> key); /** * EvalMultKeysGen creates a vector of evalmult keys that can be used with the * PALISADE EvalMult operator 1st key (for s^2) is used for multiplication of * ciphertexts of depth 1 2nd key (for s^3) is used for multiplication of * ciphertexts of depth 2, etc.
* a vector of new evaluation keys is stored in cryptocontext * * @param key */ void EvalMultKeysGen(const LPPrivateKey<Element> key); /** * GetEvalMultKeyVector fetches the eval mult keys for a given KeyID * @param keyID * @return key vector from ID */ static const vector<LPEvalKey<Element>>& GetEvalMultKeyVector( const string& keyID); /** * GetEvalMultKeys * @return map of all the keys */ static std::map<string, std::vector<LPEvalKey<Element>>>& GetAllEvalMultKeys(); /** * KeySwitchGen creates a key that can be used with the PALISADE KeySwitch * operation * @param key1 * @param key2 * @return new evaluation key */ LPEvalKey<Element> KeySwitchGen(const LPPrivateKey<Element> key1, const LPPrivateKey<Element> key2) const { if (key1 == nullptr || key2 == nullptr || Mismatched(key1->GetCryptoContext()) || Mismatched(key2->GetCryptoContext())) PALISADE_THROW(config_error, "Keys passed to KeySwitchGen were not generated with this " "crypto context"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->KeySwitchGen(key1, key2); if (doTiming) { timeSamples->push_back(TimingInfo(OpKeySwitchGen, TOC_US(t))); } return r; } /** * Encrypt a plaintext using a given public key * @param publicKey * @param plaintext * @return ciphertext (or null on failure) */ Ciphertext<Element> Encrypt(const LPPublicKey<Element> publicKey, Plaintext plaintext) { if (publicKey == nullptr) PALISADE_THROW(type_error, "null key passed to Encrypt"); if (plaintext == nullptr) PALISADE_THROW(type_error, "Input plaintext is nullptr"); if (Mismatched(publicKey->GetCryptoContext())) PALISADE_THROW( config_error, "key passed to Encrypt was not generated with this crypto context"); TimeVar t; if (doTiming) TIC(t); Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt( publicKey, plaintext->GetElement<Element>()); if (ciphertext) { ciphertext->SetEncodingType(plaintext->GetEncodingType()); ciphertext->SetScalingFactor(plaintext->GetScalingFactor()); ciphertext->SetDepth(plaintext->GetDepth()); ciphertext->SetLevel(plaintext->GetLevel()); } if (doTiming) { timeSamples->push_back(TimingInfo(OpEncryptPub, TOC_US(t))); } return ciphertext; } /** * Encrypt a plaintext using a given private key * @param privateKey * @param plaintext * @return ciphertext (or null on failure) */ Ciphertext<Element> Encrypt(const LPPrivateKey<Element> privateKey, Plaintext plaintext) const { if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext())) PALISADE_THROW( config_error, "key passed to Encrypt was not generated with this crypto context"); if (plaintext == nullptr) PALISADE_THROW(type_error, "Input plaintext is nullptr"); TimeVar t; if (doTiming) TIC(t); Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt( privateKey, plaintext->GetElement<Element>()); if (ciphertext) { ciphertext->SetEncodingType(plaintext->GetEncodingType()); ciphertext->SetScalingFactor(plaintext->GetScalingFactor()); ciphertext->SetDepth(plaintext->GetDepth()); ciphertext->SetLevel(plaintext->GetLevel()); } if (doTiming) { timeSamples->push_back(TimingInfo(OpEncryptPriv, TOC_US(t))); } return ciphertext; } /** * Encrypt a matrix of Plaintext * @param publicKey - for encryption * @param plaintext - to encrypt * @return a matrix of RationalCiphertexts created by encrypting the * plaintext */ shared_ptr<Matrix<RationalCiphertext<Element>>> EncryptMatrix( const LPPublicKey<Element> publicKey, Matrix<Plaintext>& plaintext) {
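// Encrypt every entry of the plaintext matrix under the same public key; // each result is stored as the numerator of a RationalCiphertext.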
if (publicKey == nullptr || Mismatched(publicKey->GetCryptoContext())) PALISADE_THROW(config_error, "key passed to EncryptMatrix was not generated with this " "crypto context"); auto zeroAlloc = [=]() { return RationalCiphertext<Element>(publicKey->GetCryptoContext(), true); }; auto cipherResults = std::make_shared<Matrix<RationalCiphertext<Element>>>( zeroAlloc, plaintext.GetRows(), plaintext.GetCols()); TimeVar t; if (doTiming) TIC(t); for (size_t row = 0; row < plaintext.GetRows(); row++) { for (size_t col = 0; col < plaintext.GetCols(); col++) { if (plaintext(row, col)->Encode() == false) return nullptr; Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt( publicKey, plaintext(row, col)->GetElement<Element>()); if (ciphertext) { ciphertext->SetEncodingType(plaintext(row, col)->GetEncodingType()); } (*cipherResults)(row, col).SetNumerator(ciphertext); } } if (doTiming) { timeSamples->push_back(TimingInfo(OpEncryptMatrixPlain, TOC_US(t))); } return cipherResults; } /** * Encrypt a matrix of Plaintext * @param publicKey - for encryption * @param plaintext - to encrypt * @return a matrix of Ciphertexts created by encrypting the * plaintext */ Matrix<Ciphertext<Element>> EncryptMatrixCiphertext( const LPPublicKey<Element> publicKey, Matrix<Plaintext>& plaintext) { if (publicKey == nullptr || Mismatched(publicKey->GetCryptoContext())) PALISADE_THROW(config_error, "key passed to EncryptMatrix was not generated with this " "crypto context"); auto zeroAlloc = [=]() { return Ciphertext<Element>(std::make_shared<CiphertextImpl<Element>>( publicKey->GetCryptoContext())); }; Matrix<Ciphertext<Element>> cipherResults(zeroAlloc, plaintext.GetRows(), plaintext.GetCols()); TimeVar t; if (doTiming) TIC(t); for (size_t row = 0; row < plaintext.GetRows(); row++) { for (size_t col = 0; col < plaintext.GetCols(); col++) { if (plaintext(row, col)->Encode() == false) PALISADE_THROW(math_error, "Plaintext is not encoded"); Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt( publicKey, plaintext(row, col)->GetElement<Element>()); if (ciphertext) { ciphertext->SetEncodingType(plaintext(row, col)->GetEncodingType()); } cipherResults(row, col) = (ciphertext); } } if (doTiming) { timeSamples->push_back(TimingInfo(OpEncryptMatrixPlain, TOC_US(t))); } return cipherResults; } /** * Perform an encryption by reading plaintext from a stream, serializing each * piece of ciphertext, and writing the serializations to an output stream * @param publicKey - the encryption key in use * @param instream - where to read the input from * @param outstream - where to write the serialization to */ void EncryptStream(const LPPublicKey<Element> publicKey, std::istream& instream, std::ostream& outstream) const __attribute__(( deprecated("serialization changed, see wiki for details"))); // PLAINTEXT FACTORY METHODS // FIXME to be deprecated in 2.0 /** * MakeScalarPlaintext constructs a ScalarEncoding in this context * @param value * @return plaintext */ Plaintext MakeScalarPlaintext(int64_t value) const { auto p = PlaintextFactory::MakePlaintext(Scalar, this->GetElementParams(), this->GetEncodingParams(), value); return p; } /** * MakeStringPlaintext constructs a StringEncoding in this context * @param str * @return plaintext */ Plaintext MakeStringPlaintext(const string& str)
const { auto p = PlaintextFactory::MakePlaintext(String, this->GetElementParams(), this->GetEncodingParams(), str); return p; } /** * MakeIntegerPlaintext constructs an IntegerEncoding in this context * @param value * @return plaintext */ Plaintext MakeIntegerPlaintext(int64_t value) const { auto p = PlaintextFactory::MakePlaintext(Integer, this->GetElementParams(), this->GetEncodingParams(), value); return p; } /** * MakeFractionalPlaintext constructs a FractionalEncoding in this context * @param value * @param truncatedBits limit on the number of fractional bits * @return plaintext */ Plaintext MakeFractionalPlaintext(int64_t value, size_t truncatedBits = 0) const { auto p = PlaintextFactory::MakePlaintext( Fractional, this->GetElementParams(), this->GetEncodingParams(), value, truncatedBits); return p; } /** * MakeCoefPackedPlaintext constructs a CoefPackedEncoding in this context * @param value * @return plaintext */ Plaintext MakeCoefPackedPlaintext(const vector<int64_t>& value) const { auto p = PlaintextFactory::MakePlaintext( CoefPacked, this->GetElementParams(), this->GetEncodingParams(), value); return p; } /** * MakePackedPlaintext constructs a PackedEncoding in this context * @param value * @return plaintext */ Plaintext MakePackedPlaintext(const vector<int64_t>& value) const { auto p = PlaintextFactory::MakePlaintext(Packed, this->GetElementParams(), this->GetEncodingParams(), value); return p; } /** * MakePlaintext static that takes a cc and calls the Plaintext Factory * @param encoding * @param cc * @param value * @return */ template <typename Value1> static Plaintext MakePlaintext(PlaintextEncodings encoding, CryptoContext<Element> cc, const Value1& value) { return PlaintextFactory::MakePlaintext(encoding, cc->GetElementParams(), cc->GetEncodingParams(), value); } template <typename Value1, typename Value2> static Plaintext MakePlaintext(PlaintextEncodings encoding, CryptoContext<Element> cc, const Value1& value, const Value2& value2) { return PlaintextFactory::MakePlaintext(encoding, cc->GetElementParams(), cc->GetEncodingParams(), value, value2); } /** * COMPLEX ARITHMETIC IS NOT AVAILABLE STARTING WITH 1.10.6, * AND THIS METHOD WILL BE DEPRECATED. USE THE REAL-NUMBER METHOD INSTEAD.
* MakeCKKSPackedPlaintext constructs a CKKSPackedEncoding in this context * from a vector of complex numbers * @param value - input vector * @param depth - depth used to encode the vector * @param level - level at which the vector will get encrypted * @param params - parameters to be used for the ciphertext * @return plaintext */ virtual Plaintext MakeCKKSPackedPlaintext( const std::vector<std::complex<double>>& value, size_t depth = 1, uint32_t level = 0, const shared_ptr<ParmType> params = nullptr) const { Plaintext p; const auto cryptoParamsCKKS = std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>( this->GetCryptoParameters()); double scFact = cryptoParamsCKKS->GetScalingFactorOfLevel(level); if (params == nullptr) { shared_ptr<ILDCRTParams<DCRTPoly::Integer>> elemParamsPtr; if (level != 0) { ILDCRTParams<DCRTPoly::Integer> elemParams = *(cryptoParamsCKKS->GetElementParams()); for (uint32_t i = 0; i < level; i++) { elemParams.PopLastParam(); } elemParamsPtr = std::make_shared<ILDCRTParams<DCRTPoly::Integer>>(elemParams); } else { elemParamsPtr = cryptoParamsCKKS->GetElementParams(); } p = Plaintext(std::make_shared<CKKSPackedEncoding>( elemParamsPtr, this->GetEncodingParams(), value, depth, level, scFact)); } else { p = Plaintext(std::make_shared<CKKSPackedEncoding>( params, this->GetEncodingParams(), value, depth, level, scFact)); } p->Encode(); return p; } /** * MakeCKKSPackedPlaintext constructs a CKKSPackedEncoding in this context * from a vector of real numbers * @param value - input vector * @param depth - depth used to encode the vector * @param level - level at which the vector will get encrypted * @param params - parameters to be used for the ciphertext * @return plaintext */ virtual Plaintext MakeCKKSPackedPlaintext( const std::vector<double>& value, size_t depth = 1, uint32_t level = 0, const shared_ptr<ParmType> params = nullptr) const { std::vector<std::complex<double>> complexValue(value.size()); std::transform(value.begin(), value.end(), complexValue.begin(), [](double da) { return std::complex<double>(da); }); return MakeCKKSPackedPlaintext(complexValue, depth, level, params); } /** * GetPlaintextForDecrypt returns a new Plaintext to be used in decryption.
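* For example, the DecryptMatrix methods below use this helper to allocate * result plaintexts whose encoding matches the ciphertext being decrypted.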
* * @param pte Type of plaintext we want to return * @param evp Element parameters * @param ep Encoding parameters * @return plaintext */ static Plaintext GetPlaintextForDecrypt(PlaintextEncodings pte, shared_ptr<ParmType> evp, EncodingParams ep); public: /** * Decrypt a single ciphertext into the appropriate plaintext * * @param privateKey - decryption key * @param ciphertext - ciphertext to decrypt * @param plaintext - resulting plaintext object pointer is here * @return */ DecryptResult Decrypt(const LPPrivateKey<Element> privateKey, ConstCiphertext<Element> ciphertext, Plaintext* plaintext); /** * Decrypt method for a matrix of ciphertexts * @param privateKey - for decryption * @param ciphertext - matrix of encrypted ciphertexts * @param plaintext - pointer to the destination matrix of plaintexts * @return size of plaintext */ DecryptResult DecryptMatrix( const LPPrivateKey<Element> privateKey, const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext, shared_ptr<Matrix<Plaintext>>* numerator, shared_ptr<Matrix<Plaintext>>* denominator) const { if (ciphertext == nullptr) PALISADE_THROW(type_error, "Input ciphertext is nullptr"); if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to DecryptMatrix was not generated " "with this crypto context"); // edge case if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0)) return DecryptResult(); const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator(); // need to build matrices for the result Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); auto zeroPackingAlloc = [=]() { return Plaintext(ptx); }; *numerator = std::make_shared<Matrix<Plaintext>>( zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()); *denominator = std::make_shared<Matrix<Plaintext>>( zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()); TimeVar t; if (doTiming) TIC(t); for (size_t row = 0; row < ciphertext->GetRows(); row++) { for (size_t col = 0; col < ciphertext->GetCols(); col++) { if (Mismatched((*ciphertext)(row, col).GetCryptoContext())) PALISADE_THROW(config_error, "A ciphertext passed to DecryptMatrix was not " "generated with this crypto context"); const Ciphertext<Element> ctN = (*ciphertext)(row, col).GetNumerator(); // determine which type of plaintext that you need to decrypt into Plaintext decryptedNumerator = GetPlaintextForDecrypt( ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt( privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>()); if (resultN.isValid == false) return resultN; (**numerator)(row, col) = decryptedNumerator; (**numerator)(row, col)->Decode(); Plaintext decryptedDenominator = GetPlaintextForDecrypt( ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); if ((*ciphertext)(row, col).GetIntegerFlag() == true) { decryptedDenominator->GetElement<Poly>().SetValuesToZero(); decryptedDenominator->GetElement<Poly>().at(0) = 1; } else { const Ciphertext<Element> ctD = (*ciphertext)(row, col).GetDenominator(); DecryptResult resultD = GetEncryptionAlgorithm()->Decrypt( privateKey, ctD, &decryptedDenominator->GetElement<NativePoly>()); if (resultD.isValid == false) return resultD; } (**denominator)(row, col) = decryptedDenominator; (**denominator)(row, col)->Decode(); } } if (doTiming) { timeSamples->push_back(TimingInfo(OpDecryptMatrixPlain, TOC_US(t))); }
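// Report the length of the last decrypted numerator entry as the overall // decryption result.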
return DecryptResult( (**numerator)((*numerator)->GetRows() - 1, (*numerator)->GetCols() - 1) ->GetLength()); } /** * Decrypt method for a matrix of ciphertexts * @param privateKey - for decryption * @param ciphertext - matrix of encrypted ciphertexts * @param plaintext - pointer to the destination matrix of plaintexts * @return size of plaintext */ DecryptResult DecryptMatrixCiphertext( const LPPrivateKey<Element> privateKey, const Matrix<Ciphertext<Element>> ciphertext, Matrix<Plaintext>* numerator) const { if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to DecryptMatrix was not generated " "with this crypto context"); // edge case if ((ciphertext.GetCols() == 0) && (ciphertext.GetRows() == 0)) return DecryptResult(); const Ciphertext<Element> ctN = (ciphertext)(0, 0); // need to build matrices for the result // Plaintext ptx = // GetPlaintextForDecrypt(ctN->GetEncodingType(), // this->GetElementParams(), // this->GetEncodingParams()); // auto zeroPackingAlloc = [=]() { return Plaintext(ptx); }; // numerator = std::make_shared<Matrix<Plaintext>>(zeroPackingAlloc, // ciphertext.GetRows(), // ciphertext.GetCols()); TimeVar t; if (doTiming) TIC(t); for (size_t row = 0; row < ciphertext.GetRows(); row++) { for (size_t col = 0; col < ciphertext.GetCols(); col++) { if (Mismatched((ciphertext(row, col))->GetCryptoContext())) PALISADE_THROW(config_error, "A ciphertext passed to DecryptMatrix was not " "generated with this crypto context"); const Ciphertext<Element> ctN = (ciphertext)(row, col); // determine which type of plaintext that you need to decrypt into Plaintext decryptedNumerator = GetPlaintextForDecrypt( ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt( privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>()); if (resultN.isValid == false) return resultN; (*numerator)(row, col) = decryptedNumerator; (*numerator)(row, col)->Decode(); } } if (doTiming) { timeSamples->push_back(TimingInfo(OpDecryptMatrixPlain, TOC_US(t))); } return DecryptResult( (*numerator)(numerator->GetRows() - 1, numerator->GetCols() - 1) ->GetLength()); } /** * Decrypt method for numerators in a matrix of ciphertexts (packed encoding) * @param privateKey - for decryption * @param ciphertext - matrix of encrypted ciphertexts * @param plaintext - pointer to the destination matrix of plaintexts * @return size of plaintext */ DecryptResult DecryptMatrixNumerator( const LPPrivateKey<Element> privateKey, const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext, shared_ptr<Matrix<Plaintext>>* numerator) const { if (ciphertext == nullptr) PALISADE_THROW(type_error, "Input ciphertext is nullptr"); if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to DecryptMatrix was not generated " "with this crypto context"); // edge case if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0)) return DecryptResult(); TimeVar t; if (doTiming) TIC(t); // force all precomputations to take place in advance if (Mismatched((*ciphertext)(0, 0).GetCryptoContext())) PALISADE_THROW(config_error, "A ciphertext passed to DecryptMatrix was not generated " "with this crypto context"); const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator(); // need to build a numerator matrix for the result Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(),
this->GetEncodingParams()); auto zeroPackingAlloc = [=]() { return Plaintext(ptx); }; *numerator = std::make_shared<Matrix<Plaintext>>( zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()); Plaintext decryptedNumerator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt( privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>()); if (resultN.isValid == false) return resultN; (**numerator)(0, 0) = decryptedNumerator; (**numerator)(0, 0)->Decode(); for (size_t row = 0; row < ciphertext->GetRows(); row++) { #pragma omp parallel for for (size_t col = 0; col < ciphertext->GetCols(); col++) { if (row + col > 0) { if (Mismatched((*ciphertext)(row, col).GetCryptoContext())) PALISADE_THROW(config_error, "A ciphertext passed to DecryptMatrix was not " "generated with this crypto context"); const Ciphertext<Element> ctN = (*ciphertext)(row, col).GetNumerator(); Plaintext decryptedNumerator = GetPlaintextForDecrypt( ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); GetEncryptionAlgorithm()->Decrypt( privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>()); (**numerator)(row, col) = decryptedNumerator; (**numerator)(row, col)->Decode(); } } } if (doTiming) { timeSamples->push_back(TimingInfo(OpDecryptMatrixPacked, TOC_US(t))); } return DecryptResult( (**numerator)((*numerator)->GetRows() - 1, (*numerator)->GetCols() - 1) ->GetLength()); } /** * read instream for a sequence of serialized ciphertext; deserialize it, * decrypt it, and write it to outstream * @param privateKey - reference to the decryption key * @param instream - input stream with sequence of serialized ciphertexts * @param outstream - output stream for plaintext * @return total bytes processed */ size_t DecryptStream(const LPPrivateKey<Element> privateKey, std::istream& instream, std::ostream& outstream) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * ReEncrypt - Proxy Re Encryption mechanism for PALISADE * @param evalKey - evaluation key from the PRE keygen method * @param ciphertext - the ciphertext to re-encrypt * @param publicKey the public key of the recipient of the re-encrypted * ciphertext. * @return the re-encrypted ciphertext */ Ciphertext<Element> ReEncrypt( LPEvalKey<Element> evalKey, ConstCiphertext<Element> ciphertext, const LPPublicKey<Element> publicKey = nullptr) const { if (evalKey == nullptr || Mismatched(evalKey->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to ReEncrypt was not generated with " "this crypto context"); if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext())) PALISADE_THROW(config_error, "The ciphertext passed to ReEncrypt was not generated " "with this crypto context"); TimeVar t; if (doTiming) TIC(t); Ciphertext<Element> newCiphertext = GetEncryptionAlgorithm()->ReEncrypt(evalKey, ciphertext, publicKey); if (doTiming) { timeSamples->push_back(TimingInfo(OpReEncrypt, TOC_US(t))); } return newCiphertext; } /** * read instream for a serialized ciphertext.
deserialize, re-encrypt, * serialize, and write to outstream * @param evalKey - reference to the re-encryption key * @param instream - input stream with sequence of serialized ciphertext * @param outstream - output stream with sequence of serialized re-encrypted * ciphertext */ void ReEncryptStream(const LPEvalKey<Element> evalKey, std::istream& instream, std::ostream& outstream, const LPPublicKey<Element> publicKey = nullptr) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * EvalAdd - PALISADE EvalAdd method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 + ct2 */ Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAdd(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAdd, TOC_US(t))); } return rv; } /** * EvalAdd - PALISADE EvalAddMutable method for a pair of ciphertexts. * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. * * @param ct1 * @param ct2 * @return new ciphertext for ct1 + ct2 */ Ciphertext<Element> EvalAddMutable(Ciphertext<Element>& ct1, Ciphertext<Element>& ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAddMutable(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAdd, TOC_US(t))); } return rv; } /** * EvalAddMatrix - PALISADE EvalAdd method for a pair of matrices of * ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 + ct2 */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalAddMatrix( const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { TypeCheck((*ct1)(0, 0), (*ct2)(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<RationalCiphertext<Element>> rv = *ct1 + *ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddMatrix, TOC_US(t))); } return std::make_shared<Matrix<RationalCiphertext<Element>>>(rv); } /** * EvalAddMatrix - PALISADE EvalAdd method for a pair of matrices of * ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 + ct2 */ Matrix<Ciphertext<Element>> EvalAddMatrix( const Matrix<Ciphertext<Element>>& ct1, const Matrix<Ciphertext<Element>>& ct2) const { TypeCheck(ct1(0, 0), ct2(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<Ciphertext<Element>> rv = ct1 + ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddMatrix, TOC_US(t))); } // Matrix<Ciphertext<Element>> a(rv); return rv; } /** * EvalSub - PALISADE EvalSub method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 - ct2 */ Ciphertext<Element> EvalSub(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalSub(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSub, TOC_US(t))); } return rv; } /** * EvalSub - PALISADE EvalSubMutable method for a pair of ciphertexts * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. 
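* * Illustrative usage sketch (assumes a context cc and key pair kp produced earlier, e.g. by cc->KeyGen(); names are for exposition only): * @code * auto c1 = cc->Encrypt(kp.publicKey, cc->MakePackedPlaintext({8, 6, 4})); * auto c2 = cc->Encrypt(kp.publicKey, cc->MakePackedPlaintext({1, 2, 3})); * // the mutable call may rescale or level-reduce c1 and c2 in place * auto diff = cc->EvalSubMutable(c1, c2); // encrypts {7, 4, 1} * @endcode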
* * @param ct1 * @param ct2 * @return new ciphertext for ct1 - ct2 */ Ciphertext<Element> EvalSubMutable(Ciphertext<Element>& ct1, Ciphertext<Element>& ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalSubMutable(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSub, TOC_US(t))); } return rv; } /** * EvalSubMatrix - PALISADE EvalSub method for a pair of matrices of * ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 - ct2 */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalSubMatrix( const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { TypeCheck((*ct1)(0, 0), (*ct2)(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<RationalCiphertext<Element>> rv = *ct1 - *ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubMatrix, TOC_US(t))); } return std::make_shared<Matrix<RationalCiphertext<Element>>>(rv); } /** * EvalSubMatrix - PALISADE EvalSub method for a pair of matrices of * ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 - ct2 */ Matrix<Ciphertext<Element>> EvalSubMatrix( const Matrix<Ciphertext<Element>>& ct1, const Matrix<Ciphertext<Element>>& ct2) const { TypeCheck(ct1(0, 0), ct2(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<Ciphertext<Element>> rv = ct1 - ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubMatrix, TOC_US(t))); } return rv; } /** * EvalAdd - PALISADE EvalAdd method for a ciphertext and plaintext * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext + plaintext */ Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext, ConstPlaintext plaintext) const { TypeCheck(ciphertext, plaintext); TimeVar t; if (doTiming) TIC(t); plaintext->SetFormat(EVALUATION); auto rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, plaintext); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddPlain, TOC_US(t))); } return rv; } /** * EvalAdd - PALISADE EvalAddMutable method for a ciphertext and plaintext * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. 
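* * Illustrative usage sketch (assumes cc and kp as in the sketch above): * @code * Plaintext p = cc->MakePackedPlaintext({1, 1, 1}); * auto c = cc->Encrypt(kp.publicKey, cc->MakePackedPlaintext({2, 3, 4})); * auto sum = cc->EvalAddMutable(c, p); // encrypts {3, 4, 5}; c may be rescaled * @endcode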
* * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext + plaintext */ Ciphertext<Element> EvalAddMutable(Ciphertext<Element>& ciphertext, Plaintext plaintext) const { TypeCheck((ConstCiphertext<Element>)ciphertext, (ConstPlaintext)plaintext); TimeVar t; if (doTiming) TIC(t); plaintext->SetFormat(EVALUATION); auto rv = GetEncryptionAlgorithm()->EvalAddMutable(ciphertext, plaintext); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddPlain, TOC_US(t))); } return rv; } /** * EvalAdd - PALISADE EvalAdd method for a ciphertext and constant * @param ciphertext * @param constant * @return new ciphertext for ciphertext + constant */ Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext, double constant) const { TimeVar t; Ciphertext<Element> rv; if (constant >= 0) { if (doTiming) TIC(t); rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddConst, TOC_US(t))); } } else { if (doTiming) TIC(t); rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, -constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddConst, TOC_US(t))); } } return rv; } /** * EvalLinearWSum - PALISADE EvalLinearWSum method to compute a linear * weighted sum * * @param ciphertexts a list of ciphertexts * @param constants a list of weights * @return new ciphertext containing the weighted sum */ Ciphertext<Element> EvalLinearWSum(vector<Ciphertext<Element>> ciphertexts, vector<double> constants) const { TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalLinearWSum(ciphertexts, constants); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalLinearWSum, TOC_US(t))); } return rv; } /** * EvalLinearWSum - method to compute a linear weighted sum. * This is a mutable version, meaning the level/depth of input * ciphertexts may change in the process. 
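* * Illustrative usage sketch (weighted sums with double weights are typically used with CKKS-style contexts; the ciphertexts c0 and c1 are assumed to exist): * @code * // computes 0.5*c0 + 2.0*c1 homomorphically * vector<Ciphertext<Element>> cts{c0, c1}; * vector<double> weights{0.5, 2.0}; * auto wsum = cc->EvalLinearWSumMutable(cts, weights); // inputs may change level * @endcode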
* * @param ciphertexts a list of ciphertexts * @param constants a list of weights * @return new ciphertext containing the weighted sum */ Ciphertext<Element> EvalLinearWSumMutable( vector<Ciphertext<Element>> ciphertexts, vector<double> constants) const { TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalLinearWSumMutable(ciphertexts, constants); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalLinearWSum, TOC_US(t))); } return rv; } inline Ciphertext<Element> EvalLinearWSum( vector<double> constants, vector<Ciphertext<Element>> ciphertexts) const { return EvalLinearWSum(ciphertexts, constants); } inline Ciphertext<Element> EvalLinearWSumMutable( vector<double> constants, vector<Ciphertext<Element>> ciphertexts) const { return EvalLinearWSumMutable(ciphertexts, constants); } inline Ciphertext<Element> EvalAdd( ConstPlaintext plaintext, ConstCiphertext<Element> ciphertext) const { return EvalAdd(ciphertext, plaintext); } inline Ciphertext<Element> EvalAddMutable( Plaintext plaintext, Ciphertext<Element>& ciphertext) const { return EvalAddMutable(ciphertext, plaintext); } inline Ciphertext<Element> EvalAdd( double constant, ConstCiphertext<Element> ciphertext) const { return EvalAdd(ciphertext, constant); } /** * EvalSubPlain - PALISADE EvalSub method for a ciphertext and plaintext * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext - plaintext */ Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext, ConstPlaintext plaintext) const { TypeCheck(ciphertext, plaintext); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, plaintext); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubPlain, TOC_US(t))); } return rv; } /** * EvalSubPlain - PALISADE EvalSubMutable method for a ciphertext and * plaintext. This is a mutable version - input ciphertexts may get * automatically rescaled, or level-reduced. 
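* * Illustrative usage sketch (assumes cc, kp, and an existing ciphertext c): * @code * Plaintext p = cc->MakePackedPlaintext({1, 0, 1}); * auto diff = cc->EvalSubMutable(c, p); // c - p; c may be rescaled in place * @endcode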
* * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext - plaintext */ Ciphertext<Element> EvalSubMutable(Ciphertext<Element>& ciphertext, Plaintext plaintext) const { TypeCheck((ConstCiphertext<Element>)ciphertext, (ConstPlaintext)plaintext); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalSubMutable(ciphertext, plaintext); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubPlain, TOC_US(t))); } return rv; } /** * EvalSub - PALISADE EvalSub method for a ciphertext and constant * @param ciphertext * @param constant * @return new ciphertext for ciphertext - constant */ Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext, double constant) const { TimeVar t; Ciphertext<Element> rv; if (constant >= 0) { if (doTiming) TIC(t); rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubConst, TOC_US(t))); } } else { if (doTiming) TIC(t); rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, -constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubConst, TOC_US(t))); } } return rv; } inline Ciphertext<Element> EvalSub( ConstPlaintext plaintext, ConstCiphertext<Element> ciphertext) const { return EvalAdd(EvalNegate(ciphertext), plaintext); } inline Ciphertext<Element> EvalSubMutable( Plaintext plaintext, Ciphertext<Element>& ciphertext) const { Ciphertext<Element> negated = EvalNegate(ciphertext); Ciphertext<Element> result = EvalAddMutable(negated, plaintext); ciphertext = EvalNegate(negated); return result; } inline Ciphertext<Element> EvalSub( double constant, ConstCiphertext<Element> ciphertext) const { return EvalAdd(EvalNegate(ciphertext), constant); } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts - with key * switching * @param ct1 * @param ct2 * @return new ciphertext for ct1 * ct2 */ Ciphertext<Element> EvalMult(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { TypeCheck(ct1, ct2); auto ek = GetEvalMultKeyVector(ct1->GetKeyTag()); if (!ek.size()) { PALISADE_THROW(type_error, "Evaluation key has not been generated for EvalMult"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2, ek[0]); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t))); } return rv; } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts - with key * switching. This is a mutable version - input ciphertexts may get * automatically rescaled, or level-reduced. 
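* * Illustrative usage sketch (assumes cc and kp): * @code * cc->EvalMultKeyGen(kp.secretKey); // generate the relinearization key once * auto c1 = cc->Encrypt(kp.publicKey, cc->MakePackedPlaintext({2, 3})); * auto c2 = cc->Encrypt(kp.publicKey, cc->MakePackedPlaintext({4, 5})); * auto prod = cc->EvalMultMutable(c1, c2); // encrypts {8, 15} * @endcode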
* * @param ct1 * @param ct2 * @return new ciphertext for ct1 * ct2 */ Ciphertext<Element> EvalMultMutable(Ciphertext<Element>& ct1, Ciphertext<Element>& ct2) const { TypeCheck(ct1, ct2); auto ek = GetEvalMultKeyVector(ct1->GetKeyTag()); if (!ek.size()) { PALISADE_THROW(type_error, "Evaluation key has not been generated for EvalMult"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ct1, ct2, ek[0]); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t))); } return rv; } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts - no key * switching (relinearization) * @param ct1 * @param ct2 * @return new ciphertext for ct1 * ct2 */ Ciphertext<Element> EvalMultNoRelin(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t))); } return rv; } /** * EvalMultMany - PALISADE function for evaluating multiplication on * ciphertext followed by relinearization operation (at the end). It computes * the multiplication in a binary tree manner. Also, it reduces the number of * elements in the ciphertext to two after each multiplication. * Currently it assumes that two consecutive input arguments have * total depth smaller than the supported depth. Otherwise, it throws an * error. * * @param ct is the ciphertext list. * * @return new ciphertext. */ Ciphertext<Element> EvalMultMany( const vector<Ciphertext<Element>>& ct) const { // input parameter check if (!ct.size()) PALISADE_THROW(type_error, "Empty input ciphertext vector"); const auto ek = GetEvalMultKeyVector(ct[0]->GetKeyTag()); if (ek.size() < (ct[0]->GetElements().size() - 2)) { PALISADE_THROW(type_error, "Insufficient value was used for maxDepth to generate " "keys for EvalMult"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMultMany(ct, ek); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMultMany, TOC_US(t))); } return rv; } /** * EvalAddMany - Evaluate addition on a vector of ciphertexts. * It computes the addition in a binary tree manner. * * @param ctList is the list of ciphertexts. * * @return new ciphertext. */ Ciphertext<Element> EvalAddMany( const vector<Ciphertext<Element>>& ctList) const { // input parameter check if (!ctList.size()) PALISADE_THROW(type_error, "Empty input ciphertext vector"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAddMany(ctList); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddMany, TOC_US(t))); } return rv; } /** * EvalAddManyInPlace - Evaluate addition on a vector of ciphertexts. * Addition is computed in a binary tree manner. Difference with EvalAddMany * is that EvalAddManyInPlace uses the input ciphertext vector to store * intermediate results, to avoid the overhead of using extra temporary * space. * * @param ctList is the list of ciphertexts. * * @return new ciphertext. */ Ciphertext<Element> EvalAddManyInPlace( vector<Ciphertext<Element>>& ctList) const { // input parameter check if (!ctList.size()) PALISADE_THROW(type_error, "Empty input ciphertext vector"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAddManyInPlace(ctList); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddManyInPlace, TOC_US(t))); } return rv; } /** * Function for evaluating multiplication on ciphertext followed by * relinearization operation. 
Currently it assumes that the input arguments * have total depth smaller than the supported depth. Otherwise, it throws an * error. * * @param ct1 first input ciphertext. * @param ct2 second input ciphertext. * * @return new ciphertext */ Ciphertext<Element> EvalMultAndRelinearize( ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { // input parameter check if (!ct1 || !ct2) PALISADE_THROW(type_error, "Input ciphertext is nullptr"); const auto ek = GetEvalMultKeyVector(ct1->GetKeyTag()); if (ek.size() < (ct1->GetElements().size() + ct2->GetElements().size() - 3)) { PALISADE_THROW(type_error, "Insufficient value was used for maxDepth to generate " "keys for EvalMult"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMultAndRelinearize(ct1, ct2, ek); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t))); } return rv; } /** * Function for relinearization of a ciphertext. * * @param ct input ciphertext. * * @return relinearized ciphertext */ Ciphertext<Element> Relinearize(ConstCiphertext<Element> ct) const { // input parameter check if (!ct) PALISADE_THROW(type_error, "Input ciphertext is nullptr"); const auto ek = GetEvalMultKeyVector(ct->GetKeyTag()); if (ek.size() < (ct->GetElements().size() - 2)) { PALISADE_THROW(type_error, "Insufficient value was used for maxDepth to generate " "keys for EvalMult"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->Relinearize(ct, ek); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalRelin, TOC_US(t))); } return rv; } /** * EvalMult - PALISADE EvalMult method for plaintext * ciphertext * @param pt2 * @param ct1 * @return new ciphertext for ct1 * pt2 */ inline Ciphertext<Element> EvalMult(ConstPlaintext pt2, ConstCiphertext<Element> ct1) const { return EvalMult(ct1, pt2); } /** * EvalMult - PALISADE EvalMultMutable method for plaintext * ciphertext * @param pt2 * @param ct1 * @return new ciphertext for ct1 * pt2 */ inline Ciphertext<Element> EvalMultMutable(Plaintext pt2, Ciphertext<Element>& ct1) const { return EvalMultMutable(ct1, pt2); } /** * EvalMult - PALISADE EvalMult method for constant * ciphertext * @param constant * @param ct1 * @return new ciphertext for ct1 * constant */ inline Ciphertext<Element> EvalMult(double constant, ConstCiphertext<Element> ct1) const { return EvalMult(ct1, constant); } inline Ciphertext<Element> EvalMultMutable(double constant, Ciphertext<Element>& ct1) const { return EvalMultMutable(ct1, constant); } /** * EvalRightShift - works only for Fractional Encoding * @param ct1 input ciphertext * @param divisor - the right-shift amount * @return new ciphertext for ct1 right-shifted by divisor */ Ciphertext<Element> EvalRightShift(ConstCiphertext<Element> ct1, size_t divisor) const { if (ct1 && ct1->GetEncodingType() != Fractional) { std::stringstream ss; ss << "A " << Fractional << " encoded ciphertext is required for the EvalRightShift operation"; PALISADE_THROW(type_error, ss.str()); } Plaintext plaintextShift = MakeFractionalPlaintext(0, divisor); TypeCheck(ct1, plaintextShift); double start = 0; if (doTiming) start = currentDateTime(); auto rv = EvalMult(ct1, plaintextShift); if (doTiming) { timeSamples->push_back( TimingInfo(OpEvalRightShift, currentDateTime() - start)); } return rv; } /** * EvalMult - PALISADE EvalMult method for plaintext * ciphertext * @param ct1 * @param pt2 * @return new ciphertext for ct1 * pt2 */ Ciphertext<Element> EvalMult(ConstCiphertext<Element> ct1, ConstPlaintext pt2) const { TypeCheck(ct1, pt2); TimeVar t; if (doTiming) TIC(t); auto rv = 
GetEncryptionAlgorithm()->EvalMult(ct1, pt2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t))); } return rv; } /** * EvalMult - PALISADE EvalMultMutable method for plaintext * ciphertext * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. * * @param ct1 * @param pt2 * @return new ciphertext for ct1 * pt2 */ Ciphertext<Element> EvalMultMutable(Ciphertext<Element>& ct1, Plaintext pt2) const { TypeCheck(ct1, pt2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ct1, pt2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t))); } return rv; } /** * EvalMult - PALISADE EvalMult method for a ciphertext and constant * @param ciphertext * @param constant * @return new ciphertext for ciphertext * constant */ Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext, double constant) const { // input parameter check if (!ciphertext) { PALISADE_THROW(type_error, "Input ciphertext is nullptr"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMult(ciphertext, constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMultConst, TOC_US(t))); } return rv; } /** * EvalMult - PALISADE EvalMult method for a ciphertext and constant * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. * * @param ciphertext * @param constant * @return new ciphertext for ciphertext * constant */ Ciphertext<Element> EvalMultMutable(Ciphertext<Element>& ciphertext, double constant) const { // input parameter check if (!ciphertext) { PALISADE_THROW(type_error, "Input ciphertext is nullptr"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ciphertext, constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMultConst, TOC_US(t))); } return rv; } /** * EvalMultMatrix - PALISADE EvalMult method for two matrices of ciphertext * @param ct1 * @param ct2 * @return new matrix for ct1 * ct2 */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalMultMatrix( const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { TypeCheck((*ct1)(0, 0), (*ct2)(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<RationalCiphertext<Element>> rv = *ct1 * *ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMultMatrix, TOC_US(t))); } return std::make_shared<Matrix<RationalCiphertext<Element>>>(rv); } /** * EvalNegate - PALISADE Negate method for a ciphertext * @param ct * @return new ciphertext -ct */ Ciphertext<Element> EvalNegate(ConstCiphertext<Element> ct) const { if (ct == nullptr || Mismatched(ct->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to EvalNegate was not generated with " "this crypto context"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalNegate(ct); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalNeg, TOC_US(t))); } return rv; } /** * EvalNegateMatrix - PALISADE Negate method for a matrix of ciphertexts * @param ct * @return new matrix -ct */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalNegateMatrix( const shared_ptr<Matrix<RationalCiphertext<Element>>> ct) const { if (ct == nullptr || Mismatched((*ct)(0, 0).GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to EvalNegateMatrix was not generated " "with this crypto context"); TimeVar t; if (doTiming) TIC(t); auto m 
= std::make_shared<Matrix<RationalCiphertext<Element>>>( ct->GetAllocator(), ct->GetRows(), ct->GetCols()); for (size_t r = 0; r < m->GetRows(); r++) for (size_t c = 0; c < m->GetCols(); c++) (*m)(r, c) = -((*ct)(r, c)); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalNegMatrix, TOC_US(t))); } return m; } /** * Generate automorphism keys for a given private key * * @param publicKey original public key. * @param origPrivateKey original private key. * @param indexList list of automorphism indices to be computed * @return returns the evaluation keys; index 0 of the vector corresponds to * plaintext index 2, index 1 to plaintext index 3, etc. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen( const LPPublicKey<Element> publicKey, const LPPrivateKey<Element> origPrivateKey, const std::vector<usint>& indexList) const { if (publicKey == nullptr || origPrivateKey == nullptr) PALISADE_THROW(type_error, "Null Keys"); if (!indexList.size()) PALISADE_THROW(config_error, "Input index vector is empty"); if (publicKey->GetCryptoContext().get() != this) PALISADE_THROW(type_error, "Key was not created in this CryptoContextImpl"); if (publicKey->GetCryptoContext() != origPrivateKey->GetCryptoContext()) PALISADE_THROW(type_error, "Keys were not created in the same CryptoContextImpl"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen( publicKey, origPrivateKey, indexList); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAutomorphismKeyGen, TOC_US(t))); } return rv; } /** * Function for evaluating automorphism of ciphertext at index i * * @param ciphertext the input ciphertext. * @param i automorphism index * @param &evalKeys - reference to the vector of evaluation keys generated by * EvalAutomorphismKeyGen. * @return resulting ciphertext */ Ciphertext<Element> EvalAutomorphism( ConstCiphertext<Element> ciphertext, usint i, const std::map<usint, LPEvalKey<Element>>& evalKeys, CALLER_INFO_ARGS_HDR) const { if (nullptr == ciphertext) { std::string errorMsg(std::string("Input ciphertext is nullptr") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (evalKeys.empty()) { std::string errorMsg(std::string("Empty input key map") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } auto tk = evalKeys.begin()->second; if (nullptr == tk) { std::string errorMsg(std::string("Invalid evalKey") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (ciphertext->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (ciphertext->GetCryptoContext() != tk->GetCryptoContext()) { std::string errorMsg( std::string("Items were not created in the same CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (ciphertext->GetKeyTag() != tk->GetKeyTag()) { std::string errorMsg( std::string("Items were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAutomorphism(ciphertext, i, evalKeys); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAutomorphismI, TOC_US(t))); } return rv; } /** * Generate automorphism keys for a given private key; uses the private key for * encryption * * @param privateKey private key. 
* @param indexList list of automorphism indices to be computed * @return returns the evaluation keys */ shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen( const LPPrivateKey<Element> privateKey, const std::vector<usint>& indexList) const { if (privateKey == nullptr) PALISADE_THROW(type_error, "Null input"); if (!indexList.size()) PALISADE_THROW(config_error, "Input index vector is empty"); if (privateKey->GetCryptoContext().get() != this) PALISADE_THROW(type_error, "Key was not created in this CryptoContextImpl"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(privateKey, indexList); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAutomorphismK, TOC_US(t))); } return rv; } /** * EvalSumKeyGen Generates the key map to be used by EvalSum * * @param privateKey private key. * @param publicKey public key (used in NTRU schemes). */ void EvalSumKeyGen(const LPPrivateKey<Element> privateKey, const LPPublicKey<Element> publicKey = nullptr); shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumRowsKeyGen( const LPPrivateKey<Element> privateKey, const LPPublicKey<Element> publicKey = nullptr, usint rowSize = 0); shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumColsKeyGen( const LPPrivateKey<Element> privateKey, const LPPublicKey<Element> publicKey = nullptr); /** * GetEvalSumKey returns the map * * @return the EvalSum key map */ static const std::map<usint, LPEvalKey<Element>>& GetEvalSumKeyMap( const string& id); static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>& GetAllEvalSumKeys(); /** * Function for evaluating a sum of all components * * @param ciphertext the input ciphertext. * @param batchSize size of the batch * @return resulting ciphertext */ Ciphertext<Element> EvalSum(ConstCiphertext<Element> ciphertext, usint batchSize) const; Ciphertext<Element> EvalSumRows( ConstCiphertext<Element> ciphertext, usint rowSize, const std::map<usint, LPEvalKey<Element>>& evalKeys) const; Ciphertext<Element> EvalSumCols( ConstCiphertext<Element> ciphertext, usint rowSize, const std::map<usint, LPEvalKey<Element>>& evalKeys) const; /** * EvalAtIndexKeyGen Generates the key map to be used by EvalAtIndex * * @param privateKey private key. * @param indexList list of indices. * @param publicKey public key (used in NTRU schemes). */ void EvalAtIndexKeyGen(const LPPrivateKey<Element> privateKey, const std::vector<int32_t>& indexList, const LPPublicKey<Element> publicKey = nullptr); /** * EvalFastRotationPrecompute implements the precomputation step of * hoisted automorphisms. * * Please refer to Section 5 of Halevi and Shoup, "Faster Homomorphic * linear transformations in HElib." for more details, link: * https://eprint.iacr.org/2018/244. * * Generally, automorphisms are performed with three steps: (1) the * automorphism is applied on the ciphertext, (2) the automorphed values are * decomposed into digits, and (3) key switching is applied to make it * possible to further compute on the ciphertext. * * Hoisted automorphism is a technique that performs the digit decomposition * for the original ciphertext first, and then performs the automorphism and * the key switching on the decomposed digits. The benefit of this is that the * digit decomposition is independent of the automorphism rotation index, so * it can be reused for multiple different indices. This can greatly improve * performance when we have to compute many automorphisms on the same * ciphertext. This routinely happens when we do permutations (EvalPermute). 
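* * Illustrative sketch of the hoisting pattern (assumes a ciphertext c, rotation keys generated beforehand via EvalAtIndexKeyGen, and m = cc->GetCyclotomicOrder()): * @code * auto digits = cc->EvalFastRotationPrecompute(c); // digit-decompose once * auto r1 = cc->EvalFastRotation(c, 1, m, digits); // reuse for index 1 * auto r2 = cc->EvalFastRotation(c, 2, m, digits); // reuse for index 2 * @endcode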
* * EvalFastRotationPrecompute implements the digit decomposition step of * hoisted automorphisms. * * @param ct the input ciphertext on which to do the precomputation (digit * decomposition) */ shared_ptr<vector<Element>> EvalFastRotationPrecompute( ConstCiphertext<Element> ct) const { TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalFastRotationPrecompute(ct); if (doTiming) { timeSamples->push_back(TimingInfo(OpFastRotPrecomp, TOC_US(t))); } return rv; } /** * EvalFastRotation implements the automorphism and key switching step of * hoisted automorphisms. * * Please refer to Section 5 of Halevi and Shoup, "Faster Homomorphic * linear transformations in HElib." for more details, link: * https://eprint.iacr.org/2018/244. * * Generally, automorphisms are performed with three steps: (1) the * automorphism is applied on the ciphertext, (2) the automorphed values are * decomposed into digits, and (3) key switching is applied to make it * possible to further compute on the ciphertext. * * Hoisted automorphism is a technique that performs the digit decomposition * for the original ciphertext first, and then performs the automorphism and * the key switching on the decomposed digits. The benefit of this is that the * digit decomposition is independent of the automorphism rotation index, so * it can be reused for multiple different indices. This can greatly improve * performance when we have to compute many automorphisms on the same * ciphertext. This routinely happens when we do permutations (EvalPermute). * * EvalFastRotation implements the automorphism and key switching step of * hoisted automorphisms. * * This method assumes that all required rotation keys exist. This may not be * true if we are using baby-step/giant-step key switching. Please refer to * Section 5.1 of the above reference and EvalPermuteBGStepHoisted to see how * to deal with this issue. * * @param ct the input ciphertext to perform the automorphism on * @param index the index of the rotation. Positive indices correspond to left * rotations and negative indices correspond to right rotations. * @param m is the cyclotomic order * @param digits the digit decomposition created by EvalFastRotationPrecompute * at the precomputation step. */ Ciphertext<Element> EvalFastRotation( ConstCiphertext<Element> ct, const usint index, const usint m, const shared_ptr<vector<Element>> digits) const { TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalFastRotation(ct, index, m, digits); if (doTiming) { timeSamples->push_back(TimingInfo(OpFastRot, TOC_US(t))); } return rv; } /** * Merges multiple ciphertexts with encrypted results in slot 0 into a single * ciphertext. The slot assignment is done based on the order of ciphertexts in * the vector. * * @param ciphertextVector vector of ciphertexts to be merged. * @param &evalKeys - reference to the map of evaluation keys generated by * EvalAutomorphismKeyGen. * @return resulting ciphertext */ Ciphertext<Element> EvalMerge( const vector<Ciphertext<Element>>& ciphertextVector) const; /** * GetEvalAutomorphismKey returns the map * * @return the EvalAutomorphism key map */ static const std::map<usint, LPEvalKey<Element>>& GetEvalAutomorphismKeyMap( const string& id); static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>& GetAllEvalAutomorphismKeys(); /** * Moves the i-th slot to slot 0 * * @param ciphertext the input ciphertext. * @param index the rotation index. 
* @return resulting ciphertext */ Ciphertext<Element> EvalAtIndex(ConstCiphertext<Element> ciphertext, int32_t index) const; /** * Evaluates inner product in batched encoding * * @param ciphertext1 first vector. * @param ciphertext2 second vector. * @param batchSize size of the batch to be summed up * @return resulting ciphertext */ Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2, usint batchSize) const; /** * Evaluates inner product in batched encoding * * @param ciphertext1 first vector - ciphertext. * @param plaintext second vector - plaintext. * @param batchSize size of the batch to be summed up * @return resulting ciphertext */ Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1, ConstPlaintext plaintext, usint batchSize) const; /** * EvalCrossCorrelation - Computes the sliding sum of inner products (known * as cross-correlation, sliding inner product, or sliding dot product in * image processing) * @param x - first vector of row vectors * @param y - second vector of row vectors * @param batchSize - batch size for packed encoding * @param indexStart - starting index in the vectors of row vectors * @param length - length of the slice in the vectors of row vectors; default * is 0 meaning to use the full length of the vector * @return sum(x_i*y_i), i.e., a sum of inner products */ Ciphertext<Element> EvalCrossCorrelation( const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize, usint indexStart = 0, usint length = 0) const; /** * Method for polynomial evaluation for polynomials represented as power * series. * * @param ciphertext input ciphertext * @param coefficients is the vector of coefficients in the polynomial; the * size of the vector is the degree of the polynomial + 1 * @return the result of polynomial evaluation. 
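* * Illustrative usage sketch (assumes a CKKS-style context cc and an encrypted input c): * @code * // evaluates 1 + 2x + 3x^2 on the encrypted value * std::vector<double> coeffs{1.0, 2.0, 3.0}; * auto result = cc->EvalPoly(c, coeffs); * @endcode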
*/ virtual Ciphertext<Element> EvalPoly( ConstCiphertext<Element> ciphertext, const std::vector<double>& coefficients) const { if (ciphertext == nullptr || this->Mismatched(ciphertext->GetCryptoContext())) throw std::logic_error( "Information passed to EvalPoly was not generated with this crypto " "context"); TimeVar t; if (this->doTiming) TIC(t); auto rv = std::static_pointer_cast<LPPublicKeyEncryptionScheme<Element>>( this->GetEncryptionAlgorithm()) ->EvalPoly(ciphertext, coefficients); if (this->doTiming) { this->timeSamples->push_back(TimingInfo(OpEvalPoly, TOC_US(t))); } return rv; } /** * EvalLinRegressBatched - Computes the parameter vector for linear regression * using the least squares method. Supported only in batched mode; currently * works only for two regressors. * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares * method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegressBatched( const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize) const; /** * EvalLinRegression - Computes the parameter vector for linear regression * using the least squares method * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares * method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegression( const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const { TypeCheck((*x)(0, 0), (*y)(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalLinRegression(x, y); if (doTiming) { timeSamples->push_back(TimingInfo(OpLinRegression, TOC_US(t))); } return rv; } /** * KeySwitch - PALISADE KeySwitch method * @param keySwitchHint - reference to KeySwitchHint * @param ciphertext - vector of ciphertext * @return new CiphertextImpl after applying key switch */ Ciphertext<Element> KeySwitch(const LPEvalKey<Element> keySwitchHint, ConstCiphertext<Element> ciphertext) const { if (keySwitchHint == nullptr || Mismatched(keySwitchHint->GetCryptoContext())) PALISADE_THROW( config_error, "Key passed to KeySwitch was not generated with this crypto context"); if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext())) PALISADE_THROW(config_error, "Ciphertext passed to KeySwitch was not generated with " "this crypto context"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->KeySwitch(keySwitchHint, ciphertext); if (doTiming) { timeSamples->push_back(TimingInfo(OpKeySwitch, TOC_US(t))); } return rv; } /** * Rescale - An alias for PALISADE ModReduce method. * This is because ModReduce is called Rescale in CKKS. 
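* * Illustrative usage sketch (assumes a CKKS context cc with EvalMult keys and ciphertexts c1, c2): * @code * auto prod = cc->EvalMult(c1, c2); // scale grows roughly to s^2 * auto lowered = cc->Rescale(prod); // divide by the scale and drop a level * @endcode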
* * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ Ciphertext<Element> Rescale(ConstCiphertext<Element> ciphertext) const { if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to Rescale was not generated with " "this crypto context"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->ModReduce(ciphertext); if (doTiming) { timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(t))); } return rv; } /** * ModReduce - PALISADE ModReduce method used only for BGVrns * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ Ciphertext<Element> ModReduce(ConstCiphertext<Element> ciphertext) const { if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext())) PALISADE_THROW( not_available_error, "Information passed to ModReduce was not generated with this crypto " "context"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->ModReduce(ciphertext); if (doTiming) { timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(t))); } return rv; } /** * ModReduce - PALISADE ModReduce method * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ RationalCiphertext<Element> ModReduceRational( RationalCiphertext<Element> ciphertext) const { TimeVar t; if (doTiming) TIC(t); Ciphertext<Element> n = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetNumerator()); Ciphertext<Element> d = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetDenominator()); if (doTiming) { timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(t))); } return RationalCiphertext<Element>(n, d); } /** * ModReduce - PALISADE ModReduce method * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ shared_ptr<Matrix<RationalCiphertext<Element>>> ModReduceMatrix( shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext) const { // needs context check TimeVar t; if (doTiming) TIC(t); auto m = std::make_shared<Matrix<RationalCiphertext<Element>>>( ciphertext->GetAllocator(), ciphertext->GetRows(), ciphertext->GetCols()); for (size_t r = 0; r < m->GetRows(); r++) for (size_t c = 0; c < m->GetCols(); c++) (*m)(r, c) = ModReduceRational((*ciphertext)(r, c)); if (doTiming) { timeSamples->push_back(TimingInfo(OpModReduceMatrix, TOC_US(t))); } return m; } /** * LevelReduce - PALISADE LevelReduce method * @param cipherText1 * @param linearKeySwitchHint * @return vector of level reduced ciphertext */ Ciphertext<Element> LevelReduce( ConstCiphertext<Element> cipherText1, const LPEvalKeyNTRU<Element> linearKeySwitchHint, size_t levels = 1) const { const auto cryptoParams = std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>( cipherText1->GetCryptoParameters()); if (cipherText1 == nullptr || Mismatched(cipherText1->GetCryptoContext())) { PALISADE_THROW(config_error, "Information passed to LevelReduce was not generated with " "this crypto context"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->LevelReduce( cipherText1, linearKeySwitchHint, levels); if (doTiming) { timeSamples->push_back(TimingInfo(OpLevelReduce, TOC_US(t))); } return rv; } /** * ComposedEvalMult - PALISADE composed evalmult * @param ciphertext1 - vector for first cipher text * @param ciphertext2 - vector for second cipher text * @param quadKeySwitchHint - is the quadratic key switch hint from original * private key to the quadratic key * @return the 
resulting ciphertext */ Ciphertext<Element> ComposedEvalMult( ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2) const { if (ciphertext1 == nullptr || ciphertext2 == nullptr || ciphertext1->GetKeyTag() != ciphertext2->GetKeyTag() || Mismatched(ciphertext1->GetCryptoContext())) PALISADE_THROW(config_error, "Ciphertexts passed to ComposedEvalMult were not " "generated with this crypto context"); auto ek = GetEvalMultKeyVector(ciphertext1->GetKeyTag()); if (!ek.size()) { PALISADE_THROW(type_error, "Evaluation key has not been generated for EvalMult"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->ComposedEvalMult(ciphertext1, ciphertext2, ek[0]); if (doTiming) { timeSamples->push_back(TimingInfo(OpComposedEvalMult, TOC_US(t))); } return rv; } static LPPublicKey<Element> deserializePublicKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static LPPrivateKey<Element> deserializeSecretKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static LPEvalKey<Element> deserializeEvalKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static LPEvalKey<Element> deserializeEvalKeyInContext( const Serialized& serObj, CryptoContext<Element> cc) __attribute__(( deprecated("serialization changed, see wiki for details"))); template <class Archive> void save(Archive& ar, std::uint32_t const version) const { ar(cereal::make_nvp("cc", params)); ar(cereal::make_nvp("kt", scheme)); ar(cereal::make_nvp("si", m_schemeId)); } template <class Archive> void load(Archive& ar, std::uint32_t const version) { if (version > SerializedVersion()) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar(cereal::make_nvp("cc", params)); ar(cereal::make_nvp("kt", scheme)); ar(cereal::make_nvp("si", m_schemeId)); // NOTE: a pointer to this object will be wrapped in a shared_ptr, and is a // "CryptoContext". PALISADE relies on the notion that identical // CryptoContextImpls are not duplicated in memory. Once we deserialize this // object, we must check to see if there is a matching object for this // object that's already existing in memory; if it DOES exist, use it. 
If it // does NOT exist, add this to the cache of all contexts } virtual std::string SerializedObjectName() const { return "CryptoContext"; } static uint32_t SerializedVersion() { return 1; } }; /** * @brief CryptoObject * * A class to aid in referring to the crypto context that an object belongs to */ template <typename Element> class CryptoObject { protected: CryptoContext<Element> context; // crypto context this object belongs to // tag used to find the evaluation key needed // for SHE/FHE operations string keyTag; public: explicit CryptoObject(CryptoContext<Element> cc = nullptr, const string& tag = "") : context(cc), keyTag(tag) {} CryptoObject(const CryptoObject& rhs) { context = rhs.context; keyTag = rhs.keyTag; } CryptoObject(CryptoObject&& rhs) { context = std::move(rhs.context); keyTag = std::move(rhs.keyTag); } virtual ~CryptoObject() {} const CryptoObject& operator=(const CryptoObject& rhs) { this->context = rhs.context; this->keyTag = rhs.keyTag; return *this; } const CryptoObject& operator=(CryptoObject&& rhs) { this->context = std::move(rhs.context); this->keyTag = std::move(rhs.keyTag); return *this; } bool operator==(const CryptoObject& rhs) const { return context.get() == rhs.context.get() && keyTag == rhs.keyTag; } CryptoContext<Element> GetCryptoContext() const { return context; } const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return context->GetCryptoParameters(); } const EncodingParams GetEncodingParameters() const { return context->GetCryptoParameters()->GetEncodingParams(); } const string GetKeyTag() const { return keyTag; } void SetKeyTag(const string& tag) { keyTag = tag; } template <class Archive> void save(Archive& ar, std::uint32_t const version) const { ar(::cereal::make_nvp("cc", context)); ar(::cereal::make_nvp("kt", keyTag)); } template <class Archive> void load(Archive& ar, std::uint32_t const version) { if (version > SerializedVersion()) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar(::cereal::make_nvp("cc", context)); ar(::cereal::make_nvp("kt", keyTag)); context = CryptoContextFactory<Element>::GetContext( context->GetCryptoParameters(), context->GetEncryptionAlgorithm()); } std::string SerializedObjectName() const { return "CryptoObject"; } static uint32_t SerializedVersion() { return 1; } }; /** * @brief CryptoContextFactory * * A class that contains static methods to generate new crypto contexts from * user parameters * */ template <typename Element> class CryptoContextFactory { using ParmType = typename Element::Params; using IntType = typename Element::Integer; protected: static vector<CryptoContext<Element>> AllContexts; public: static void ReleaseAllContexts(); static int GetContextCount(); static CryptoContext<Element> GetSingleContext(); static CryptoContext<Element> GetContext( shared_ptr<LPCryptoParameters<Element>> params, shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme, const string& schemeId = "Not"); static CryptoContext<Element> GetContextForPointer( CryptoContextImpl<Element>* cc); static const vector<CryptoContext<Element>>& GetAllContexts(); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stDev sigma - distribution parameter for error distribution * @param delta - the plaintext scaling parameter floor(q/t) in 
BFV * @param mode - mode for generating secret keys (RLWE vs OPTIMIZED) * @param bigmodulus - large modulus used in tensoring of homomorphic * multiplication * @param bigrootofunity - root of unity for bigmodulus * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param securityLevel - root Hermite factor * @param bigmodulusarb - additional large modulus for bigmodulus for the case * of general (non-power-of-two) cyclotomics * @param bigrootofunityarb - root of unity for bigmodulusarb * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @return new context */ static CryptoContext<Element> genCryptoContextBFV( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0", int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stDev sigma - distribution parameter for error distribution * @param delta - the plaintext scaling parameter floor(q/t) in BFV * @param mode - mode for generating secret keys (RLWE vs OPTIMIZED) * @param bigmodulus - large modulus used in tensoring of homomorphic * multiplication * @param bigrootofunity - root of unity for bigmodulus * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param securityLevel - root Hermite factor * @param bigmodulusarb - additional large modulus for bigmodulus for the case * of general (non-power-of-two) cyclotomics * @param bigrootofunityarb - root of unity for bigmodulusarb * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @return new context */ static CryptoContext<Element> genCryptoContextBFV( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0", int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel root Hermite factor (lattice security parameter) * @param relinWindow bits in the base of digits in key * switching/relinearization * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * 
numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( const PlaintextModulus plaintextModulus, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel standard security level * @param relinWindow bits in the base of digits in key * switching/relinearization * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( const PlaintextModulus plaintextModulus, SecurityLevel securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( EncodingParams encodingParams, float securityLevel, usint relinWindow, float 
dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( EncodingParams encodingParams, SecurityLevel securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window (bits in the base for digits) * used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( const PlaintextModulus plaintextModulus, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds 
and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window (bits in the base for digits) * used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( const PlaintextModulus plaintextModulus, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( EncodingParams encodingParams, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); 
setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( EncodingParams encodingParams, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( const PlaintextModulus plaintextModulus, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( const 
PlaintextModulus plaintextModulus, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( EncodingParams encodingParams, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( EncodingParams encodingParams, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BGV Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the
base of digits in key * switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param depth of supported computation circuit (not used; for future use) * @return new context */ static CryptoContext<Element> genCryptoContextBGV( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContextImpl for the BGV Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param depth of supported computation circuit (not used; for future use) * @return new context */ static CryptoContext<Element> genCryptoContextBGV( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContextImpl for the CKKS Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, KeySwitchTechnique ksTech = BV, RescalingTechnique rsTech = APPROXRESCALE); /** * construct a PALISADE CryptoContextImpl for the CKKS Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, enum KeySwitchTechnique ksTech = BV, RescalingTechnique rsTech = APPROXRESCALE); /** * Automatically generate the moduli chain and construct a PALISADE * CryptoContextImpl for the CKKS Scheme with it. 
* * @param cyclOrder the cyclotomic order M * @param numPrimes the number of towers/primes to use when building the * moduli chain * @param scaleExp the plaintext scaling factor, which is equal to dcrtBits in * our implementation of CKKS * @param batchSize the batch size of the ciphertext * @param mode RLWE or OPTIMIZED * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @return new context */ static CryptoContext<Element> genCryptoContextCKKSWithParamsGen( usint cyclOrder, usint numPrimes, usint scaleExp, usint relinWindow, usint batchSize, MODE mode, int depth = 1, int maxDepth = 2, usint firstModSize = 60, enum KeySwitchTechnique ksTech = BV, enum RescalingTechnique rsTech = APPROXRESCALE, uint32_t numLargeDigits = 4); /** * Construct a PALISADE CryptoContextImpl for the CKKS Scheme. * * @param multiplicativeDepth the depth of multiplications supported by the * scheme (equal to number of towers - 1) * @param scalingFactorBits the size of the scaling factor in bits * @param batchSize the number of slots being used in the ciphertext * @param stdLevel the standard security level we want the scheme to satisfy * @param ringDim the ring dimension (if not specified selected automatically * based on stdLevel) * @param ksTech key switching technique to use (e.g., HYBRID, GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param relinWindow the relinearization windows (used in BV key switching, * use 0 for RNS decomposition) * @param mode RLWE (gaussian distribution) or OPTIMIZED (ternary * distribution) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS( usint multiplicativeDepth, usint scalingFactorBits, usint batchSize, SecurityLevel stdLevel = HEStd_128_classic, usint ringDim = 0, enum RescalingTechnique rsTech = EXACTRESCALE, enum KeySwitchTechnique ksTech = HYBRID, uint32_t numLargeDigits = 0, int maxDepth = 2, usint firstModSize = 60, usint relinWindow = 0, MODE mode = OPTIMIZED); /** * construct a PALISADE CryptoContextImpl for the BGVrns Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrns( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, KeySwitchTechnique ksTech = BV, enum ModSwitchMethod msMethod = MANUAL); /** * construct a PALISADE CryptoContextImpl for the BGVrns Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @param maxDepth the maximum power of secret key for which the * 
relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrns( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, enum KeySwitchTechnique ksTech = BV, enum ModSwitchMethod msMethod = MANUAL); /** * Automatically generate the moduli chain and construct a PALISADE * CryptoContextImpl for the BGVrns Scheme with it. * * @param cyclOrder the cyclotomic order M * @param numPrimes the number of towers/primes to use when building the * moduli chain * @param ptm the plaintext modulus * @param mode RLWE or OPTIMIZED * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param dcrtBits the size of the moduli in bits * @param ksTech key switching technique to use (e.g., GHS or BV) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @param batchSize the number of slots being used in the ciphertext * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrnsWithParamsGen( usint cyclOrder, usint numPrimes, usint ptm, usint relinWindow, MODE mode, int depth = 1, int maxDepth = 2, enum KeySwitchTechnique ksTech = BV, usint firstModSize = 0, usint dcrtBits = 0, uint32_t numLargeDigits = 4, usint batchSize = 0, enum ModSwitchMethod msMethod = MANUAL); /** * Construct a PALISADE CryptoContextImpl for the BGVrns Scheme. * * @param multiplicativeDepth the depth of multiplications supported by the * scheme (equal to number of towers - 1) * @param ptm the plaintext modulus * @param stdLevel the standard security level we want the scheme to satisfy * @param stdDev sigma - distribution parameter for error distribution * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param mode RLWE (gaussian distribution) or OPTIMIZED (ternary * distribution) * @param ksTech key switching technique to use (e.g., HYBRID, GHS or BV) * @param ringDim the ring dimension (if not specified selected automatically * based on stdLevel) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @param firstModSize the bit-length of the first modulus * @param dcrtBits the size of the moduli in bits * @param relinWindow the relinearization window (used in BV key switching, * use 0 for RNS decomposition) * @param batchSize the number of slots being used in the ciphertext * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrns( usint multiplicativeDepth, usint ptm, SecurityLevel stdLevel = HEStd_128_classic, float stdDev = 3.19, int maxDepth = 2, MODE mode = OPTIMIZED, enum KeySwitchTechnique ksTech = HYBRID, usint ringDim = 0, uint32_t numLargeDigits = 0, usint firstModSize = 0, usint dcrtBits = 0, usint relinWindow = 0, usint batchSize = 0, enum ModSwitchMethod msMethod = AUTO); /** * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param stDevStSt distribution parameter for secret key distribution * @param depth of
supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param securityLevel root Hermite factor * @return new context */ static CryptoContext<Element> genCryptoContextStehleSteinfeld( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param stDevStSt distribution parameter for secret key distribution * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param securityLevel root Hermite factor * @return new context */ static CryptoContext<Element> genCryptoContextStehleSteinfeld( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the Null Scheme * @param m cyclotomic order (ring dimension n = m/2 for power-of-two * cyclotomics) * @param plaintextModulus plaintext modulus * @return new context */ static CryptoContext<Element> genCryptoContextNull( unsigned int m, const PlaintextModulus ptModulus); /** * construct a PALISADE CryptoContextImpl for the Null Scheme * @param m cyclotomic order (ring dimension n = m/2 for power-of-two * cyclotomics) * @param encodingParams plaintext encoding parameters * @return new context */ static CryptoContext<Element> genCryptoContextNull( unsigned int m, EncodingParams encodingParams); static CryptoContext<Element> DeserializeAndCreateContext( const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); }; } // namespace lbcrypto #endif /* SRC_PKE_CRYPTOCONTEXT_H_ */
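// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header): building a
// BFVrns context with the factory declared above and running one homomorphic
// multiplication. The parameter values below (plaintext modulus 65537,
// sigma 3.2, multiplicative depth 2) are assumptions for the example, not
// recommendations.
#include <iostream>
#include "palisade.h"
using namespace lbcrypto;

int main() {
  CryptoContext<DCRTPoly> cc =
      CryptoContextFactory<DCRTPoly>::genCryptoContextBFVrns(
          65537, HEStd_128_classic, 3.2, 0, 2, 0);
  cc->Enable(ENCRYPTION);  // key generation, encryption, decryption
  cc->Enable(SHE);         // homomorphic add/mult

  auto keys = cc->KeyGen();
  cc->EvalMultKeyGen(keys.secretKey);  // relinearization key for EvalMult

  Plaintext pt = cc->MakePackedPlaintext({1, 2, 3, 4});
  auto ct = cc->Encrypt(keys.publicKey, pt);
  auto ctSq = cc->EvalMult(ct, ct);  // slot-wise squaring

  Plaintext result;
  cc->Decrypt(keys.secretKey, ctSq, &result);
  result->SetLength(4);
  std::cout << result << std::endl;  // expect {1, 4, 9, 16}
  return 0;
}
// ---------------------------------------------------------------------------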
// @file cryptocontext.h -- Control for encryption operations. // @author TPOC: [email protected] // // @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT)) // All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. THIS SOFTWARE IS // PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef SRC_PKE_CRYPTOCONTEXT_H_ #define SRC_PKE_CRYPTOCONTEXT_H_ #include <map> #include <memory> #include <string> #include <utility> #include <vector> #include "palisade.h" #include "scheme/allscheme.h" #include "cryptocontexthelper.h" #include "cryptotiming.h" #include "utils/caller_info.h" #include "utils/serial.h" #include "utils/serialize-binary.h" #include "utils/serialize-json.h" namespace lbcrypto { template <typename Element> class CryptoContextFactory; template <typename Element> class CryptoContextImpl; template <typename Element> using CryptoContext = shared_ptr<CryptoContextImpl<Element>>; /** * @brief CryptoContextImpl * * A CryptoContextImpl is the object used to access the PALISADE library * * All PALISADE functionality is accessed by way of an instance of a * CryptoContextImpl; we say that various objects are "created in" a context, * and can only be used in the context in which they were created * * All PALISADE methods are accessed through CryptoContextImpl methods. 
Guards * are implemented to make certain that only valid objects that have been * created in the context are used * * Contexts are created using the CryptoContextFactory, and can be serialized * and recovered from a serialization */ template <typename Element> class CryptoContextImpl : public Serializable { using IntType = typename Element::Integer; using ParmType = typename Element::Params; friend class CryptoContextFactory<Element>; protected: // crypto parameters used for this context shared_ptr<LPCryptoParameters<Element>> params; // algorithm used; accesses all crypto methods shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme; static std::map<string, std::vector<LPEvalKey<Element>>>& evalMultKeyMap() { // cached evalmult keys, by secret key UID static std::map<string, std::vector<LPEvalKey<Element>>> s_evalMultKeyMap; return s_evalMultKeyMap; } static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>& evalSumKeyMap() { // cached evalsum keys, by secret key UID static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> s_evalSumKeyMap; return s_evalSumKeyMap; } static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>& evalAutomorphismKeyMap() { // cached evalautomorphism keys, by secret key UID static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> s_evalAutomorphismKeyMap; return s_evalAutomorphismKeyMap; } bool doTiming; vector<TimingInfo>* timeSamples; string m_schemeId; size_t m_keyGenLevel; /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * @param a * @param b */ void TypeCheck(ConstCiphertext<Element> a, ConstCiphertext<Element> b, CALLER_INFO_ARGS_HDR) const { if (a == nullptr || b == nullptr) { std::string errorMsg(std::string("Null Ciphertext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext() != b->GetCryptoContext()) { std::string errorMsg( std::string( "Ciphertexts were not created in the same CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetKeyTag() != b->GetKeyTag()) { std::string errorMsg( std::string("Ciphertexts were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "Ciphertext encoding types " << a->GetEncodingType(); ss << " and " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * This is intended for mutable methods, hence inputs are Ciphertext instead * of ConstCiphertext.
* * @param a * @param b */ /* void TypeCheck(Ciphertext<Element> a, Ciphertext<Element> b, CALLER_INFO_ARGS_HDR) const { if (a == nullptr || b == nullptr) { std::string errorMsg(std::string("Null Ciphertext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext() != b->GetCryptoContext()) { std::string errorMsg( std::string("Ciphertexts were not created in the same CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetKeyTag() != b->GetKeyTag()) { std::string errorMsg( std::string("Ciphertexts were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "Ciphertext encoding types " << a->GetEncodingType(); ss << " and " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } */ /** * TypeCheck makes sure that an operation between a ciphertext and a plaintext * is permitted * @param a * @param b */ void TypeCheck(ConstCiphertext<Element> a, ConstPlaintext b, CALLER_INFO_ARGS_HDR) const { if (a == nullptr) { std::string errorMsg(std::string("Null Ciphertext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (b == nullptr) { std::string errorMsg(std::string("Null Plaintext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "Ciphertext encoding type " << a->GetEncodingType(); ss << " and Plaintext encoding type " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * @param a * @param b */ void TypeCheck(const RationalCiphertext<Element>& a, const RationalCiphertext<Element>& b, CALLER_INFO_ARGS_HDR) const { if (a.GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetCryptoContext() != b.GetCryptoContext()) { std::string errorMsg( std::string( "Ciphertexts were not created in the same CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetKeyTag() != b.GetKeyTag()) { std::string errorMsg( std::string("Ciphertexts were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetNumerator()->GetEncodingType() != b.GetNumerator()->GetEncodingType()) { std::stringstream ss; ss << "RationalCiphertext encoding types " << a.GetNumerator()->GetEncodingType(); ss << " and " << b.GetNumerator()->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } /** * TypeCheck makes sure that an operation between a ciphertext and a plaintext * is permitted * @param a * @param b */ void TypeCheck(const RationalCiphertext<Element>& a, ConstPlaintext b, CALLER_INFO_ARGS_HDR) const { if (b == nullptr) { std::string errorMsg(std::string("Null Plaintext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetCryptoContext().get() != this) { std::string 
errorMsg( std::string("Ciphertext was not created in this CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetNumerator()->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "RationalCiphertext encoding type " << a.GetNumerator()->GetEncodingType(); ss << " and Plaintext encoding type " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } bool Mismatched(const CryptoContext<Element> a) const { if (a.get() != this) { return true; } return false; } public: LPPrivateKey<Element> privateKey; /** * This stores the private key in the crypto context. * This is only intended for debugging and should not be * used in production systems. Please define DEBUG_KEY in * palisade.h to enable this. * * If used, one can create a key pair and store the secret * key in the crypto context like this: * * auto keys = cc->KeyGen(); * cc->SetPrivateKey(keys.secretKey); * * After that, anywhere in the code, one can access the * secret key by getting the crypto context and doing the * following: * * auto sk = cc->GetPrivateKey(); * * This key can be used for decrypting any intermediate * ciphertexts for debugging purposes. * * @param sk the secret key * */ void SetPrivateKey(const LPPrivateKey<Element> sk) { #ifdef DEBUG_KEY cerr << "Warning - SetPrivateKey is only intended to be used for debugging " "purposes - not for production systems." << endl; this->privateKey = sk; #else PALISADE_THROW( not_available_error, "SetPrivateKey is only allowed if DEBUG_KEY is set in palisade.h"); #endif } /** * This gets the private key from the crypto context. * This is only intended for debugging and should not be * used in production systems. Please define DEBUG_KEY in * palisade.h to enable this. * * If used, one can create a key pair and store the secret * key in the crypto context like this: * * auto keys = cc->KeyGen(); * cc->SetPrivateKey(keys.secretKey); * * After that, anywhere in the code, one can access the * secret key by getting the crypto context and doing the * following: * * auto sk = cc->GetPrivateKey(); * * This key can be used for decrypting any intermediate * ciphertexts for debugging purposes.
* * @return the secret key * */ const LPPrivateKey<Element> GetPrivateKey() { #ifdef DEBUG_KEY return this->privateKey; #else PALISADE_THROW( not_available_error, "GetPrivateKey is only allowed if DEBUG_KEY is set in palisade.h"); #endif } void setSchemeId(string schemeTag) { this->m_schemeId = schemeTag; } string getSchemeId() const { return this->m_schemeId; } /** * CryptoContextImpl constructor from pointers to parameters and scheme * @param params - pointer to CryptoParameters * @param scheme - pointer to Crypto Scheme */ CryptoContextImpl(LPCryptoParameters<Element>* params = nullptr, LPPublicKeyEncryptionScheme<Element>* scheme = nullptr, const string& schemeId = "Not") { this->params.reset(params); this->scheme.reset(scheme); this->doTiming = false; this->timeSamples = 0; this->m_keyGenLevel = 0; this->m_schemeId = schemeId; } /** * CryptoContextImpl constructor from shared pointers to parameters and scheme * @param params - shared pointer to CryptoParameters * @param scheme - shared pointer to Crypto Scheme */ CryptoContextImpl(shared_ptr<LPCryptoParameters<Element>> params, shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme, const string& schemeId = "Not") { this->params = params; this->scheme = scheme; this->doTiming = false; this->timeSamples = 0; this->m_keyGenLevel = 0; this->m_schemeId = schemeId; } /** * Copy constructor * @param c - source */ CryptoContextImpl(const CryptoContextImpl<Element>& c) { params = c.params; scheme = c.scheme; doTiming = c.doTiming; timeSamples = c.timeSamples; this->m_keyGenLevel = 0; this->m_schemeId = c.m_schemeId; } /** * Assignment * @param rhs - assigning from * @return this */ CryptoContextImpl<Element>& operator=(const CryptoContextImpl<Element>& rhs) { params = rhs.params; scheme = rhs.scheme; doTiming = rhs.doTiming; timeSamples = rhs.timeSamples; m_keyGenLevel = rhs.m_keyGenLevel; m_schemeId = rhs.m_schemeId; return *this; } /** * A CryptoContextImpl is only valid if the shared pointers are both valid */ operator bool() const { return params && scheme; } /** * Private methods to compare two contexts; this is only used internally and * is not generally available * @param a - operand 1 * @param b - operand 2 * @return true if the implementations have identical parms and scheme */ friend bool operator==(const CryptoContextImpl<Element>& a, const CryptoContextImpl<Element>& b) { // Identical if the parameters and the schemes are identical...
the exact // same object, OR the same type and the same values if (a.params.get() == b.params.get()) { return true; } else { if (typeid(*a.params.get()) != typeid(*b.params.get())) { return false; } if (*a.params.get() != *b.params.get()) return false; } if (a.scheme.get() == b.scheme.get()) { return true; } else { if (typeid(*a.scheme.get()) != typeid(*b.scheme.get())) { return false; } if (*a.scheme.get() != *b.scheme.get()) return false; } return true; } friend bool operator!=(const CryptoContextImpl<Element>& a, const CryptoContextImpl<Element>& b) { return !(a == b); } // TIMING METHODS /** * StartTiming method activates timing of CryptoMethods * * @param timeSamples points to a vector in which timing samples will be * stored */ void StartTiming(vector<TimingInfo>* timeSamples) { this->timeSamples = timeSamples; doTiming = true; } /* * StopTiming - turns off timing */ void StopTiming() { doTiming = false; } /** * ResumeTiming - re-enables timing with existing TimingInfo vector */ void ResumeTiming() { doTiming = true; } /** * ResetTiming - erases measurements */ void ResetTiming() { this->timeSamples->clear(); } static bool SerializeEvalMultKey(Serialized* serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalMultKey(Serialized* serObj, const string& id) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalMultKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool DeserializeEvalMultKey(Serialized* serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * SerializeEvalMultKey for a single EvalMult key or all EvalMult keys * * @param ser - stream to serialize to * @param sertype - type of serialization * @param id for key to serialize - if empty string, serialize them all * @return true on success */ template <typename ST> static bool SerializeEvalMultKey(std::ostream& ser, const ST& sertype, string id = ""); /** * SerializeEvalMultKey for all EvalMultKeys made in a given context * * @param cc whose keys should be serialized * @param ser - stream to serialize to * @param sertype - type of serialization * @return true on success (false on failure or no keys found) */ template <typename ST> static bool SerializeEvalMultKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) { std::map<string, std::vector<LPEvalKey<Element>>> omap; for (const auto& k : GetAllEvalMultKeys()) { if (k.second[0]->GetCryptoContext() == cc) { omap[k.first] = k.second; } } if (omap.size() == 0) return false; Serial::Serialize(omap, ser, sertype); return true; } /** * DeserializeEvalMultKey deserialize all keys in the serialization * deserialized keys silently replace any existing matching keys * deserialization will create CryptoContextImpl if necessary * * @param serObj - stream with a serialization * @return true on success */ template <typename ST> static bool DeserializeEvalMultKey(std::istream& ser, const ST& sertype) { std::map<string, std::vector<LPEvalKey<Element>>> evalMultKeys; Serial::Deserialize(evalMultKeys, ser, sertype); // The deserialize call created any contexts that needed to be created....
// so all we need to do is put the keys into the maps for their context for (const auto& k : evalMultKeys) { GetAllEvalMultKeys()[k.first] = k.second; } return true; } /** * ClearEvalMultKeys - flush EvalMultKey cache */ static void ClearEvalMultKeys(); /** * ClearEvalMultKeys - flush EvalMultKey cache for a given id * @param id */ static void ClearEvalMultKeys(const string& id); /** * ClearEvalMultKeys - flush EvalMultKey cache for a given context * @param cc */ static void ClearEvalMultKeys(const CryptoContext<Element> cc); /** * InsertEvalMultKey - add the given vector of keys to the map, replacing the * existing vector if there * @param vectorToInsert */ static void InsertEvalMultKey( const std::vector<LPEvalKey<Element>>& vectorToInsert); static bool SerializeEvalSumKey(Serialized* serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalSumKey(Serialized* serObj, const string& id) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalSumKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool DeserializeEvalSumKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * SerializeEvalSumKey for a single EvalSum key or all of the EvalSum keys * * @param ser - stream to serialize to * @param sertype - type of serialization * @param id - key to serialize; empty string means all keys * @return true on success */ template <typename ST> static bool SerializeEvalSumKey(std::ostream& ser, const ST& sertype, string id = "") { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>* smap; std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; if (id.length() == 0) { smap = &GetAllEvalSumKeys(); } else { auto k = GetAllEvalSumKeys().find(id); if (k == GetAllEvalSumKeys().end()) return false; // no such id smap = &omap; omap[k->first] = k->second; } Serial::Serialize(*smap, ser, sertype); return true; } /** * SerializeEvalSumKey for all of the EvalSum keys for a context * * @param ser - stream to serialize to * @param sertype - type of serialization * @param cc - context * @return true on success */ template <typename ST> static bool SerializeEvalSumKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; for (const auto& k : GetAllEvalSumKeys()) { if (k.second->begin()->second->GetCryptoContext() == cc) { omap[k.first] = k.second; } } if (omap.size() == 0) return false; Serial::Serialize(omap, ser, sertype); return true; } /** * DeserializeEvalSumKey deserialize all keys in the serialization * deserialized keys silently replace any existing matching keys * deserialization will create CryptoContextImpl if necessary * * @param ser - stream to serialize from * @param sertype - type of serialization * @return true on success */ template <typename ST> static bool DeserializeEvalSumKey(std::istream& ser, const ST& sertype) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> evalSumKeys; Serial::Deserialize(evalSumKeys, ser, sertype); // The deserialize call created any contexts that needed to be created....
// so all we need to do is put the keys into the maps for their context for (auto k : evalSumKeys) { GetAllEvalSumKeys()[k.first] = k.second; } return true; } /** * ClearEvalSumKeys - flush EvalSumKey cache */ static void ClearEvalSumKeys(); /** * ClearEvalSumKeys - flush EvalSumKey cache for a given id * @param id */ static void ClearEvalSumKeys(const string& id); /** * ClearEvalSumKeys - flush EvalSumKey cache for a given context * @param cc */ static void ClearEvalSumKeys(const CryptoContext<Element> cc); /** * InsertEvalSumKey - add the given map of keys to the map, replacing the * existing map if there * @param mapToInsert */ static void InsertEvalSumKey( const shared_ptr<std::map<usint, LPEvalKey<Element>>> mapToInsert); static bool SerializeEvalAutomorphismKey(Serialized* serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalAutomorphismKey(Serialized* serObj, const string& id) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalAutomorphismKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool DeserializeEvalAutomorphismKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * SerializeEvalAutomorphismKey for a single EvalAuto key or all of the * EvalAuto keys * * @param ser - stream to serialize to * @param sertype - type of serialization * @param id - key to serialize; empty string means all keys * @return true on success */ template <typename ST> static bool SerializeEvalAutomorphismKey(std::ostream& ser, const ST& sertype, string id = "") { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>* smap; std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; if (id.length() == 0) { smap = &GetAllEvalAutomorphismKeys(); } else { auto k = GetAllEvalAutomorphismKeys().find(id); if (k == GetAllEvalAutomorphismKeys().end()) return false; // no such id smap = &omap; omap[k->first] = k->second; } Serial::Serialize(*smap, ser, sertype); return true; } /** * SerializeEvalAutomorphismKey for all of the EvalAuto keys for a context * * @param ser - stream to serialize to * @param sertype - type of serialization * @param cc - context * @return true on success */ template <typename ST> static bool SerializeEvalAutomorphismKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; for (const auto& k : GetAllEvalAutomorphismKeys()) { if (k.second->begin()->second->GetCryptoContext() == cc) { omap[k.first] = k.second; } } if (omap.size() == 0) return false; Serial::Serialize(omap, ser, sertype); return true; } /** * DeserializeEvalAutomorphismKey deserialize all keys in the serialization * deserialized keys silently replace any existing matching keys * deserialization will create CryptoContextImpl if necessary * * @param ser - stream to serialize from * @param sertype - type of serialization * @return true on success */ template <typename ST> static bool DeserializeEvalAutomorphismKey(std::istream& ser, const ST& sertype) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> evalSumKeys; Serial::Deserialize(evalSumKeys, ser, sertype); // The deserialize call created any contexts that needed to be created.... 
// so all we need to do is put the keys into the maps for their context for (auto k : evalSumKeys) { GetAllEvalAutomorphismKeys()[k.first] = k.second; } return true; } /** * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache */ static void ClearEvalAutomorphismKeys(); /** * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache for a given id * @param id */ static void ClearEvalAutomorphismKeys(const string& id); /** * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache for a given * context * @param cc */ static void ClearEvalAutomorphismKeys(const CryptoContext<Element> cc); /** * InsertEvalAutomorphismKey - add the given map of keys to the map, replacing * the existing map if there * @param mapToInsert */ static void InsertEvalAutomorphismKey( const shared_ptr<std::map<usint, LPEvalKey<Element>>> mapToInsert); // TURN FEATURES ON /** * Enable a particular feature for use with this CryptoContextImpl * @param feature - the feature that should be enabled */ void Enable(PKESchemeFeature feature) { scheme->Enable(feature); } /** * Enable several features at once * @param featureMask - bitwise or of several PKESchemeFeatures */ void Enable(usint featureMask) { scheme->Enable(featureMask); } // GETTERS /** * Getter for Scheme * @return scheme */ const shared_ptr<LPPublicKeyEncryptionScheme<Element>> GetEncryptionAlgorithm() const { return scheme; } /** * Getter for CryptoParams * @return params */ const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return params; } size_t GetKeyGenLevel() const { return m_keyGenLevel; } void SetKeyGenLevel(size_t level) { m_keyGenLevel = level; } /** * Getter for element params * @return */ const shared_ptr<ParmType> GetElementParams() const { return params->GetElementParams(); } /** * Getter for encoding params * @return */ const EncodingParams GetEncodingParams() const { return params->GetEncodingParams(); } /** * Get the cyclotomic order used for this context * * @return */ usint GetCyclotomicOrder() const { return params->GetElementParams()->GetCyclotomicOrder(); } /** * Get the ring dimension used for this context * * @return */ usint GetRingDimension() const { return params->GetElementParams()->GetRingDimension(); } /** * Get the ciphertext modulus used for this context * * @return */ const IntType& GetModulus() const { return params->GetElementParams()->GetModulus(); } /** * Get the ciphertext modulus used for this context * * @return */ const IntType& GetRootOfUnity() const { return params->GetElementParams()->GetRootOfUnity(); } /** * KeyGen generates a key pair using this algorithm's KeyGen method * @return a public/secret key pair */ LPKeyPair<Element> KeyGen() { TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->KeyGen( CryptoContextFactory<Element>::GetContextForPointer(this), false); if (doTiming) { timeSamples->push_back(TimingInfo(OpKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generation of a public key derived * from a previous joined public key (for prior secret shares) and the secret * key share of the current party. * * @param pk joined public key from prior parties. * @param makeSparse set to true if ring reduce by a factor of 2 is to be * used. NOT SUPPORTED BY ANY SCHEME ANYMORE. 
* @param fresh set to true if proxy re-encryption is used in the multi-party * protocol or star topology is used * @return key pair including the secret share for the current party and * joined public key */ LPKeyPair<Element> MultipartyKeyGen(const LPPublicKey<Element> pk, bool makeSparse = false, bool fresh = false) { if (!pk) PALISADE_THROW(config_error, "Input public key is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen( CryptoContextFactory<Element>::GetContextForPointer(this), pk, makeSparse, fresh); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiPartyKeyGenKey, TOC_US(t))); } return r; } /** * Threshold FHE: Generates a public key from a vector of secret shares. * ONLY FOR DEBUGGING PURPOSES. SHOULD NOT BE USED IN PRODUCTION. * * @param secretKeys secret key shares. * @return key pair including the private key for the current party and joined * public key */ LPKeyPair<Element> MultipartyKeyGen( const vector<LPPrivateKey<Element>>& secretKeys) { if (!secretKeys.size()) PALISADE_THROW(config_error, "Input private key vector is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen( CryptoContextFactory<Element>::GetContextForPointer(this), secretKeys, false); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiPartyKeyGenKeyvec, TOC_US(t))); } return r; } /** * Threshold FHE: Method for decryption operation run by the lead decryption * client * * @param privateKey secret key share used for decryption. * @param ciphertext ciphertext that is decrypted. */ vector<Ciphertext<Element>> MultipartyDecryptLead( const LPPrivateKey<Element> privateKey, const vector<Ciphertext<Element>>& ciphertext) const { if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to MultipartyDecryptLead was not " "generated with this crypto context"); vector<Ciphertext<Element>> newCiphertext; TimeVar t; if (doTiming) TIC(t); for (size_t i = 0; i < ciphertext.size(); i++) { if (ciphertext[i] == nullptr || Mismatched(ciphertext[i]->GetCryptoContext())) PALISADE_THROW(config_error, "A ciphertext passed to MultipartyDecryptLead was not " "generated with this crypto context"); newCiphertext.push_back(GetEncryptionAlgorithm()->MultipartyDecryptLead( privateKey, ciphertext[i])); } if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiPartyDecryptLead, TOC_US(t))); } return newCiphertext; } /** * Threshold FHE: "Partial" decryption computed by all parties except for the * lead one * * @param privateKey secret key share used for decryption. * @param ciphertext ciphertext that is being decrypted.
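 *
 * Hypothetical usage sketch of the complete threshold-decryption flow
 * (variable names are illustrative, not part of this header):
 *
 *   auto leadParts = cc->MultipartyDecryptLead(skParty0, cts);  // lead party
 *   auto mainParts = cc->MultipartyDecryptMain(skParty1, cts);  // other party
 *   Plaintext result;
 *   cc->MultipartyDecryptFusion({leadParts[0], mainParts[0]}, &result);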
*/ vector<Ciphertext<Element>> MultipartyDecryptMain( const LPPrivateKey<Element> privateKey, const vector<Ciphertext<Element>>& ciphertext) const { if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to MultipartyDecryptMain was not " "generated with this crypto context"); vector<Ciphertext<Element>> newCiphertext; TimeVar t; if (doTiming) TIC(t); for (size_t i = 0; i < ciphertext.size(); i++) { if (ciphertext[i] == nullptr || Mismatched(ciphertext[i]->GetCryptoContext())) PALISADE_THROW(config_error, "A ciphertext passed to MultipartyDecryptMain was not " "generated with this crypto context"); newCiphertext.push_back(GetEncryptionAlgorithm()->MultipartyDecryptMain( privateKey, ciphertext[i])); } if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiPartyDecryptMain, TOC_US(t))); } return newCiphertext; } /** * Threshold FHE: Method for combining the partially decrypted ciphertexts * and getting the final decryption in the clear. * * @param &partialCiphertextVec vector of "partial" decryptions. * @param *plaintext the plaintext output. * @return the decoding result. */ DecryptResult MultipartyDecryptFusion( const vector<Ciphertext<Element>>& partialCiphertextVec, Plaintext* plaintext) const; /** * Threshold FHE: Generates a joined evaluation key * from the current secret share and a prior joined * evaluation key * * @param originalPrivateKey secret key transformed from. * @param newPrivateKey secret key transformed to. * @param ek the prior joined evaluation key. * @return the new joined evaluation key. */ LPEvalKey<Element> MultiKeySwitchGen( const LPPrivateKey<Element> originalPrivateKey, const LPPrivateKey<Element> newPrivateKey, const LPEvalKey<Element> ek) const { if (!originalPrivateKey) PALISADE_THROW(config_error, "Input first private key is nullptr"); if (!newPrivateKey) PALISADE_THROW(config_error, "Input second private key is nullptr"); if (!ek) PALISADE_THROW(config_error, "Input evaluation key is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiKeySwitchGen(originalPrivateKey, newPrivateKey, ek); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiKeySwitchGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generates joined automorphism keys * from the current secret share and prior joined * automorphism keys * * @param privateKey secret key share. * @param eAuto a dictionary with prior joined automorphism keys. * @param &indexList a vector of automorphism indices. * @param keyId - new key identifier used for the resulting evaluation key * @return a dictionary with new joined automorphism keys. 
*/ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiEvalAutomorphismKeyGen( const LPPrivateKey<Element> privateKey, const shared_ptr<std::map<usint, LPEvalKey<Element>>> eAuto, const std::vector<usint>& indexList, const std::string& keyId = "") { if (!privateKey) PALISADE_THROW(config_error, "Input private key is nullptr"); if (!eAuto) PALISADE_THROW(config_error, "Input evaluation key map is nullptr"); if (!indexList.size()) PALISADE_THROW(config_error, "Input index vector is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiEvalAutomorphismKeyGen( privateKey, eAuto, indexList, keyId); if (doTiming) { timeSamples->push_back( TimingInfo(OpMultiEvalAutomorphismKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generates joined rotation keys * from the current secret share and prior joined * rotation keys * * @param privateKey secret key share. * @param eAuto a dictionary with prior joined rotation keys. * @param &indexList a vector of rotation indices. * @param keyId - new key identifier used for the resulting evaluation key * @return a dictionary with new joined rotation keys. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiEvalAtIndexKeyGen( const LPPrivateKey<Element> privateKey, const shared_ptr<std::map<usint, LPEvalKey<Element>>> eAuto, const std::vector<int32_t>& indexList, const std::string& keyId = "") { if (!privateKey) PALISADE_THROW(config_error, "Input private key is nullptr"); if (!eAuto) PALISADE_THROW(config_error, "Input evaluation key map is nullptr"); if (!indexList.size()) PALISADE_THROW(config_error, "Input index vector is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiEvalAtIndexKeyGen(privateKey, eAuto, indexList, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiEvalAtIndexKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generates joined summation evaluation keys * from the current secret share and prior joined * summation keys * * @param privateKey secret key share. * @param eSum a dictionary with prior joined summation keys. * @param keyId - new key identifier used for the resulting evaluation key * @return new joined summation keys. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiEvalSumKeyGen( const LPPrivateKey<Element> privateKey, const shared_ptr<std::map<usint, LPEvalKey<Element>>> eSum, const std::string& keyId = "") { if (!privateKey) PALISADE_THROW(config_error, "Input private key is nullptr"); if (!eSum) PALISADE_THROW(config_error, "Input evaluation key map is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiEvalSumKeyGen(privateKey, eSum, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiEvalSumKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Adds two prior evaluation keys * * @param a first evaluation key. * @param b second evaluation key. * @param keyId - new key identifier used for the resulting evaluation key * @return the new joined key. 
*/
LPEvalKey<Element> MultiAddEvalKeys(LPEvalKey<Element> a, LPEvalKey<Element> b,
                                    const std::string& keyId = "") {
  if (!a) PALISADE_THROW(config_error, "Input first evaluation key is nullptr");
  if (!b) PALISADE_THROW(config_error, "Input second evaluation key is nullptr");
  TimeVar t;
  if (doTiming) TIC(t);
  auto r = GetEncryptionAlgorithm()->MultiAddEvalKeys(a, b, keyId);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpMultiAddEvalKeys, TOC_US(t)));
  }
  return r;
}

/**
 * Threshold FHE: Generates a partial evaluation key for homomorphic
 * multiplication based on the current secret share and an existing partial
 * evaluation key
 *
 * @param evalKey prior evaluation key.
 * @param sk current secret share.
 * @param keyId - new key identifier used for the resulting evaluation key
 * @return the new joined key.
 */
LPEvalKey<Element> MultiMultEvalKey(LPEvalKey<Element> evalKey,
                                    LPPrivateKey<Element> sk,
                                    const std::string& keyId = "") {
  if (!evalKey) PALISADE_THROW(config_error, "Input evaluation key is nullptr");
  if (!sk) PALISADE_THROW(config_error, "Input private key is nullptr");
  TimeVar t;
  if (doTiming) TIC(t);
  auto r = GetEncryptionAlgorithm()->MultiMultEvalKey(evalKey, sk, keyId);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpMultiMultEvalKey, TOC_US(t)));
  }
  return r;
}

/**
 * Threshold FHE: Adds two prior evaluation key sets for summation
 *
 * @param es1 first summation key set.
 * @param es2 second summation key set.
 * @param keyId - new key identifier used for the resulting evaluation key
 * @return the new joined key set for summation.
 */
shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiAddEvalSumKeys(
    const shared_ptr<std::map<usint, LPEvalKey<Element>>> es1,
    const shared_ptr<std::map<usint, LPEvalKey<Element>>> es2,
    const std::string& keyId = "") {
  if (!es1) PALISADE_THROW(config_error, "Input first evaluation key map is nullptr");
  if (!es2) PALISADE_THROW(config_error, "Input second evaluation key map is nullptr");
  TimeVar t;
  if (doTiming) TIC(t);
  auto r = GetEncryptionAlgorithm()->MultiAddEvalSumKeys(es1, es2, keyId);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpMultiAddEvalSumKeys, TOC_US(t)));
  }
  return r;
}

/**
 * Threshold FHE: Adds two prior evaluation key sets for automorphisms
 *
 * @param es1 first automorphism key set.
 * @param es2 second automorphism key set.
 * @param keyId - new key identifier used for the resulting evaluation key.
 * @return the new joined key set for automorphisms.
 */
shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiAddEvalAutomorphismKeys(
    const shared_ptr<std::map<usint, LPEvalKey<Element>>> es1,
    const shared_ptr<std::map<usint, LPEvalKey<Element>>> es2,
    const std::string& keyId = "") {
  if (!es1) PALISADE_THROW(config_error, "Input first evaluation key map is nullptr");
  if (!es2) PALISADE_THROW(config_error, "Input second evaluation key map is nullptr");
  TimeVar t;
  if (doTiming) TIC(t);
  auto r = GetEncryptionAlgorithm()->MultiAddEvalAutomorphismKeys(es1, es2, keyId);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpMultiAddEvalAutomorphismKeys, TOC_US(t)));
  }
  return r;
}

/**
 * Threshold FHE: Adds two partial public keys
 *
 * @param pubKey1 first public key.
 * @param pubKey2 second public key.
 * @param keyId - new key identifier used for the resulting evaluation key.
 * @return the new joined key.
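 *
 * A minimal sketch (hypothetical names; MultipartyKeyGen is assumed here to
 * produce party 2's key share from party 1's public key):
 * @code
 * auto kp1 = cc->KeyGen();
 * auto kp2 = cc->MultipartyKeyGen(kp1.publicKey);
 * auto joinedPublicKey = cc->MultiAddPubKeys(kp1.publicKey, kp2.publicKey,
 *                                            kp2.publicKey->GetKeyTag());
 * @endcode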
*/
LPPublicKey<Element> MultiAddPubKeys(LPPublicKey<Element> pubKey1,
                                     LPPublicKey<Element> pubKey2,
                                     const std::string& keyId = "") {
  if (!pubKey1) PALISADE_THROW(config_error, "Input first public key is nullptr");
  if (!pubKey2) PALISADE_THROW(config_error, "Input second public key is nullptr");
  TimeVar t;
  if (doTiming) TIC(t);
  auto r = GetEncryptionAlgorithm()->MultiAddPubKeys(pubKey1, pubKey2, keyId);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpMultiAddPubKeys, TOC_US(t)));
  }
  return r;
}

/**
 * Threshold FHE: Adds two partial evaluation keys for multiplication
 *
 * @param evalKey1 first evaluation key.
 * @param evalKey2 second evaluation key.
 * @param keyId - new key identifier used for the resulting evaluation key.
 * @return the new joined key.
 */
LPEvalKey<Element> MultiAddEvalMultKeys(LPEvalKey<Element> evalKey1,
                                        LPEvalKey<Element> evalKey2,
                                        const std::string& keyId = "") {
  if (!evalKey1) PALISADE_THROW(config_error, "Input first evaluation key is nullptr");
  if (!evalKey2) PALISADE_THROW(config_error, "Input second evaluation key is nullptr");
  TimeVar t;
  if (doTiming) TIC(t);
  auto r = GetEncryptionAlgorithm()->MultiAddEvalMultKeys(evalKey1, evalKey2, keyId);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpMultiAddEvalMultKeys, TOC_US(t)));
  }
  return r;
}

/**
 * SparseKeyGen generates a key pair with special structure, and without full
 * entropy, for use in special cases like Ring Reduction
 * @return a public/secret key pair
 */
LPKeyPair<Element> SparseKeyGen() {
  TimeVar t;
  if (doTiming) TIC(t);
  auto r = GetEncryptionAlgorithm()->KeyGen(
      CryptoContextFactory<Element>::GetContextForPointer(this), true);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpSparseKeyGen, TOC_US(t)));
  }
  return r;
}

/**
 * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption
 * @param newKey (public)
 * @param oldKey (private)
 * @return new evaluation key
 */
LPEvalKey<Element> ReKeyGen(const LPPublicKey<Element> newKey,
                            const LPPrivateKey<Element> oldKey) const {
  if (newKey == nullptr || oldKey == nullptr ||
      Mismatched(newKey->GetCryptoContext()) ||
      Mismatched(oldKey->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "Keys passed to ReKeyGen were not generated with this "
                   "crypto context");
  TimeVar t;
  if (doTiming) TIC(t);
  auto r = GetEncryptionAlgorithm()->ReKeyGen(newKey, oldKey);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpReKeyGenPubPri, TOC_US(t)));
  }
  return r;
}

/**
 * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption
 * NOTE this functionality has been completely removed from PALISADE
 * @param newKey (private)
 * @param oldKey (private)
 * @return new evaluation key
 */
LPEvalKey<Element> ReKeyGen(const LPPrivateKey<Element> newKey,
                            const LPPrivateKey<Element> oldKey) const
    __attribute__((deprecated("functionality removed from PALISADE")));

/**
 * EvalMultKeyGen creates a key that can be used with the PALISADE EvalMult
 * operator
 * the new evaluation key is stored in cryptocontext
 * @param key
 */
void EvalMultKeyGen(const LPPrivateKey<Element> key);

/**
 * EvalMultKeysGen creates a vector of evalmult keys that can be used with the
 * PALISADE EvalMult operator 1st key (for s^2) is used for multiplication of
 * ciphertexts of depth 1 2nd key (for s^3) is used for multiplication of
 * ciphertexts of depth 2, etc.
 * a vector of new evaluation keys is stored in cryptocontext
 *
 * @param key
 */
void EvalMultKeysGen(const LPPrivateKey<Element> key);

/**
 * GetEvalMultKeyVector fetches the eval mult keys for a given KeyID
 * @param keyID
 * @return key vector from ID
 */
static const vector<LPEvalKey<Element>>& GetEvalMultKeyVector(const string& keyID);

/**
 * GetEvalMultKeys
 * @return map of all the keys
 */
static std::map<string, std::vector<LPEvalKey<Element>>>& GetAllEvalMultKeys();

/**
 * KeySwitchGen creates a key that can be used with the PALISADE KeySwitch
 * operation
 * @param key1
 * @param key2
 * @return new evaluation key
 */
LPEvalKey<Element> KeySwitchGen(const LPPrivateKey<Element> key1,
                                const LPPrivateKey<Element> key2) const {
  if (key1 == nullptr || key2 == nullptr ||
      Mismatched(key1->GetCryptoContext()) ||
      Mismatched(key2->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "Keys passed to KeySwitchGen were not generated with this "
                   "crypto context");
  TimeVar t;
  if (doTiming) TIC(t);
  auto r = GetEncryptionAlgorithm()->KeySwitchGen(key1, key2);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpKeySwitchGen, TOC_US(t)));
  }
  return r;
}

/**
 * Encrypt a plaintext using a given public key
 * @param publicKey
 * @param plaintext
 * @return ciphertext (or null on failure)
 */
Ciphertext<Element> Encrypt(const LPPublicKey<Element> publicKey, Plaintext plaintext) {
  if (publicKey == nullptr)
    PALISADE_THROW(type_error, "null key passed to Encrypt");
  if (plaintext == nullptr)
    PALISADE_THROW(type_error, "Input plaintext is nullptr");
  if (Mismatched(publicKey->GetCryptoContext()))
    PALISADE_THROW(
        config_error,
        "key passed to Encrypt was not generated with this crypto context");
  TimeVar t;
  if (doTiming) TIC(t);
  Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(
      publicKey, plaintext->GetElement<Element>());
  if (ciphertext) {
    ciphertext->SetEncodingType(plaintext->GetEncodingType());
    ciphertext->SetScalingFactor(plaintext->GetScalingFactor());
    ciphertext->SetDepth(plaintext->GetDepth());
    ciphertext->SetLevel(plaintext->GetLevel());
  }
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEncryptPub, TOC_US(t)));
  }
  return ciphertext;
}

/**
 * Encrypt a plaintext using a given private key
 * @param privateKey
 * @param plaintext
 * @return ciphertext (or null on failure)
 */
Ciphertext<Element> Encrypt(const LPPrivateKey<Element> privateKey, Plaintext plaintext) const {
  if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext()))
    PALISADE_THROW(
        config_error,
        "key passed to Encrypt was not generated with this crypto context");
  if (plaintext == nullptr)
    PALISADE_THROW(type_error, "Input plaintext is nullptr");
  TimeVar t;
  if (doTiming) TIC(t);
  Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(
      privateKey, plaintext->GetElement<Element>());
  if (ciphertext) {
    ciphertext->SetEncodingType(plaintext->GetEncodingType());
    ciphertext->SetScalingFactor(plaintext->GetScalingFactor());
    ciphertext->SetDepth(plaintext->GetDepth());
    ciphertext->SetLevel(plaintext->GetLevel());
  }
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEncryptPriv, TOC_US(t)));
  }
  return ciphertext;
}

/**
 * Encrypt a matrix of Plaintext
 * @param publicKey - for encryption
 * @param plaintext - to encrypt
 * @return a matrix of RationalCiphertexts created by encrypting the plaintext
 */
shared_ptr<Matrix<RationalCiphertext<Element>>> EncryptMatrix(
    const LPPublicKey<Element> publicKey, Matrix<Plaintext>& plaintext) {
  if (publicKey == nullptr || Mismatched(publicKey->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "key passed to EncryptMatrix was not generated with this "
                   "crypto context");
  auto zeroAlloc = [=]() {
    return RationalCiphertext<Element>(publicKey->GetCryptoContext(), true);
  };
  auto cipherResults = std::make_shared<Matrix<RationalCiphertext<Element>>>(
      zeroAlloc, plaintext.GetRows(), plaintext.GetCols());
  TimeVar t;
  if (doTiming) TIC(t);
  for (size_t row = 0; row < plaintext.GetRows(); row++) {
    for (size_t col = 0; col < plaintext.GetCols(); col++) {
      if (plaintext(row, col)->Encode() == false) return 0;
      Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(
          publicKey, plaintext(row, col)->GetElement<Element>());
      if (ciphertext) {
        ciphertext->SetEncodingType(plaintext(row, col)->GetEncodingType());
      }
      (*cipherResults)(row, col).SetNumerator(ciphertext);
    }
  }
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEncryptMatrixPlain, TOC_US(t)));
  }
  return cipherResults;
}

/**
 * Encrypt a matrix of Plaintext
 * @param publicKey - for encryption
 * @param plaintext - to encrypt
 * @return a matrix of ciphertexts created by encrypting the plaintext
 */
Matrix<Ciphertext<Element>> EncryptMatrixCiphertext(
    const LPPublicKey<Element> publicKey, Matrix<Plaintext>& plaintext) {
  if (publicKey == nullptr || Mismatched(publicKey->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "key passed to EncryptMatrix was not generated with this "
                   "crypto context");
  auto zeroAlloc = [=]() {
    return Ciphertext<Element>(std::make_shared<CiphertextImpl<Element>>(
        publicKey->GetCryptoContext()));
  };
  Matrix<Ciphertext<Element>> cipherResults(zeroAlloc, plaintext.GetRows(),
                                            plaintext.GetCols());
  TimeVar t;
  if (doTiming) TIC(t);
  for (size_t row = 0; row < plaintext.GetRows(); row++) {
    for (size_t col = 0; col < plaintext.GetCols(); col++) {
      if (plaintext(row, col)->Encode() == false)
        PALISADE_THROW(math_error, "Plaintext is not encoded");
      Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(
          publicKey, plaintext(row, col)->GetElement<Element>());
      if (ciphertext) {
        ciphertext->SetEncodingType(plaintext(row, col)->GetEncodingType());
      }
      cipherResults(row, col) = (ciphertext);
    }
  }
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEncryptMatrixPlain, TOC_US(t)));
  }
  return cipherResults;
}

/**
 * Perform an encryption by reading plaintext from a stream, serializing each
 * piece of ciphertext, and writing the serializations to an output stream
 * @param publicKey - the encryption key in use
 * @param instream - where to read the input from
 * @param outstream - where to write the serialization to
 * @return
 */
void EncryptStream(const LPPublicKey<Element> publicKey,
                   std::istream& instream, std::ostream& outstream) const
    __attribute__((deprecated("serialization changed, see wiki for details")));

// PLAINTEXT FACTORY METHODS
// FIXME to be deprecated in 2.0

/**
 * MakeScalarPlaintext constructs a ScalarEncoding in this context
 * @param value
 * @return plaintext
 */
Plaintext MakeScalarPlaintext(int64_t value) const {
  auto p = PlaintextFactory::MakePlaintext(Scalar, this->GetElementParams(),
                                           this->GetEncodingParams(), value);
  return p;
}

/**
 * MakeStringPlaintext constructs a StringEncoding in this context
 * @param str
 * @return plaintext
 */
Plaintext MakeStringPlaintext(const string& str)
    const {
  auto p = PlaintextFactory::MakePlaintext(String, this->GetElementParams(),
                                           this->GetEncodingParams(), str);
  return p;
}

/**
 * MakeIntegerPlaintext constructs an IntegerEncoding in this context
 * @param value
 * @return plaintext
 */
Plaintext MakeIntegerPlaintext(int64_t value) const {
  auto p = PlaintextFactory::MakePlaintext(Integer, this->GetElementParams(),
                                           this->GetEncodingParams(), value);
  return p;
}

/**
 * MakeFractionalPlaintext constructs a FractionalEncoding in this context
 * @param value
 * @param truncatedBits limit on fractional
 * @return plaintext
 */
Plaintext MakeFractionalPlaintext(int64_t value, size_t truncatedBits = 0) const {
  auto p = PlaintextFactory::MakePlaintext(
      Fractional, this->GetElementParams(), this->GetEncodingParams(), value,
      truncatedBits);
  return p;
}

/**
 * MakeCoefPackedPlaintext constructs a CoefPackedEncoding in this context
 * @param value
 * @return plaintext
 */
Plaintext MakeCoefPackedPlaintext(const vector<int64_t>& value) const {
  auto p = PlaintextFactory::MakePlaintext(
      CoefPacked, this->GetElementParams(), this->GetEncodingParams(), value);
  return p;
}

/**
 * MakePackedPlaintext constructs a PackedEncoding in this context
 * @param value
 * @return plaintext
 */
Plaintext MakePackedPlaintext(const vector<int64_t>& value) const {
  auto p = PlaintextFactory::MakePlaintext(Packed, this->GetElementParams(),
                                           this->GetEncodingParams(), value);
  return p;
}

/**
 * MakePlaintext static that takes a cc and calls the Plaintext Factory
 * @param encoding
 * @param cc
 * @param value
 * @return
 */
template <typename Value1>
static Plaintext MakePlaintext(PlaintextEncodings encoding,
                               CryptoContext<Element> cc, const Value1& value) {
  return PlaintextFactory::MakePlaintext(encoding, cc->GetElementParams(),
                                         cc->GetEncodingParams(), value);
}

template <typename Value1, typename Value2>
static Plaintext MakePlaintext(PlaintextEncodings encoding,
                               CryptoContext<Element> cc, const Value1& value,
                               const Value2& value2) {
  return PlaintextFactory::MakePlaintext(encoding, cc->GetElementParams(),
                                         cc->GetEncodingParams(), value, value2);
}

/**
 * COMPLEX ARITHMETIC IS NOT AVAILABLE STARTING WITH 1.10.6,
 * AND THIS METHOD WILL BE DEPRECATED. USE THE REAL-NUMBER METHOD INSTEAD.
 * MakeCKKSPackedPlaintext constructs a CKKSPackedEncoding in this context
 * from a vector of complex numbers
 * @param value - input vector
 * @param depth - depth used to encode the vector
 * @param level - level at which the vector will get encrypted
 * @param params - parameters to be used for the ciphertext
 * @return plaintext
 */
virtual Plaintext MakeCKKSPackedPlaintext(
    const std::vector<std::complex<double>>& value, size_t depth = 1,
    uint32_t level = 0, const shared_ptr<ParmType> params = nullptr) const {
  Plaintext p;
  const auto cryptoParamsCKKS =
      std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>(
          this->GetCryptoParameters());
  double scFact = cryptoParamsCKKS->GetScalingFactorOfLevel(level);
  if (params == nullptr) {
    shared_ptr<ILDCRTParams<DCRTPoly::Integer>> elemParamsPtr;
    if (level != 0) {
      ILDCRTParams<DCRTPoly::Integer> elemParams =
          *(cryptoParamsCKKS->GetElementParams());
      for (uint32_t i = 0; i < level; i++) {
        elemParams.PopLastParam();
      }
      elemParamsPtr = std::make_shared<ILDCRTParams<DCRTPoly::Integer>>(elemParams);
    } else {
      elemParamsPtr = cryptoParamsCKKS->GetElementParams();
    }
    p = Plaintext(std::make_shared<CKKSPackedEncoding>(
        elemParamsPtr, this->GetEncodingParams(), value, depth, level, scFact));
  } else {
    p = Plaintext(std::make_shared<CKKSPackedEncoding>(
        params, this->GetEncodingParams(), value, depth, level, scFact));
  }
  p->Encode();
  return p;
}

/**
 * MakeCKKSPackedPlaintext constructs a CKKSPackedEncoding in this context
 * from a vector of real numbers
 * @param value - input vector
 * @param depth - depth used to encode the vector
 * @param level - level at which the vector will get encrypted
 * @param params - parameters to be used for the ciphertext
 * @return plaintext
 */
virtual Plaintext MakeCKKSPackedPlaintext(
    const std::vector<double>& value, size_t depth = 1, uint32_t level = 0,
    const shared_ptr<ParmType> params = nullptr) const {
  std::vector<std::complex<double>> complexValue(value.size());
  std::transform(value.begin(), value.end(), complexValue.begin(),
                 [](double da) { return std::complex<double>(da); });
  return MakeCKKSPackedPlaintext(complexValue, depth, level, params);
}

/**
 * GetPlaintextForDecrypt returns a new Plaintext to be used in decryption.
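 *
 * Callers normally do not use this directly; Decrypt allocates the output
 * plaintext through it. A typical decryption sketch (hypothetical names):
 * @code
 * Plaintext result;
 * cc->Decrypt(kp.secretKey, ciphertext, &result);
 * auto values = result->GetPackedValue();  // e.g., for Packed encoding
 * @endcode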
 *
 * @param pte Type of plaintext we want to return
 * @param evp Element parameters
 * @param ep Encoding parameters
 * @return plaintext
 */
static Plaintext GetPlaintextForDecrypt(PlaintextEncodings pte,
                                        shared_ptr<ParmType> evp,
                                        EncodingParams ep);

public:
/**
 * Decrypt a single ciphertext into the appropriate plaintext
 *
 * @param privateKey - decryption key
 * @param ciphertext - ciphertext to decrypt
 * @param plaintext - resulting plaintext object pointer is here
 * @return
 */
DecryptResult Decrypt(const LPPrivateKey<Element> privateKey,
                      ConstCiphertext<Element> ciphertext,
                      Plaintext* plaintext);

/**
 * Decrypt method for a matrix of ciphertexts
 * @param privateKey - for decryption
 * @param ciphertext - matrix of encrypted ciphertexts
 * @param plaintext - pointer to the destination matrix of plaintexts
 * @return size of plaintext
 */
DecryptResult DecryptMatrix(
    const LPPrivateKey<Element> privateKey,
    const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext,
    shared_ptr<Matrix<Plaintext>>* numerator,
    shared_ptr<Matrix<Plaintext>>* denominator) const {
  if (ciphertext == nullptr)
    PALISADE_THROW(type_error, "Input ciphertext is nullptr");
  if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "Information passed to DecryptMatrix was not generated "
                   "with this crypto context");
  // edge case
  if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0))
    return DecryptResult();
  const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator();
  // need to build matrices for the result
  Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(),
                                         this->GetElementParams(),
                                         this->GetEncodingParams());
  auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };
  *numerator = std::make_shared<Matrix<Plaintext>>(
      zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols());
  *denominator = std::make_shared<Matrix<Plaintext>>(
      zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols());
  TimeVar t;
  if (doTiming) TIC(t);
  for (size_t row = 0; row < ciphertext->GetRows(); row++) {
    for (size_t col = 0; col < ciphertext->GetCols(); col++) {
      if (Mismatched((*ciphertext)(row, col).GetCryptoContext()))
        PALISADE_THROW(config_error,
                       "A ciphertext passed to DecryptMatrix was not "
                       "generated with this crypto context");
      const Ciphertext<Element> ctN = (*ciphertext)(row, col).GetNumerator();
      // determine which type of plaintext that you need to decrypt into
      Plaintext decryptedNumerator = GetPlaintextForDecrypt(
          ctN->GetEncodingType(), this->GetElementParams(),
          this->GetEncodingParams());
      DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(
          privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());
      if (resultN.isValid == false) return resultN;
      (**numerator)(row, col) = decryptedNumerator;
      (**numerator)(row, col)->Decode();
      Plaintext decryptedDenominator = GetPlaintextForDecrypt(
          ctN->GetEncodingType(), this->GetElementParams(),
          this->GetEncodingParams());
      if ((*ciphertext)(row, col).GetIntegerFlag() == true) {
        decryptedDenominator->GetElement<Poly>().SetValuesToZero();
        decryptedDenominator->GetElement<Poly>().at(0) = 1;
      } else {
        const Ciphertext<Element> ctD = (*ciphertext)(row, col).GetDenominator();
        DecryptResult resultD = GetEncryptionAlgorithm()->Decrypt(
            privateKey, ctD, &decryptedDenominator->GetElement<NativePoly>());
        if (resultD.isValid == false) return resultD;
        (**denominator)(row, col) = decryptedDenominator;
      }
      (**denominator)(row, col)->Decode();
    }
  }
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpDecryptMatrixPlain, TOC_US(t)));
  }
  return DecryptResult(
      (**numerator)((*numerator)->GetRows() - 1, (*numerator)->GetCols() - 1)
          ->GetLength());
}

/**
 * Decrypt method for a matrix of ciphertexts
 * @param privateKey - for decryption
 * @param ciphertext - matrix of encrypted ciphertexts
 * @param plaintext - pointer to the destination matrix of plaintexts
 * @return size of plaintext
 */
DecryptResult DecryptMatrixCiphertext(
    const LPPrivateKey<Element> privateKey,
    const Matrix<Ciphertext<Element>> ciphertext,
    Matrix<Plaintext>* numerator) const {
  if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "Information passed to DecryptMatrix was not generated "
                   "with this crypto context");
  // edge case
  if ((ciphertext.GetCols() == 0) && (ciphertext.GetRows() == 0))
    return DecryptResult();
  const Ciphertext<Element> ctN = (ciphertext)(0, 0);
  // need to build matrices for the result
  // Plaintext ptx =
  //     GetPlaintextForDecrypt(ctN->GetEncodingType(),
  //                            this->GetElementParams(),
  //                            this->GetEncodingParams());
  // auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };
  // numerator = std::make_shared<Matrix<Plaintext>>(zeroPackingAlloc,
  //                                                 ciphertext.GetRows(),
  //                                                 ciphertext.GetCols());
  TimeVar t;
  if (doTiming) TIC(t);
  for (size_t row = 0; row < ciphertext.GetRows(); row++) {
    for (size_t col = 0; col < ciphertext.GetCols(); col++) {
      if (Mismatched((ciphertext(row, col))->GetCryptoContext()))
        PALISADE_THROW(config_error,
                       "A ciphertext passed to DecryptMatrix was not "
                       "generated with this crypto context");
      const Ciphertext<Element> ctN = (ciphertext)(row, col);
      // determine which type of plaintext that you need to decrypt into
      Plaintext decryptedNumerator = GetPlaintextForDecrypt(
          ctN->GetEncodingType(), this->GetElementParams(),
          this->GetEncodingParams());
      DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(
          privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());
      if (resultN.isValid == false) return resultN;
      (*numerator)(row, col) = decryptedNumerator;
      (*numerator)(row, col)->Decode();
    }
  }
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpDecryptMatrixPlain, TOC_US(t)));
  }
  return DecryptResult(
      (*numerator)(numerator->GetRows() - 1, numerator->GetCols() - 1)
          ->GetLength());
}

/**
 * Decrypt method for numerators in a matrix of ciphertexts (packed encoding)
 * @param privateKey - for decryption
 * @param ciphertext - matrix of encrypted ciphertexts
 * @param plaintext - pointer to the destination matrix of plaintexts
 * @return size of plaintext
 */
DecryptResult DecryptMatrixNumerator(
    const LPPrivateKey<Element> privateKey,
    const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext,
    shared_ptr<Matrix<Plaintext>>* numerator) const {
  if (ciphertext == nullptr)
    PALISADE_THROW(type_error, "Input ciphertext is nullptr");
  if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "Information passed to DecryptMatrix was not generated "
                   "with this crypto context");
  // edge case
  if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0))
    return DecryptResult();
  TimeVar t;
  if (doTiming) TIC(t);
  // force all precomputations to take place in advance
  if (Mismatched((*ciphertext)(0, 0).GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "A ciphertext passed to DecryptMatrix was not generated "
                   "with this crypto context");
  const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator();
  // need to build a numerator matrix for the result
  Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(),
                                         this->GetElementParams(),
                                         this->GetEncodingParams());
  auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };
  *numerator = std::make_shared<Matrix<Plaintext>>(
      zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols());
  Plaintext decryptedNumerator = GetPlaintextForDecrypt(
      ctN->GetEncodingType(), this->GetElementParams(),
      this->GetEncodingParams());
  DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(
      privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());
  if (resultN.isValid == false) return resultN;
  (**numerator)(0, 0) = decryptedNumerator;
  (**numerator)(0, 0)->Decode();
  for (size_t row = 0; row < ciphertext->GetRows(); row++) {
    for (size_t col = 0; col < ciphertext->GetCols(); col++) {
      if (row + col > 0) {
        if (Mismatched((*ciphertext)(row, col).GetCryptoContext()))
          PALISADE_THROW(config_error,
                         "A ciphertext passed to DecryptMatrix was not "
                         "generated with this crypto context");
        const Ciphertext<Element> ctN = (*ciphertext)(row, col).GetNumerator();
        Plaintext decryptedNumerator = GetPlaintextForDecrypt(
            ctN->GetEncodingType(), this->GetElementParams(),
            this->GetEncodingParams());
        GetEncryptionAlgorithm()->Decrypt(
            privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());
        (**numerator)(row, col) = decryptedNumerator;
        (**numerator)(row, col)->Decode();
      }
    }
  }
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpDecryptMatrixPacked, TOC_US(t)));
  }
  return DecryptResult(
      (**numerator)((*numerator)->GetRows() - 1, (*numerator)->GetCols() - 1)
          ->GetLength());
}

/**
 * read instream for a sequence of serialized ciphertext; deserialize it,
 * decrypt it, and write it to outstream
 * @param privateKey - reference to the decryption key
 * @param instream - input stream with sequence of serialized ciphertexts
 * @param outstream - output stream for plaintext
 * @return total bytes processed
 */
size_t DecryptStream(const LPPrivateKey<Element> privateKey,
                     std::istream& instream, std::ostream& outstream)
    __attribute__((deprecated("serialization changed, see wiki for details")));

/**
 * ReEncrypt - Proxy Re Encryption mechanism for PALISADE
 * @param evalKey - evaluation key from the PRE keygen method
 * @param ciphertext - the ciphertext to re-encrypt
 * @param publicKey the public key of the recipient of the re-encrypted
 * ciphertext.
 * @return the re-encrypted ciphertext
 */
Ciphertext<Element> ReEncrypt(LPEvalKey<Element> evalKey,
                              ConstCiphertext<Element> ciphertext,
                              const LPPublicKey<Element> publicKey = nullptr) const {
  if (evalKey == nullptr || Mismatched(evalKey->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "Information passed to ReEncrypt was not generated with "
                   "this crypto context");
  if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "The ciphertext passed to ReEncrypt was not generated "
                   "with this crypto context");
  TimeVar t;
  if (doTiming) TIC(t);
  Ciphertext<Element> newCiphertext =
      GetEncryptionAlgorithm()->ReEncrypt(evalKey, ciphertext, publicKey);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpReEncrypt, TOC_US(t)));
  }
  return newCiphertext;
}

/**
 * read instream for a serialized ciphertext.
deserialize, re-encrypt, * serialize, and write to outstream * @param evalKey - reference to the re-encryption key * @param instream - input stream with sequence of serialized ciphertext * @param outstream - output stream with sequence of serialized re-encrypted * ciphertext */ void ReEncryptStream(const LPEvalKey<Element> evalKey, std::istream& instream, std::ostream& outstream, const LPPublicKey<Element> publicKey = nullptr) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * EvalAdd - PALISADE EvalAdd method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 + ct2 */ Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAdd(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAdd, TOC_US(t))); } return rv; } /** * EvalAdd - PALISADE EvalAddMutable method for a pair of ciphertexts. * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. * * @param ct1 * @param ct2 * @return new ciphertext for ct1 + ct2 */ Ciphertext<Element> EvalAddMutable(Ciphertext<Element>& ct1, Ciphertext<Element>& ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAddMutable(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAdd, TOC_US(t))); } return rv; } /** * EvalAddMatrix - PALISADE EvalAdd method for a pair of matrices of * ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 + ct2 */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalAddMatrix( const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { TypeCheck((*ct1)(0, 0), (*ct2)(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<RationalCiphertext<Element>> rv = *ct1 + *ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddMatrix, TOC_US(t))); } return std::make_shared<Matrix<RationalCiphertext<Element>>>(rv); } /** * EvalAddMatrix - PALISADE EvalAdd method for a pair of matrices of * ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 + ct2 */ Matrix<Ciphertext<Element>> EvalAddMatrix( const Matrix<Ciphertext<Element>>& ct1, const Matrix<Ciphertext<Element>>& ct2) const { TypeCheck(ct1(0, 0), ct2(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<Ciphertext<Element>> rv = ct1 + ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddMatrix, TOC_US(t))); } // Matrix<Ciphertext<Element>> a(rv); return rv; } /** * EvalSub - PALISADE EvalSub method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 - ct2 */ Ciphertext<Element> EvalSub(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalSub(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSub, TOC_US(t))); } return rv; } /** * EvalSub - PALISADE EvalSubMutable method for a pair of ciphertexts * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. 
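 *
 * A minimal sketch (hypothetical names; ct1 and ct2 encrypt packed vectors
 * under the same key):
 * @code
 * auto ctDiff  = cc->EvalSub(ct1, ct2);         // non-mutating variant
 * auto ctDiffM = cc->EvalSubMutable(ct1, ct2);  // may rescale/level-reduce inputs
 * @endcode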
 *
 * @param ct1
 * @param ct2
 * @return new ciphertext for ct1 - ct2
 */
Ciphertext<Element> EvalSubMutable(Ciphertext<Element>& ct1,
                                   Ciphertext<Element>& ct2) const {
  TypeCheck(ct1, ct2);
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalSubMutable(ct1, ct2);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalSub, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalSubMatrix - PALISADE EvalSub method for a pair of matrices of
 * ciphertexts
 * @param ct1
 * @param ct2
 * @return new matrix for ct1 - ct2
 */
shared_ptr<Matrix<RationalCiphertext<Element>>> EvalSubMatrix(
    const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1,
    const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const {
  TypeCheck((*ct1)(0, 0), (*ct2)(0, 0));
  // TODO only checking one; when Matrix is
  // refactored, this should be revisited
  TimeVar t;
  if (doTiming) TIC(t);
  Matrix<RationalCiphertext<Element>> rv = *ct1 - *ct2;
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalSubMatrix, TOC_US(t)));
  }
  return std::make_shared<Matrix<RationalCiphertext<Element>>>(rv);
}

/**
 * EvalSubMatrix - PALISADE EvalSub method for a pair of matrices of
 * ciphertexts
 * @param ct1
 * @param ct2
 * @return new matrix for ct1 - ct2
 */
Matrix<Ciphertext<Element>> EvalSubMatrix(
    const Matrix<Ciphertext<Element>>& ct1,
    const Matrix<Ciphertext<Element>>& ct2) const {
  TypeCheck(ct1(0, 0), ct2(0, 0));
  // TODO only checking one; when Matrix is
  // refactored, this should be revisited
  TimeVar t;
  if (doTiming) TIC(t);
  Matrix<Ciphertext<Element>> rv = ct1 - ct2;
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalSubMatrix, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalAdd - PALISADE EvalAdd method for a ciphertext and plaintext
 * @param ciphertext
 * @param plaintext
 * @return new ciphertext for ciphertext + plaintext
 */
Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext,
                            ConstPlaintext plaintext) const {
  TypeCheck(ciphertext, plaintext);
  TimeVar t;
  if (doTiming) TIC(t);
  plaintext->SetFormat(EVALUATION);
  auto rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, plaintext);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalAddPlain, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalAdd - PALISADE EvalAddMutable method for a ciphertext and plaintext
 * This is a mutable version - input ciphertexts may get automatically
 * rescaled, or level-reduced.
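 *
 * A minimal sketch (hypothetical names):
 * @code
 * Plaintext pt = cc->MakePackedPlaintext({1, 2, 3});
 * auto ctSum = cc->EvalAdd(ct, pt);  // ciphertext + plaintext, no extra key needed
 * @endcode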
* * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext + plaintext */ Ciphertext<Element> EvalAddMutable(Ciphertext<Element>& ciphertext, Plaintext plaintext) const { TypeCheck((ConstCiphertext<Element>)ciphertext, (ConstPlaintext)plaintext); TimeVar t; if (doTiming) TIC(t); plaintext->SetFormat(EVALUATION); auto rv = GetEncryptionAlgorithm()->EvalAddMutable(ciphertext, plaintext); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddPlain, TOC_US(t))); } return rv; } /** * EvalAdd - PALISADE EvalAdd method for a ciphertext and constant * @param ciphertext * @param constant * @return new ciphertext for ciphertext + constant */ Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext, double constant) const { TimeVar t; Ciphertext<Element> rv; if (constant >= 0) { if (doTiming) TIC(t); rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddConst, TOC_US(t))); } } else { TimeVar t; if (doTiming) TIC(t); rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, -constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddConst, TOC_US(t))); } } return rv; } /** * EvalLinearWSum - PALISADE EvalLinearWSum method to compute a linear * weighted sum * * @param ciphertexts a list of ciphertexts * @param constants a list of weights * @return new ciphertext containing the weighted sum */ Ciphertext<Element> EvalLinearWSum(vector<Ciphertext<Element>> ciphertexts, vector<double> constants) const { TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalLinearWSum(ciphertexts, constants); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalLinearWSum, TOC_US(t))); } return rv; } /** * EvalLinearWSum - method to compute a linear weighted sum. * This is a mutable version, meaning the level/depth of input * ciphertexts may change in the process. 
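 *
 * A minimal sketch (hypothetical names; weighted sums with double weights
 * are primarily a CKKS feature):
 * @code
 * std::vector<Ciphertext<DCRTPoly>> cts = {ct1, ct2, ct3};
 * std::vector<double> weights = {0.25, 0.5, 0.25};
 * auto ctAvg = cc->EvalLinearWSum(cts, weights);  // 0.25*ct1 + 0.5*ct2 + 0.25*ct3
 * @endcode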
* * @param ciphertexts a list of ciphertexts * @param constants a list of weights * @return new ciphertext containing the weighted sum */ Ciphertext<Element> EvalLinearWSumMutable( vector<Ciphertext<Element>> ciphertexts, vector<double> constants) const { TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalLinearWSumMutable(ciphertexts, constants); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalLinearWSum, TOC_US(t))); } return rv; } inline Ciphertext<Element> EvalLinearWSum( vector<double> constants, vector<Ciphertext<Element>> ciphertexts) const { return EvalLinearWSum(ciphertexts, constants); } inline Ciphertext<Element> EvalLinearWSumMutable( vector<double> constants, vector<Ciphertext<Element>> ciphertexts) const { return EvalLinearWSumMutable(ciphertexts, constants); } inline Ciphertext<Element> EvalAdd( ConstPlaintext plaintext, ConstCiphertext<Element> ciphertext) const { return EvalAdd(ciphertext, plaintext); } inline Ciphertext<Element> EvalAddMutable( Plaintext plaintext, Ciphertext<Element>& ciphertext) const { return EvalAddMutable(ciphertext, plaintext); } inline Ciphertext<Element> EvalAdd( double constant, ConstCiphertext<Element> ciphertext) const { return EvalAdd(ciphertext, constant); } /** * EvalSubPlain - PALISADE EvalSub method for a ciphertext and plaintext * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext - plaintext */ Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext, ConstPlaintext plaintext) const { TypeCheck(ciphertext, plaintext); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, plaintext); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubPlain, TOC_US(t))); } return rv; } /** * EvalSubPlain - PALISADE EvalSubMutable method for a ciphertext and * plaintext This is a mutable version - input ciphertexts may get * automatically rescaled, or level-reduced. 
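 *
 * A minimal sketch (hypothetical names):
 * @code
 * auto a = cc->EvalSub(ct, pt);   // ciphertext - plaintext
 * auto b = cc->EvalSub(ct, 3.5);  // ciphertext - constant
 * auto c = cc->EvalSub(3.5, ct);  // constant - ciphertext (via negation)
 * @endcode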
* * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext - plaintext */ Ciphertext<Element> EvalSubMutable(Ciphertext<Element>& ciphertext, Plaintext plaintext) const { TypeCheck((ConstCiphertext<Element>)ciphertext, (ConstPlaintext)plaintext); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalSubMutable(ciphertext, plaintext); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubPlain, TOC_US(t))); } return rv; } /** * EvalSub - PALISADE EvalSub method for a ciphertext and constant * @param ciphertext * @param constant * @return new ciphertext for ciphertext - constant */ Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext, double constant) const { TimeVar t; Ciphertext<Element> rv; if (constant >= 0) { if (doTiming) TIC(t); rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubConst, TOC_US(t))); } } else { if (doTiming) TIC(t); rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, -constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubConst, TOC_US(t))); } } return rv; } inline Ciphertext<Element> EvalSub( ConstPlaintext plaintext, ConstCiphertext<Element> ciphertext) const { return EvalAdd(EvalNegate(ciphertext), plaintext); } inline Ciphertext<Element> EvalSubMutable( Plaintext plaintext, Ciphertext<Element>& ciphertext) const { Ciphertext<Element> negated = EvalNegate(ciphertext); Ciphertext<Element> result = EvalAddMutable(negated, plaintext); ciphertext = EvalNegate(negated); return result; } inline Ciphertext<Element> EvalSub( double constant, ConstCiphertext<Element> ciphertext) const { return EvalAdd(EvalNegate(ciphertext), constant); } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts - with key * switching * @param ct1 * @param ct2 * @return new ciphertext for ct1 * ct2 */ Ciphertext<Element> EvalMult(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { TypeCheck(ct1, ct2); auto ek = GetEvalMultKeyVector(ct1->GetKeyTag()); if (!ek.size()) { PALISADE_THROW(type_error, "Evaluation key has not been generated for EvalMult"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2, ek[0]); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t))); } return rv; } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts - with key * switching This is a mutable version - input ciphertexts may get * automatically rescaled, or level-reduced. 
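 *
 * A minimal sketch (hypothetical names; the relinearization key must be
 * generated once before ciphertext-ciphertext multiplication):
 * @code
 * cc->EvalMultKeyGen(kp.secretKey);
 * auto ctProd = cc->EvalMult(ct1, ct2);  // relinearized product
 * @endcode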
 *
 * @param ct1
 * @param ct2
 * @return new ciphertext for ct1 * ct2
 */
Ciphertext<Element> EvalMultMutable(Ciphertext<Element>& ct1,
                                    Ciphertext<Element>& ct2) const {
  TypeCheck(ct1, ct2);
  auto ek = GetEvalMultKeyVector(ct1->GetKeyTag());
  if (!ek.size()) {
    PALISADE_THROW(type_error,
                   "Evaluation key has not been generated for EvalMult");
  }
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ct1, ct2, ek[0]);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalMult - PALISADE EvalMult method for a pair of ciphertexts - no key
 * switching (relinearization)
 * @param ct1
 * @param ct2
 * @return new ciphertext for ct1 * ct2
 */
Ciphertext<Element> EvalMultNoRelin(ConstCiphertext<Element> ct1,
                                    ConstCiphertext<Element> ct2) const {
  TypeCheck(ct1, ct2);
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalMultMany - PALISADE function for evaluating multiplication on
 * ciphertext followed by relinearization operation (at the end). It computes
 * the multiplication in a binary tree manner. Also, it reduces the number of
 * elements in the ciphertext to two after each multiplication.
 * Currently it assumes that two consecutive input arguments have
 * total depth smaller than the supported depth. Otherwise, it throws an
 * error.
 *
 * @param ct is the ciphertext list.
 *
 * @return new ciphertext.
 */
Ciphertext<Element> EvalMultMany(const vector<Ciphertext<Element>>& ct) const {
  // input parameter check
  if (!ct.size()) PALISADE_THROW(type_error, "Empty input ciphertext vector");
  const auto ek = GetEvalMultKeyVector(ct[0]->GetKeyTag());
  if (ek.size() < (ct[0]->GetElements().size() - 2)) {
    PALISADE_THROW(type_error,
                   "Insufficient value was used for maxDepth to generate "
                   "keys for EvalMult");
  }
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalMultMany(ct, ek);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalMultMany, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalAddMany - Evaluate addition on a vector of ciphertexts.
 * It computes the addition in a binary tree manner.
 *
 * @param ctList is the list of ciphertexts.
 *
 * @return new ciphertext.
 */
Ciphertext<Element> EvalAddMany(const vector<Ciphertext<Element>>& ctList) const {
  // input parameter check
  if (!ctList.size()) PALISADE_THROW(type_error, "Empty input ciphertext vector");
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalAddMany(ctList);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalAddMany, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalAddManyInPlace - Evaluate addition on a vector of ciphertexts.
 * Addition is computed in a binary tree manner. Difference with EvalAddMany
 * is that EvalAddManyInPlace uses the input ciphertext vector to store
 * intermediate results, to avoid the overhead of using extra temporary
 * space.
 *
 * @param ctList is the list of ciphertexts.
 *
 * @return new ciphertext.
 */
Ciphertext<Element> EvalAddManyInPlace(vector<Ciphertext<Element>>& ctList) const {
  // input parameter check
  if (!ctList.size()) PALISADE_THROW(type_error, "Empty input ciphertext vector");
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalAddManyInPlace(ctList);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalAddManyInPlace, TOC_US(t)));
  }
  return rv;
}

/**
 * Function for evaluating multiplication on ciphertext followed by
 * relinearization operation.
 * Currently it assumes that the input arguments
 * have total depth smaller than the supported depth. Otherwise, it throws an
 * error.
 *
 * @param ct1 first input ciphertext.
 * @param ct2 second input ciphertext.
 *
 * @return new ciphertext
 */
Ciphertext<Element> EvalMultAndRelinearize(ConstCiphertext<Element> ct1,
                                           ConstCiphertext<Element> ct2) const {
  // input parameter check
  if (!ct1 || !ct2) PALISADE_THROW(type_error, "Input ciphertext is nullptr");
  const auto ek = GetEvalMultKeyVector(ct1->GetKeyTag());
  if (ek.size() <
      (ct1->GetElements().size() + ct2->GetElements().size() - 3)) {
    PALISADE_THROW(type_error,
                   "Insufficient value was used for maxDepth to generate "
                   "keys for EvalMult");
  }
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalMultAndRelinearize(ct1, ct2, ek);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t)));
  }
  return rv;
}

/**
 * Function for relinearization of a ciphertext.
 *
 * @param ct input ciphertext.
 *
 * @return relinearized ciphertext
 */
Ciphertext<Element> Relinearize(ConstCiphertext<Element> ct) const {
  // input parameter check
  if (!ct) PALISADE_THROW(type_error, "Input ciphertext is nullptr");
  const auto ek = GetEvalMultKeyVector(ct->GetKeyTag());
  if (ek.size() < (ct->GetElements().size() - 2)) {
    PALISADE_THROW(type_error,
                   "Insufficient value was used for maxDepth to generate "
                   "keys for EvalMult");
  }
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->Relinearize(ct, ek);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalRelin, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalMult - PALISADE EvalMult method for plaintext * ciphertext
 * @param pt2
 * @param ct1
 * @return new ciphertext for ct1 * pt2
 */
inline Ciphertext<Element> EvalMult(ConstPlaintext pt2,
                                    ConstCiphertext<Element> ct1) const {
  return EvalMult(ct1, pt2);
}

/**
 * EvalMult - PALISADE EvalMultMutable method for plaintext * ciphertext
 * @param pt2
 * @param ct1
 * @return new ciphertext for ct1 * pt2
 */
inline Ciphertext<Element> EvalMultMutable(Plaintext pt2,
                                           Ciphertext<Element>& ct1) const {
  return EvalMultMutable(ct1, pt2);
}

/**
 * EvalMult - PALISADE EvalMult method for constant * ciphertext
 * @param constant
 * @param ct1
 * @return new ciphertext for ct1 * constant
 */
inline Ciphertext<Element> EvalMult(double constant,
                                    ConstCiphertext<Element> ct1) const {
  return EvalMult(ct1, constant);
}

inline Ciphertext<Element> EvalMultMutable(double constant,
                                           Ciphertext<Element>& ct1) const {
  return EvalMultMutable(ct1, constant);
}

/**
 * EvalRightShift - works only for Fractional Encoding
 * @param ct1
 * @param divisor right-shift amount
 * @return new ciphertext for ct1 >> divisor
 */
Ciphertext<Element> EvalRightShift(ConstCiphertext<Element> ct1,
                                   size_t divisor) const {
  if (ct1 && ct1->GetEncodingType() != Fractional) {
    std::stringstream ss;
    ss << "A " << Fractional
       << " encoded ciphertext is required for the EvalRightShift operation";
    PALISADE_THROW(type_error, ss.str());
  }
  Plaintext plaintextShift = MakeFractionalPlaintext(0, divisor);
  TypeCheck(ct1, plaintextShift);
  double start = 0;
  if (doTiming) start = currentDateTime();
  auto rv = EvalMult(ct1, plaintextShift);
  if (doTiming) {
    timeSamples->push_back(
        TimingInfo(OpEvalRightShift, currentDateTime() - start));
  }
  return rv;
}

/**
 * EvalMult - PALISADE EvalMult method for plaintext * ciphertext
 * @param ct1
 * @param pt2
 * @return new ciphertext for ct1 * pt2
 */
Ciphertext<Element> EvalMult(ConstCiphertext<Element> ct1,
                             ConstPlaintext pt2) const {
  TypeCheck(ct1, pt2);
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, pt2);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalMult - PALISADE EvalMultMutable method for plaintext * ciphertext
 * This is a mutable version - input ciphertexts may get automatically
 * rescaled, or level-reduced.
 *
 * @param ct1
 * @param pt2
 * @return new ciphertext for ct1 * pt2
 */
Ciphertext<Element> EvalMultMutable(Ciphertext<Element>& ct1,
                                    Plaintext pt2) const {
  TypeCheck(ct1, pt2);
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ct1, pt2);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalMult - PALISADE EvalMult method for a ciphertext and constant
 * @param ciphertext
 * @param constant
 * @return new ciphertext for ciphertext * constant
 */
Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext,
                             double constant) const {
  // input parameter check
  if (!ciphertext) {
    PALISADE_THROW(type_error, "Input ciphertext is nullptr");
  }
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalMult(ciphertext, constant);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalMultConst, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalMult - PALISADE EvalMult method for a ciphertext and constant
 * This is a mutable version - input ciphertexts may get automatically
 * rescaled, or level-reduced.
 *
 * @param ciphertext
 * @param constant
 * @return new ciphertext for ciphertext * constant
 */
Ciphertext<Element> EvalMultMutable(Ciphertext<Element>& ciphertext,
                                    double constant) const {
  // input parameter check
  if (!ciphertext) {
    PALISADE_THROW(type_error, "Input ciphertext is nullptr");
  }
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ciphertext, constant);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalMultConst, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalMultMatrix - PALISADE EvalMult method for two matrices of ciphertext
 * @param ct1
 * @param ct2
 * @return new matrix for ct1 * ct2
 */
shared_ptr<Matrix<RationalCiphertext<Element>>> EvalMultMatrix(
    const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1,
    const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const {
  TypeCheck((*ct1)(0, 0), (*ct2)(0, 0));
  // TODO only checking one; when Matrix is
  // refactored, this should be revisited
  TimeVar t;
  if (doTiming) TIC(t);
  Matrix<RationalCiphertext<Element>> rv = *ct1 * *ct2;
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalMultMatrix, TOC_US(t)));
  }
  return std::make_shared<Matrix<RationalCiphertext<Element>>>(rv);
}

/**
 * EvalNegate - PALISADE negation method for a ciphertext
 * @param ct
 * @return new ciphertext -ct
 */
Ciphertext<Element> EvalNegate(ConstCiphertext<Element> ct) const {
  if (ct == nullptr || Mismatched(ct->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "Information passed to EvalNegate was not generated with "
                   "this crypto context");
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalNegate(ct);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalNeg, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalNegateMatrix - PALISADE negation method for a matrix of ciphertexts
 * @param ct
 * @return new matrix -ct
 */
shared_ptr<Matrix<RationalCiphertext<Element>>> EvalNegateMatrix(
    const shared_ptr<Matrix<RationalCiphertext<Element>>> ct) const {
  if (ct == nullptr || Mismatched((*ct)(0, 0).GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "Information passed to EvalNegateMatrix was not generated "
                   "with this crypto context");
  TimeVar t;
  if (doTiming) TIC(t);
  auto m =
      std::make_shared<Matrix<RationalCiphertext<Element>>>(
          ct->GetAllocator(), ct->GetRows(), ct->GetCols());
  for (size_t r = 0; r < m->GetRows(); r++)
    for (size_t c = 0; c < m->GetCols(); c++)
      (*m)(r, c) = -((*ct)(r, c));
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalNegMatrix, TOC_US(t)));
  }
  return m;
}

/**
 * Generate automorphism keys for a given private key
 *
 * @param publicKey original public key.
 * @param origPrivateKey original private key.
 * @param indexList list of automorphism indices to be computed
 * @return returns the evaluation keys; index 0 of the vector corresponds to
 * plaintext index 2, index 1 to plaintext index 3, etc.
 */
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(
    const LPPublicKey<Element> publicKey,
    const LPPrivateKey<Element> origPrivateKey,
    const std::vector<usint>& indexList) const {
  if (publicKey == nullptr || origPrivateKey == nullptr)
    PALISADE_THROW(type_error, "Null Keys");
  if (!indexList.size())
    PALISADE_THROW(config_error, "Input index vector is empty");
  if (publicKey->GetCryptoContext().get() != this)
    PALISADE_THROW(type_error, "Key was not created in this CryptoContextImpl");
  if (publicKey->GetCryptoContext() != origPrivateKey->GetCryptoContext())
    PALISADE_THROW(type_error,
                   "Keys were not created in the same CryptoContextImpl");
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(
      publicKey, origPrivateKey, indexList);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalAutomorphismKeyGen, TOC_US(t)));
  }
  return rv;
}

/**
 * Function for evaluating automorphism of ciphertext at index i
 *
 * @param ciphertext the input ciphertext.
 * @param i automorphism index
 * @param &evalKeys - reference to the vector of evaluation keys generated by
 * EvalAutomorphismKeyGen.
 * @return resulting ciphertext
 */
Ciphertext<Element> EvalAutomorphism(
    ConstCiphertext<Element> ciphertext, usint i,
    const std::map<usint, LPEvalKey<Element>>& evalKeys,
    CALLER_INFO_ARGS_HDR) const {
  if (nullptr == ciphertext) {
    std::string errorMsg(std::string("Input ciphertext is nullptr") + CALLER_INFO);
    PALISADE_THROW(type_error, errorMsg);
  }
  if (evalKeys.empty()) {
    std::string errorMsg(std::string("Empty input key map") + CALLER_INFO);
    PALISADE_THROW(type_error, errorMsg);
  }
  auto tk = evalKeys.begin()->second;
  if (nullptr == tk) {
    std::string errorMsg(std::string("Invalid evalKey") + CALLER_INFO);
    PALISADE_THROW(type_error, errorMsg);
  }
  if (ciphertext->GetCryptoContext().get() != this) {
    std::string errorMsg(
        std::string("Ciphertext was not created in this CryptoContextImpl") +
        CALLER_INFO);
    PALISADE_THROW(type_error, errorMsg);
  }
  if (ciphertext->GetCryptoContext() != tk->GetCryptoContext()) {
    std::string errorMsg(
        std::string("Items were not created in the same CryptoContextImpl") +
        CALLER_INFO);
    PALISADE_THROW(type_error, errorMsg);
  }
  if (ciphertext->GetKeyTag() != tk->GetKeyTag()) {
    std::string errorMsg(
        std::string("Items were not encrypted with same keys") + CALLER_INFO);
    PALISADE_THROW(type_error, errorMsg);
  }
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalAutomorphism(ciphertext, i, evalKeys);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalAutomorphismI, TOC_US(t)));
  }
  return rv;
}

/**
 * Generate automorphism keys for a given private key; Uses the private key
 * for encryption
 *
 * @param privateKey private key.
 * @param indexList list of automorphism indices to be computed
 * @return returns the evaluation keys
 */
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(
    const LPPrivateKey<Element> privateKey,
    const std::vector<usint>& indexList) const {
  if (privateKey == nullptr) PALISADE_THROW(type_error, "Null input");
  if (!indexList.size())
    PALISADE_THROW(config_error, "Input index vector is empty");
  if (privateKey->GetCryptoContext().get() != this)
    PALISADE_THROW(type_error, "Key was not created in this CryptoContextImpl");
  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(privateKey, indexList);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalAutomorphismK, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalSumKeyGen Generates the key map to be used by evalsum
 *
 * @param privateKey private key.
 * @param publicKey public key (used in NTRU schemes).
 */
void EvalSumKeyGen(const LPPrivateKey<Element> privateKey,
                   const LPPublicKey<Element> publicKey = nullptr);

shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumRowsKeyGen(
    const LPPrivateKey<Element> privateKey,
    const LPPublicKey<Element> publicKey = nullptr, usint rowSize = 0);

shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumColsKeyGen(
    const LPPrivateKey<Element> privateKey,
    const LPPublicKey<Element> publicKey = nullptr);

/**
 * GetEvalSumKey returns the map
 *
 * @return the EvalSum key map
 */
static const std::map<usint, LPEvalKey<Element>>& GetEvalSumKeyMap(const string& id);

static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>&
GetAllEvalSumKeys();

/**
 * Function for evaluating a sum of all components
 *
 * @param ciphertext the input ciphertext.
 * @param batchSize size of the batch
 * @return resulting ciphertext
 */
Ciphertext<Element> EvalSum(ConstCiphertext<Element> ciphertext, usint batchSize) const;

Ciphertext<Element> EvalSumRows(
    ConstCiphertext<Element> ciphertext, usint rowSize,
    const std::map<usint, LPEvalKey<Element>>& evalKeys) const;

Ciphertext<Element> EvalSumCols(
    ConstCiphertext<Element> ciphertext, usint rowSize,
    const std::map<usint, LPEvalKey<Element>>& evalKeys) const;

/**
 * EvalAtIndexKeyGen generates the key map to be used by EvalAtIndex
 *
 * @param privateKey private key.
 * @param indexList list of indices.
 * @param publicKey public key (used in NTRU schemes).
 */
void EvalAtIndexKeyGen(const LPPrivateKey<Element> privateKey,
                       const std::vector<int32_t>& indexList,
                       const LPPublicKey<Element> publicKey = nullptr);

/**
 * EvalFastRotationPrecompute implements the precomputation step of
 * hoisted automorphisms.
 *
 * Please refer to Section 5 of Halevi and Shoup, "Faster Homomorphic
 * linear transformations in HELib." for more details, link:
 * https://eprint.iacr.org/2018/244.
 *
 * Generally, automorphisms are performed with three steps: (1) the
 * automorphism is applied on the ciphertext, (2) the automorphed values are
 * decomposed into digits, and (3) key switching is applied to make it
 * possible to further compute on the ciphertext.
 *
 * Hoisted automorphisms are a technique that performs the digit decomposition
 * for the original ciphertext first, and then performs the automorphism and
 * the key switching on the decomposed digits. The benefit of this is that the
 * digit decomposition is independent of the automorphism rotation index, so
 * it can be reused for multiple different indices. This can greatly improve
 * performance when we have to compute many automorphisms on the same
 * ciphertext. This routinely happens when we do permutations (EvalPermute).
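 *
 * A minimal sketch of the hoisted pattern (hypothetical names; rotation
 * keys for the indices are assumed to exist):
 * @code
 * cc->EvalAtIndexKeyGen(kp.secretKey, {1, 2, 4});
 * auto digits = cc->EvalFastRotationPrecompute(ct);  // decompose once
 * usint m = cc->GetCyclotomicOrder();
 * auto r1 = cc->EvalFastRotation(ct, 1, m, digits);  // reuse digits
 * auto r2 = cc->EvalFastRotation(ct, 2, m, digits);
 * auto r4 = cc->EvalFastRotation(ct, 4, m, digits);
 * @endcode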
* * EvalFastRotationPrecompute implements the digit decomposition step of * hoisted automorphisms. * * @param ct the input ciphertext on which to do the precomputation (digit * decomposition) */ shared_ptr<vector<Element>> EvalFastRotationPrecompute( ConstCiphertext<Element> ct) const { TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalFastRotationPrecompute(ct); if (doTiming) { timeSamples->push_back(TimingInfo(OpFastRotPrecomp, TOC_US(t))); } return rv; } /** * EvalFastRotation implements the automorphism and key switching step of * hoisted automorphisms. * * Please refer to Section 5 of Halevi and Shoup, "Faster Homomorphic * linear transformations in HELib." for more details, link: * https://eprint.iacr.org/2018/244. * * Generally, automorphisms are performed with three steps: (1) the * automorphism is applied on the ciphertext, (2) the automorphed values are * decomposed into digits, and (3) key switching is applied to make it * possible to further compute on the ciphertext. * * Hoisted automorphisms is a technique that performs the digit decomposition * for the original ciphertext first, and then performs the automorphism and * the key switching on the decomposed digits. The benefit of this is that the * digit decomposition is independent of the automorphism rotation index, so * it can be reused for multiple different indices. This can greatly improve * performance when we have to compute many automorphisms on the same * ciphertext. This routinely happens when we do permutations (EvalPermute). * * EvalFastRotation implements the automorphism and key switching step of * hoisted automorphisms. * * This method assumes that all required rotation keys exist. This may not be * true if we are using baby-step/giant-step key switching. Please refer to * Section 5.1 of the above reference and EvalPermuteBGStepHoisted to see how * to deal with this issue. * * @param ct the input ciphertext to perform the automorphism on * @param index the index of the rotation. Positive indices correspond to left * rotations and negative indices correspond to right rotations. * @param m is the cyclotomic order * @param digits the digit decomposition created by EvalFastRotationPrecompute * at the precomputation step. */ Ciphertext<Element> EvalFastRotation( ConstCiphertext<Element> ct, const usint index, const usint m, const shared_ptr<vector<Element>> digits) const { TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalFastRotation(ct, index, m, digits); if (doTiming) { timeSamples->push_back(TimingInfo(OpFastRot, TOC_US(t))); } return rv; } /** * Merges multiple ciphertexts with encrypted results in slot 0 into a single * ciphertext. The slot assignment is done based on the order of ciphertexts in * the vector. * * @param ciphertextVector vector of ciphertexts to be merged. * @param &evalKeys - reference to the map of evaluation keys generated by * EvalAutomorphismKeyGen. * @return resulting ciphertext */ Ciphertext<Element> EvalMerge( const vector<Ciphertext<Element>>& ciphertextVector) const; /** * GetEvalAutomorphismKeyMap returns the map * * @return the EvalAutomorphism key map */ static const std::map<usint, LPEvalKey<Element>>& GetEvalAutomorphismKeyMap( const string& id); static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>& GetAllEvalAutomorphismKeys(); /** * Moves i-th slot to slot 0 * * @param ciphertext the input ciphertext. * @param index the index.
* @return resulting ciphertext */ Ciphertext<Element> EvalAtIndex(ConstCiphertext<Element> ciphertext, int32_t index) const; /** * Evaluates inner product in batched encoding * * @param ciphertext1 first vector. * @param ciphertext2 second vector. * @param batchSize size of the batch to be summed up * @return resulting ciphertext */ Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2, usint batchSize) const; /** * Evaluates inner product in batched encoding * * @param ciphertext1 first vector - ciphertext. * @param plaintext second vector - plaintext. * @param batchSize size of the batch to be summed up * @return resulting ciphertext */ Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1, ConstPlaintext plaintext, usint batchSize) const; /** * EvalCrossCorrelation - Computes the sliding sum of inner products (known * as cross-correlation, sliding inner product, or sliding dot product in * image processing) * @param x - first vector of row vectors * @param y - second vector of row vectors * @param batchSize - batch size for packed encoding * @param indexStart - starting index in the vectors of row vectors * @param length - length of the slice in the vectors of row vectors; default * is 0 meaning to use the full length of the vector * @return sum(x_i*y_i), i.e., a sum of inner products */ Ciphertext<Element> EvalCrossCorrelation( const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize, usint indexStart = 0, usint length = 0) const; /** * Method for polynomial evaluation for polynomials represented as power * series. * * @param &cipherText input ciphertext * @param &coefficients is the vector of coefficients in the polynomial; the * size of the vector is the degree of the polynomial + 1 * @return the result of polynomial evaluation.
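*
* For instance (a sketch; the coefficient values are illustrative only),
* evaluating 1 + 2x + 3x^2 on an encrypted input ct:
*
*   std::vector<double> coeffs = {1.0, 2.0, 3.0};
*   auto result = cc->EvalPoly(ct, coeffs);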
*/ virtual Ciphertext<Element> EvalPoly( ConstCiphertext<Element> ciphertext, const std::vector<double>& coefficients) const { if (ciphertext == nullptr || this->Mismatched(ciphertext->GetCryptoContext())) throw std::logic_error( "Information passed to EvalPoly was not generated with this crypto " "context"); TimeVar t; if (this->doTiming) TIC(t); auto rv = std::static_pointer_cast<LPPublicKeyEncryptionScheme<Element>>( this->GetEncryptionAlgorithm()) ->EvalPoly(ciphertext, coefficients); if (this->doTiming) { this->timeSamples->push_back(TimingInfo(OpEvalPoly, TOC_US(t))); } return rv; } /** * EvalLinRegressBatched - Computes the parameter vector for linear regression * using the least squares method. Supported only in batched mode; currently * works only for two regressors. * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares * method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegressBatched( const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize) const; /** * EvalLinRegression - Computes the parameter vector for linear regression * using the least squares method * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares * method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegression( const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const { TypeCheck((*x)(0, 0), (*y)(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalLinRegression(x, y); if (doTiming) { timeSamples->push_back(TimingInfo(OpLinRegression, TOC_US(t))); } return rv; } /** * KeySwitch - PALISADE KeySwitch method * @param keySwitchHint - reference to KeySwitchHint * @param ciphertext - vector of ciphertext * @return new CiphertextImpl after applying key switch */ Ciphertext<Element> KeySwitch(const LPEvalKey<Element> keySwitchHint, ConstCiphertext<Element> ciphertext) const { if (keySwitchHint == nullptr || Mismatched(keySwitchHint->GetCryptoContext())) PALISADE_THROW( config_error, "Key passed to KeySwitch was not generated with this crypto context"); if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext())) PALISADE_THROW(config_error, "Ciphertext passed to KeySwitch was not generated with " "this crypto context"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->KeySwitch(keySwitchHint, ciphertext); if (doTiming) { timeSamples->push_back(TimingInfo(OpKeySwitch, TOC_US(t))); } return rv; } /** * Rescale - An alias for PALISADE ModReduce method. * This is because ModReduce is called Rescale in CKKS.
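*
* A common CKKS usage pattern (a sketch; assumes ct1 and ct2 are ciphertexts
* created in this context) is to rescale after a multiplication to bring the
* scaling factor back down:
*
*   auto prod = cc->EvalMult(ct1, ct2);
*   prod = cc->Rescale(prod);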
* * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ Ciphertext<Element> Rescale(ConstCiphertext<Element> ciphertext) const { if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to Rescale was not generated with " "this crypto context"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->ModReduce(ciphertext); if (doTiming) { timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(t))); } return rv; } /** * ModReduce - PALISADE ModReduce method used only for BGVrns * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ Ciphertext<Element> ModReduce(ConstCiphertext<Element> ciphertext) const { if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext())) PALISADE_THROW( not_available_error, "Information passed to ModReduce was not generated with this crypto " "context"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->ModReduce(ciphertext); if (doTiming) { timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(t))); } return rv; } /** * ModReduceRational - PALISADE ModReduce method applied to the numerator and * denominator of a rational ciphertext * @param ciphertext - rational ciphertext * @return mod reduced rational ciphertext */ RationalCiphertext<Element> ModReduceRational( RationalCiphertext<Element> ciphertext) const { TimeVar t; if (doTiming) TIC(t); Ciphertext<Element> n = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetNumerator()); Ciphertext<Element> d = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetDenominator()); if (doTiming) { timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(t))); } return RationalCiphertext<Element>(n, d); } /** * ModReduceMatrix - PALISADE ModReduce method applied entry-wise to a matrix * of rational ciphertexts * @param ciphertext - matrix of rational ciphertexts * @return matrix of mod reduced rational ciphertexts */ shared_ptr<Matrix<RationalCiphertext<Element>>> ModReduceMatrix( shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext) const { // needs context check TimeVar t; if (doTiming) TIC(t); auto m = std::make_shared<Matrix<RationalCiphertext<Element>>>( ciphertext->GetAllocator(), ciphertext->GetRows(), ciphertext->GetCols()); for (size_t r = 0; r < m->GetRows(); r++) for (size_t c = 0; c < m->GetCols(); c++) (*m)(r, c) = ModReduceRational((*ciphertext)(r, c)); if (doTiming) { timeSamples->push_back(TimingInfo(OpModReduceMatrix, TOC_US(t))); } return m; } /** * LevelReduce - PALISADE LevelReduce method * @param cipherText1 * @param linearKeySwitchHint * @return vector of level reduced ciphertext */ Ciphertext<Element> LevelReduce( ConstCiphertext<Element> cipherText1, const LPEvalKeyNTRU<Element> linearKeySwitchHint, size_t levels = 1) const { if (cipherText1 == nullptr || Mismatched(cipherText1->GetCryptoContext())) { PALISADE_THROW(config_error, "Information passed to LevelReduce was not generated with " "this crypto context"); } const auto cryptoParams = std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>( cipherText1->GetCryptoParameters()); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->LevelReduce( cipherText1, linearKeySwitchHint, levels); if (doTiming) { timeSamples->push_back(TimingInfo(OpLevelReduce, TOC_US(t))); } return rv; } /** * ComposedEvalMult - PALISADE composed evalmult * @param ciphertext1 - vector for first cipher text * @param ciphertext2 - vector for second cipher text * @param quadKeySwitchHint - is the quadratic key switch hint from original * private key to the quadratic key * @return vector of
resulting ciphertext */ Ciphertext<Element> ComposedEvalMult( ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2) const { if (ciphertext1 == nullptr || ciphertext2 == nullptr || ciphertext1->GetKeyTag() != ciphertext2->GetKeyTag() || Mismatched(ciphertext1->GetCryptoContext())) PALISADE_THROW(config_error, "Ciphertexts passed to ComposedEvalMult were not " "generated with this crypto context"); auto ek = GetEvalMultKeyVector(ciphertext1->GetKeyTag()); if (!ek.size()) { PALISADE_THROW(type_error, "Evaluation key has not been generated for EvalMult"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->ComposedEvalMult(ciphertext1, ciphertext2, ek[0]); if (doTiming) { timeSamples->push_back(TimingInfo(OpComposedEvalMult, TOC_US(t))); } return rv; } static LPPublicKey<Element> deserializePublicKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static LPPrivateKey<Element> deserializeSecretKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static LPEvalKey<Element> deserializeEvalKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static LPEvalKey<Element> deserializeEvalKeyInContext( const Serialized& serObj, CryptoContext<Element> cc) __attribute__(( deprecated("serialization changed, see wiki for details"))); template <class Archive> void save(Archive& ar, std::uint32_t const version) const { ar(cereal::make_nvp("cc", params)); ar(cereal::make_nvp("kt", scheme)); ar(cereal::make_nvp("si", m_schemeId)); } template <class Archive> void load(Archive& ar, std::uint32_t const version) { if (version > SerializedVersion()) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar(cereal::make_nvp("cc", params)); ar(cereal::make_nvp("kt", scheme)); ar(cereal::make_nvp("si", m_schemeId)); // NOTE: a pointer to this object will be wrapped in a shared_ptr, and is a // "CryptoContext". PALISADE relies on the notion that identical // CryptoContextImpls are not duplicated in memory Once we deserialize this // object, we must check to see if there is a matching object for this // object that's already existing in memory if it DOES exist, use it. 
If it // does NOT exist, add this to the cache of all contexts } virtual std::string SerializedObjectName() const { return "CryptoContext"; } static uint32_t SerializedVersion() { return 1; } }; /** * @brief CryptoObject * * A class to aid in referring to the crypto context that an object belongs to */ template <typename Element> class CryptoObject { protected: CryptoContext<Element> context; // crypto context this object belongs to // tag used to find the evaluation key needed // for SHE/FHE operations string keyTag; public: explicit CryptoObject(CryptoContext<Element> cc = nullptr, const string& tag = "") : context(cc), keyTag(tag) {} CryptoObject(const CryptoObject& rhs) { context = rhs.context; keyTag = rhs.keyTag; } CryptoObject(const CryptoObject&& rhs) { context = std::move(rhs.context); keyTag = std::move(rhs.keyTag); } virtual ~CryptoObject() {} const CryptoObject& operator=(const CryptoObject& rhs) { this->context = rhs.context; this->keyTag = rhs.keyTag; return *this; } const CryptoObject& operator=(const CryptoObject&& rhs) { this->context = std::move(rhs.context); this->keyTag = std::move(rhs.keyTag); return *this; } bool operator==(const CryptoObject& rhs) const { return context.get() == rhs.context.get() && keyTag == rhs.keyTag; } CryptoContext<Element> GetCryptoContext() const { return context; } const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return context->GetCryptoParameters(); } const EncodingParams GetEncodingParameters() const { return context->GetCryptoParameters()->GetEncodingParams(); } const string GetKeyTag() const { return keyTag; } void SetKeyTag(const string& tag) { keyTag = tag; } template <class Archive> void save(Archive& ar, std::uint32_t const version) const { ar(::cereal::make_nvp("cc", context)); ar(::cereal::make_nvp("kt", keyTag)); } template <class Archive> void load(Archive& ar, std::uint32_t const version) { if (version > SerializedVersion()) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar(::cereal::make_nvp("cc", context)); ar(::cereal::make_nvp("kt", keyTag)); context = CryptoContextFactory<Element>::GetContext( context->GetCryptoParameters(), context->GetEncryptionAlgorithm()); } std::string SerializedObjectName() const { return "CryptoObject"; } static uint32_t SerializedVersion() { return 1; } }; /** * @brief CryptoContextFactory * * A class that contains static methods to generate new crypto contexts from * user parameters * */ template <typename Element> class CryptoContextFactory { using ParmType = typename Element::Params; using IntType = typename Element::Integer; protected: static vector<CryptoContext<Element>> AllContexts; public: static void ReleaseAllContexts(); static int GetContextCount(); static CryptoContext<Element> GetSingleContext(); static CryptoContext<Element> GetContext( shared_ptr<LPCryptoParameters<Element>> params, shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme, const string& schemeId = "Not"); static CryptoContext<Element> GetContextForPointer( CryptoContextImpl<Element>* cc); static const vector<CryptoContext<Element>>& GetAllContexts(); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param delta - the plaintext scaling parameter floor(q/t) in 
BFV * @param mode - mode for generating secret keys (RLWE vs OPTIMIZED) * @param bigmodulus - large modulus used in tensoring of homomorphic * multiplication * @param bigrootofunity - root of unity for bigmodulus * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param securityLevel - root Hermite factor * @param bigmodulusarb - additional large modulus for the case * of general (non-power-of-two) cyclotomics * @param bigrootofunityarb - root of unity for bigmodulusarb * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @return new context */ static CryptoContext<Element> genCryptoContextBFV( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0", int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param delta - the plaintext scaling parameter floor(q/t) in BFV * @param mode - mode for generating secret keys (RLWE vs OPTIMIZED) * @param bigmodulus - large modulus used in tensoring of homomorphic * multiplication * @param bigrootofunity - root of unity for bigmodulus * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param securityLevel - root Hermite factor * @param bigmodulusarb - additional large modulus for the case * of general (non-power-of-two) cyclotomics * @param bigrootofunityarb - root of unity for bigmodulusarb * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @return new context */ static CryptoContext<Element> genCryptoContextBFV( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0", int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel root Hermite factor (lattice security parameter) * @param relinWindow bits in the base of digits in key * switching/relinearization * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes *
numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( const PlaintextModulus plaintextModulus, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel standard security level * @param relinWindow bits in the base of digits in key * switching/relinearization * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( const PlaintextModulus plaintextModulus, SecurityLevel securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel root Hermite factor (lattice security parameter) * @param distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( EncodingParams encodingParams, float securityLevel, usint relinWindow, float 
dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( EncodingParams encodingParams, SecurityLevel securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window (bits in the base for digits) * used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( const PlaintextModulus plaintextModulus, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds
and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window (bits in the base for digits) * used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( const PlaintextModulus plaintextModulus, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( EncodingParams encodingParams, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); 
setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( EncodingParams encodingParams, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( const PlaintextModulus plaintextModulus, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( const 
PlaintextModulus plaintextModulus, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( EncodingParams encodingParams, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( EncodingParams encodingParams, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BGV Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the 
base of digits in key * switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param depth of supported computation circuit (not used; for future use) * @return new context */ static CryptoContext<Element> genCryptoContextBGV( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContextImpl for the BGV Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param depth of supported computation circuit (not used; for future use) * @return new context */ static CryptoContext<Element> genCryptoContextBGV( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContextImpl for the CKKS Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, KeySwitchTechnique ksTech = BV, RescalingTechnique rsTech = APPROXRESCALE); /** * construct a PALISADE CryptoContextImpl for the CKKS Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, enum KeySwitchTechnique ksTech = BV, RescalingTechnique rsTech = APPROXRESCALE); /** * Automatically generate the moduli chain and construct a PALISADE * CryptoContextImpl for the CKKS Scheme with it. 
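*
* For example (a sketch; the argument values below are illustrative only):
*
*   auto cc = CryptoContextFactory<DCRTPoly>::genCryptoContextCKKSWithParamsGen(
*       16384, 5, 50, 10, 8, OPTIMIZED);
*
* i.e., cyclotomic order 16384, 5 primes in the moduli chain, a 50-bit
* scaling factor, relinearization window 10, and batch size 8.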
* * @param cyclOrder the cyclotomic order M * @param numPrimes the number of towers/primes to use when building the * moduli chain * @param scaleExp the plaintext scaling factor, which is equal to dcrtBits in * our implementation of CKKS * @param batchSize the batch size of the ciphertext * @param mode RLWE or OPTIMIZED * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @return new context */ static CryptoContext<Element> genCryptoContextCKKSWithParamsGen( usint cyclOrder, usint numPrimes, usint scaleExp, usint relinWindow, usint batchSize, MODE mode, int depth = 1, int maxDepth = 2, usint firstModSize = 60, enum KeySwitchTechnique ksTech = BV, enum RescalingTechnique rsTech = APPROXRESCALE, uint32_t numLargeDigits = 4); /** * Construct a PALISADE CryptoContextImpl for the CKKS Scheme. * * @param multiplicativeDepth the depth of multiplications supported by the * scheme (equal to number of towers - 1) * @param scalingFactorBits the size of the scaling factor in bits * @param batchSize the number of slots being used in the ciphertext * @param stdLevel the standard security level we want the scheme to satisfy * @param ringDim the ring dimension (if not specified selected automatically * based on stdLevel) * @param ksTech key switching technique to use (e.g., HYBRID, GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param relinWindow the relinearization windows (used in BV key switching, * use 0 for RNS decomposition) * @param mode RLWE (gaussian distribution) or OPTIMIZED (ternary * distribution) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS( usint multiplicativeDepth, usint scalingFactorBits, usint batchSize, SecurityLevel stdLevel = HEStd_128_classic, usint ringDim = 0, enum RescalingTechnique rsTech = EXACTRESCALE, enum KeySwitchTechnique ksTech = HYBRID, uint32_t numLargeDigits = 0, int maxDepth = 2, usint firstModSize = 60, usint relinWindow = 0, MODE mode = OPTIMIZED); /** * construct a PALISADE CryptoContextImpl for the BGVrns Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrns( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, KeySwitchTechnique ksTech = BV, enum ModSwitchMethod msMethod = MANUAL); /** * construct a PALISADE CryptoContextImpl for the BGVrns Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @param maxDepth the maximum power of secret key for which the * 
relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrns( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, enum KeySwitchTechnique ksTech = BV, enum ModSwitchMethod msMethod = MANUAL); /** * Automatically generate the moduli chain and construct a PALISADE * CryptoContextImpl for the BGVrns Scheme with it. * * @param cyclOrder the cyclotomic order M * @param numPrimes the number of towers/primes to use when building the * moduli chain * @param ptm the plaintext modulus * @param mode RLWE or OPTIMIZED * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param dcrtBits the size of the moduli in bits * @param ksTech key switching technique to use (e.g., GHS or BV) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @param batchSize the number of slots being used in the ciphertext * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrnsWithParamsGen( usint cyclOrder, usint numPrimes, usint ptm, usint relinWindow, MODE mode, int depth = 1, int maxDepth = 2, enum KeySwitchTechnique ksTech = BV, usint firstModSize = 0, usint dcrtBits = 0, uint32_t numLargeDigits = 4, usint batchSize = 0, enum ModSwitchMethod msMethod = MANUAL); /** * Construct a PALISADE CryptoContextImpl for the BGVrns Scheme. * * @param multiplicativeDepth the depth of multiplications supported by the * scheme (equal to number of towers - 1) * @param ptm the plaintext modulus * @param stdLevel the standard security level we want the scheme to satisfy * @param stdDev sigma - distribution parameter for error distribution * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param mode RLWE (gaussian distribution) or OPTIMIZED (ternary * distribution) * @param ksTech key switching technique to use (e.g., HYBRID, GHS or BV) * @param ringDim the ring dimension (if not specified selected automatically * based on stdLevel) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @param firstModSize the bit-length of the first modulus * @param dcrtBits the size of the moduli in bits * @param relinWindow the relinearization window (used in BV key switching, * use 0 for RNS decomposition) * @param batchSize the number of slots being used in the ciphertext * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrns( usint multiplicativeDepth, usint ptm, SecurityLevel stdLevel = HEStd_128_classic, float stdDev = 3.19, int maxDepth = 2, MODE mode = OPTIMIZED, enum KeySwitchTechnique ksTech = HYBRID, usint ringDim = 0, uint32_t numLargeDigits = 0, usint firstModSize = 0, usint dcrtBits = 0, usint relinWindow = 0, usint batchSize = 0, enum ModSwitchMethod msMethod = AUTO); /** * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stDev sigma - distribution parameter for error distribution * @param stDevStSt distribution parameter for secret key distribution * @param depth of
supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param securityLevel - root Hermite factor * @return new context */ static CryptoContext<Element> genCryptoContextStehleSteinfeld( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stDev sigma - distribution parameter for error distribution * @param stDevStSt distribution parameter for secret key distribution * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param securityLevel - root Hermite factor * @return new context */ static CryptoContext<Element> genCryptoContextStehleSteinfeld( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the Null Scheme * @param m cyclotomic order (ring dimension n = m/2 for power-of-two * cyclotomics) * @param plaintextModulus plaintext modulus * @return new context */ static CryptoContext<Element> genCryptoContextNull( unsigned int m, const PlaintextModulus ptModulus); /** * construct a PALISADE CryptoContextImpl for the Null Scheme * @param m cyclotomic order (ring dimension n = m/2 for power-of-two * cyclotomics) * @param encodingParams plaintext encoding parameters * @return new context */ static CryptoContext<Element> genCryptoContextNull( unsigned int m, EncodingParams encodingParams); static CryptoContext<Element> DeserializeAndCreateContext( const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); }; } // namespace lbcrypto #endif /* SRC_PKE_CRYPTOCONTEXT_H_ */
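// Example usage (a sketch; it exercises the factory and context methods
// declared above, and the parameter values are illustrative only):
//
//   #include "palisade.h"
//   using namespace lbcrypto;
//
//   int main() {
//     // BFVrns context: plaintext modulus 65537, 128-bit security,
//     // noise parameter 3.2, multiplicative depth 2.
//     auto cc = CryptoContextFactory<DCRTPoly>::genCryptoContextBFVrns(
//         65537, HEStd_128_classic, 3.2, 0, 2, 0);
//     cc->Enable(ENCRYPTION);
//     cc->Enable(SHE);
//     auto keys = cc->KeyGen();
//     cc->EvalMultKeyGen(keys.secretKey);
//     Plaintext pt = cc->MakePackedPlaintext({1, 2, 3});
//     auto ct = cc->Encrypt(keys.publicKey, pt);
//     auto ctSq = cc->EvalMult(ct, ct);  // requires the EvalMult key
//     Plaintext result;
//     cc->Decrypt(keys.secretKey, ctSq, &result);
//     return 0;
//   }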
// @file cryptocontext.h -- Control for encryption operations. // @author TPOC: [email protected] // // @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT)) // All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. THIS SOFTWARE IS // PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef SRC_PKE_CRYPTOCONTEXT_H_ #define SRC_PKE_CRYPTOCONTEXT_H_ #include <map> #include <memory> #include <string> #include <utility> #include <vector> #include "palisade.h" #include "scheme/allscheme.h" #include "cryptocontexthelper.h" #include "cryptotiming.h" #include "utils/caller_info.h" #include "utils/serial.h" #include "utils/serialize-binary.h" #include "utils/serialize-json.h" namespace lbcrypto { template <typename Element> class CryptoContextFactory; template <typename Element> class CryptoContextImpl; template <typename Element> using CryptoContext = shared_ptr<CryptoContextImpl<Element>>; /** * @brief CryptoContextImpl * * A CryptoContextImpl is the object used to access the PALISADE library * * All PALISADE functionality is accessed by way of an instance of a * CryptoContextImpl; we say that various objects are "created in" a context, * and can only be used in the context in which they were created * * All PALISADE methods are accessed through CryptoContextImpl methods. 
Guards * are implemented to make certain that only valid objects that have been * created in the context are used * * Contexts are created using the CryptoContextFactory, and can be serialized * and recovered from a serialization */ template <typename Element> class CryptoContextImpl : public Serializable { using IntType = typename Element::Integer; using ParmType = typename Element::Params; friend class CryptoContextFactory<Element>; protected: // crypto parameters used for this context shared_ptr<LPCryptoParameters<Element>> params; // algorithm used; accesses all crypto methods shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme; static std::map<string, std::vector<LPEvalKey<Element>>>& evalMultKeyMap() { // cached evalmult keys, by secret key UID static std::map<string, std::vector<LPEvalKey<Element>>> s_evalMultKeyMap; return s_evalMultKeyMap; } static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>& evalSumKeyMap() { // cached evalsum keys, by secret key UID static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> s_evalSumKeyMap; return s_evalSumKeyMap; } static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>& evalAutomorphismKeyMap() { // cached evalautomorphism keys, by secret key UID static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> s_evalAutomorphismKeyMap; return s_evalAutomorphismKeyMap; } bool doTiming; vector<TimingInfo>* timeSamples; string m_schemeId; size_t m_keyGenLevel; /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * @param a * @param b */ void TypeCheck(ConstCiphertext<Element> a, ConstCiphertext<Element> b, CALLER_INFO_ARGS_HDR) const { if (a == nullptr || b == nullptr) { std::string errorMsg(std::string("Null Ciphertext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext() != b->GetCryptoContext()) { std::string errorMsg( std::string( "Ciphertexts were not created in the same CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetKeyTag() != b->GetKeyTag()) { std::string errorMsg( std::string("Ciphertexts were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "Ciphertext encoding types " << a->GetEncodingType(); ss << " and " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * This is intended for mutable methods, hence inputs are Ciphretext instead * of ConstCiphertext. 
* * @param a * @param b */ /* void TypeCheck(Ciphertext<Element> a, Ciphertext<Element> b, CALLER_INFO_ARGS_HDR) const { if (a == nullptr || b == nullptr) { std::string errorMsg(std::string("Null Ciphertext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext() != b->GetCryptoContext()) { std::string errorMsg( std::string("Ciphertexts were not created in the same CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetKeyTag() != b->GetKeyTag()) { std::string errorMsg( std::string("Ciphertexts were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "Ciphertext encoding types " << a->GetEncodingType(); ss << " and " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } */ /** * TypeCheck makes sure that an operation between a ciphertext and a plaintext * is permitted * @param a * @param b */ void TypeCheck(ConstCiphertext<Element> a, ConstPlaintext b, CALLER_INFO_ARGS_HDR) const { if (a == nullptr) { std::string errorMsg(std::string("Null Ciphertext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (b == nullptr) { std::string errorMsg(std::string("Null Plaintext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "Ciphertext encoding type " << a->GetEncodingType(); ss << " and Plaintext encoding type " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * @param a * @param b */ void TypeCheck(const RationalCiphertext<Element>& a, const RationalCiphertext<Element>& b, CALLER_INFO_ARGS_HDR) const { if (a.GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetCryptoContext() != b.GetCryptoContext()) { std::string errorMsg( std::string( "Ciphertexts were not created in the same CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetKeyTag() != b.GetKeyTag()) { std::string errorMsg( std::string("Ciphertexts were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetNumerator()->GetEncodingType() != b.GetNumerator()->GetEncodingType()) { std::stringstream ss; ss << "RationalCiphertext encoding types " << a.GetNumerator()->GetEncodingType(); ss << " and " << b.GetNumerator()->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } /** * TypeCheck makes sure that an operation between a ciphertext and a plaintext * is permitted * @param a * @param b */ void TypeCheck(const RationalCiphertext<Element>& a, ConstPlaintext b, CALLER_INFO_ARGS_HDR) const { if (b == nullptr) { std::string errorMsg(std::string("Null Plaintext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetCryptoContext().get() != this) { std::string 
errorMsg(
          std::string("Ciphertext was not created in this CryptoContextImpl") +
          CALLER_INFO);
      PALISADE_THROW(type_error, errorMsg);
    }
    if (a.GetNumerator()->GetEncodingType() != b->GetEncodingType()) {
      std::stringstream ss;
      ss << "RationalCiphertext encoding type "
         << a.GetNumerator()->GetEncodingType();
      ss << " and Plaintext encoding type " << b->GetEncodingType();
      ss << " do not match";
      ss << CALLER_INFO;
      PALISADE_THROW(type_error, ss.str());
    }
  }

  bool Mismatched(const CryptoContext<Element> a) const {
    return a.get() != this;
  }

 public:
  LPPrivateKey<Element> privateKey;

  /**
   * This stores the private key in the crypto context.
   * This is only intended for debugging and should not be
   * used in production systems. Please define DEBUG_KEY in
   * palisade.h to enable this.
   *
   * If used, one can create a key pair and store the secret
   * key in the crypto context like this:
   *
   * auto keys = cc->KeyGen();
   * cc->SetPrivateKey(keys.secretKey);
   *
   * After that, anywhere in the code, one can access the
   * secret key by getting the crypto context and doing the
   * following:
   *
   * auto sk = cc->GetPrivateKey();
   *
   * This key can be used for decrypting any intermediate
   * ciphertexts for debugging purposes.
   *
   * @param sk the secret key
   *
   */
  void SetPrivateKey(const LPPrivateKey<Element> sk) {
#ifdef DEBUG_KEY
    cerr << "Warning - SetPrivateKey is only intended to be used for debugging "
            "purposes - not for production systems."
         << endl;
    this->privateKey = sk;
#else
    PALISADE_THROW(
        not_available_error,
        "SetPrivateKey is only allowed if DEBUG_KEY is set in palisade.h");
#endif
  }

  /**
   * This gets the private key from the crypto context.
   * This is only intended for debugging and should not be
   * used in production systems. Please define DEBUG_KEY in
   * palisade.h to enable this.
   *
   * If used, one can create a key pair and store the secret
   * key in the crypto context like this:
   *
   * auto keys = cc->KeyGen();
   * cc->SetPrivateKey(keys.secretKey);
   *
   * After that, anywhere in the code, one can access the
   * secret key by getting the crypto context and doing the
   * following:
   *
   * auto sk = cc->GetPrivateKey();
   *
   * This key can be used for decrypting any intermediate
   * ciphertexts for debugging purposes.
   *
   * @return the secret key
   *
   */
  const LPPrivateKey<Element> GetPrivateKey() {
#ifdef DEBUG_KEY
    return this->privateKey;
#else
    PALISADE_THROW(
        not_available_error,
        "GetPrivateKey is only allowed if DEBUG_KEY is set in palisade.h");
#endif
  }
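  /*
   * Illustrative sketch of the debug flow described above (only compiles when
   * DEBUG_KEY is defined in palisade.h; the intermediate ciphertext "ct" is
   * an assumption for the example):
   *
   *   auto keys = cc->KeyGen();
   *   cc->SetPrivateKey(keys.secretKey);
   *   // ... later, deep inside a computation ...
   *   Plaintext dbg;
   *   cc->Decrypt(cc->GetPrivateKey(), ct, &dbg);
   */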
  void setSchemeId(string schemeTag) { this->m_schemeId = schemeTag; }

  string getSchemeId() const { return this->m_schemeId; }

  /**
   * CryptoContextImpl constructor from pointers to parameters and scheme
   * @param params - pointer to CryptoParameters
   * @param scheme - pointer to Crypto Scheme
   */
  CryptoContextImpl(LPCryptoParameters<Element>* params = nullptr,
                    LPPublicKeyEncryptionScheme<Element>* scheme = nullptr,
                    const string& schemeId = "Not") {
    this->params.reset(params);
    this->scheme.reset(scheme);
    this->doTiming = false;
    this->timeSamples = nullptr;
    this->m_keyGenLevel = 0;
    this->m_schemeId = schemeId;
  }

  /**
   * CryptoContextImpl constructor from shared pointers to parameters and
   * scheme
   * @param params - shared pointer to CryptoParameters
   * @param scheme - shared pointer to Crypto Scheme
   */
  CryptoContextImpl(shared_ptr<LPCryptoParameters<Element>> params,
                    shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme,
                    const string& schemeId = "Not") {
    this->params = params;
    this->scheme = scheme;
    this->doTiming = false;
    this->timeSamples = nullptr;
    this->m_keyGenLevel = 0;
    this->m_schemeId = schemeId;
  }

  /**
   * Copy constructor
   * @param c - source
   */
  CryptoContextImpl(const CryptoContextImpl<Element>& c) {
    params = c.params;
    scheme = c.scheme;
    doTiming = c.doTiming;
    timeSamples = c.timeSamples;
    this->m_keyGenLevel = 0;
    this->m_schemeId = c.m_schemeId;
  }

  /**
   * Assignment
   * @param rhs - assigning from
   * @return this
   */
  CryptoContextImpl<Element>& operator=(const CryptoContextImpl<Element>& rhs) {
    params = rhs.params;
    scheme = rhs.scheme;
    doTiming = rhs.doTiming;
    timeSamples = rhs.timeSamples;
    m_keyGenLevel = rhs.m_keyGenLevel;
    m_schemeId = rhs.m_schemeId;
    return *this;
  }

  /**
   * A CryptoContextImpl is only valid if the shared pointers are both valid
   */
  operator bool() const { return params && scheme; }

  /**
   * Private methods to compare two contexts; this is only used internally and
   * is not generally available
   * @param a - operand 1
   * @param b - operand 2
   * @return true if the implementations have identical parms and scheme
   */
  friend bool operator==(const CryptoContextImpl<Element>& a,
                         const CryptoContextImpl<Element>& b) {
    // Identical if the parameters and the schemes are identical... the exact
    // same object, OR the same type and the same values
    if (a.params.get() != b.params.get()) {
      if (typeid(*a.params.get()) != typeid(*b.params.get())) {
        return false;
      }
      if (*a.params.get() != *b.params.get()) return false;
    }

    if (a.scheme.get() != b.scheme.get()) {
      if (typeid(*a.scheme.get()) != typeid(*b.scheme.get())) {
        return false;
      }
      if (*a.scheme.get() != *b.scheme.get()) return false;
    }

    return true;
  }

  friend bool operator!=(const CryptoContextImpl<Element>& a,
                         const CryptoContextImpl<Element>& b) {
    return !(a == b);
  }

  // TIMING METHODS

  /**
   * StartTiming method activates timing of CryptoMethods
   *
   * @param timeSamples points to a vector in which timing samples will be
   * stored
   */
  void StartTiming(vector<TimingInfo>* timeSamples) {
    this->timeSamples = timeSamples;
    doTiming = true;
  }

  /*
   * StopTiming - turns off timing
   */
  void StopTiming() { doTiming = false; }

  /**
   * ResumeTiming - re-enables timing with existing TimingInfo vector
   */
  void ResumeTiming() { doTiming = true; }

  /**
   * ResetTiming - erases measurements
   */
  void ResetTiming() { this->timeSamples->clear(); }

  static bool SerializeEvalMultKey(Serialized* serObj) __attribute__((
      deprecated("serialization changed, see wiki for details")));

  static bool SerializeEvalMultKey(Serialized* serObj, const string& id)
      __attribute__((
          deprecated("serialization changed, see wiki for details")));

  static bool SerializeEvalMultKey(Serialized* serObj,
                                   const CryptoContext<Element> cc)
      __attribute__((
          deprecated("serialization changed, see wiki for details")));

  static bool DeserializeEvalMultKey(Serialized* serObj) __attribute__((
      deprecated("serialization changed, see wiki for details")));

  /**
   * SerializeEvalMultKey for a single EvalMult key or all EvalMult keys
   *
   * @param ser - stream to serialize to
   * @param sertype - type of serialization
   * @param id for key to serialize - if empty string, serialize them all
   * @return true on success
   */
  template <typename ST>
  static bool SerializeEvalMultKey(std::ostream& ser, const ST& sertype,
                                   string id = "");

  /**
   * SerializeEvalMultKey for all EvalMultKeys made in a given context
   *
   * @param cc whose keys should be serialized
   * @param ser - stream to serialize to
   * @param sertype - type of serialization
   * @return true on success (false on failure or no keys found)
   */
  template <typename ST>
  static bool SerializeEvalMultKey(std::ostream& ser, const ST& sertype,
                                   const CryptoContext<Element> cc) {
    std::map<string, std::vector<LPEvalKey<Element>>> omap;
    for (const auto& k : GetAllEvalMultKeys()) {
      if (k.second[0]->GetCryptoContext() == cc) {
        omap[k.first] = k.second;
      }
    }

    if (omap.size() == 0) return false;

    Serial::Serialize(omap, ser, sertype);
    return true;
  }

  /**
   * DeserializeEvalMultKey deserialize all keys in the serialization
   * deserialized keys silently replace any existing matching keys
   * deserialization will create CryptoContextImpl if necessary
   *
   * @param ser - stream with a serialization
   * @param sertype - type of serialization
   * @return true on success
   */
  template <typename ST>
  static bool DeserializeEvalMultKey(std::istream& ser, const ST& sertype) {
    std::map<string, std::vector<LPEvalKey<Element>>> evalMultKeys;

    Serial::Deserialize(evalMultKeys, ser, sertype);

    // The deserialize call created any contexts that needed to be created....
    // so all we need to do is put the keys into the maps for their context
    for (auto k : evalMultKeys) {
      GetAllEvalMultKeys()[k.first] = k.second;
    }
    return true;
  }
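  /*
   * Round-trip sketch for the stream-based (de)serialization above; the file
   * name and the choice of SerType::BINARY are assumptions for the example:
   *
   *   std::ofstream ofs("multkeys.bin", std::ios::binary);
   *   CryptoContextImpl<DCRTPoly>::SerializeEvalMultKey(ofs, SerType::BINARY);
   *   ofs.close();
   *
   *   std::ifstream ifs("multkeys.bin", std::ios::binary);
   *   CryptoContextImpl<DCRTPoly>::DeserializeEvalMultKey(ifs, SerType::BINARY);
   */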
  /**
   * ClearEvalMultKeys - flush EvalMultKey cache
   */
  static void ClearEvalMultKeys();

  /**
   * ClearEvalMultKeys - flush EvalMultKey cache for a given id
   * @param id
   */
  static void ClearEvalMultKeys(const string& id);

  /**
   * ClearEvalMultKeys - flush EvalMultKey cache for a given context
   * @param cc
   */
  static void ClearEvalMultKeys(const CryptoContext<Element> cc);

  /**
   * InsertEvalMultKey - add the given vector of keys to the map, replacing the
   * existing vector if there
   * @param vectorToInsert
   */
  static void InsertEvalMultKey(
      const std::vector<LPEvalKey<Element>>& vectorToInsert);

  static bool SerializeEvalSumKey(Serialized* serObj) __attribute__((
      deprecated("serialization changed, see wiki for details")));

  static bool SerializeEvalSumKey(Serialized* serObj, const string& id)
      __attribute__((
          deprecated("serialization changed, see wiki for details")));

  static bool SerializeEvalSumKey(Serialized* serObj,
                                  const CryptoContext<Element> cc)
      __attribute__((
          deprecated("serialization changed, see wiki for details")));

  static bool DeserializeEvalSumKey(const Serialized& serObj) __attribute__((
      deprecated("serialization changed, see wiki for details")));

  /**
   * SerializeEvalSumKey for a single EvalSum key or all of the EvalSum keys
   *
   * @param ser - stream to serialize to
   * @param sertype - type of serialization
   * @param id - key to serialize; empty string means all keys
   * @return true on success
   */
  template <typename ST>
  static bool SerializeEvalSumKey(std::ostream& ser, const ST& sertype,
                                  string id = "") {
    std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>* smap;
    std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap;

    if (id.length() == 0) {
      smap = &GetAllEvalSumKeys();
    } else {
      auto k = GetAllEvalSumKeys().find(id);
      if (k == GetAllEvalSumKeys().end()) return false;  // no such id

      smap = &omap;
      omap[k->first] = k->second;
    }

    Serial::Serialize(*smap, ser, sertype);
    return true;
  }

  /**
   * SerializeEvalSumKey for all of the EvalSum keys for a context
   *
   * @param ser - stream to serialize to
   * @param sertype - type of serialization
   * @param cc - context
   * @return true on success
   */
  template <typename ST>
  static bool SerializeEvalSumKey(std::ostream& ser, const ST& sertype,
                                  const CryptoContext<Element> cc) {
    std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap;
    for (const auto& k : GetAllEvalSumKeys()) {
      if (k.second->begin()->second->GetCryptoContext() == cc) {
        omap[k.first] = k.second;
      }
    }

    if (omap.size() == 0) return false;

    Serial::Serialize(omap, ser, sertype);
    return true;
  }

  /**
   * DeserializeEvalSumKey deserialize all keys in the serialization
   * deserialized keys silently replace any existing matching keys
   * deserialization will create CryptoContextImpl if necessary
   *
   * @param ser - stream to serialize from
   * @param sertype - type of serialization
   * @return true on success
   */
  template <typename ST>
  static bool DeserializeEvalSumKey(std::istream& ser, const ST& sertype) {
    std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>
        evalSumKeys;

    Serial::Deserialize(evalSumKeys, ser, sertype);

    // The deserialize call created any contexts that needed to be created....
// so all we need to do is put the keys into the maps for their context for (auto k : evalSumKeys) { GetAllEvalSumKeys()[k.first] = k.second; } return true; } /** * ClearEvalSumKeys - flush EvalSumKey cache */ static void ClearEvalSumKeys(); /** * ClearEvalSumKeys - flush EvalSumKey cache for a given id * @param id */ static void ClearEvalSumKeys(const string& id); /** * ClearEvalSumKeys - flush EvalSumKey cache for a given context * @param cc */ static void ClearEvalSumKeys(const CryptoContext<Element> cc); /** * InsertEvalSumKey - add the given map of keys to the map, replacing the * existing map if there * @param mapToInsert */ static void InsertEvalSumKey( const shared_ptr<std::map<usint, LPEvalKey<Element>>> mapToInsert); static bool SerializeEvalAutomorphismKey(Serialized* serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalAutomorphismKey(Serialized* serObj, const string& id) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalAutomorphismKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool DeserializeEvalAutomorphismKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * SerializeEvalAutomorphismKey for a single EvalAuto key or all of the * EvalAuto keys * * @param ser - stream to serialize to * @param sertype - type of serialization * @param id - key to serialize; empty string means all keys * @return true on success */ template <typename ST> static bool SerializeEvalAutomorphismKey(std::ostream& ser, const ST& sertype, string id = "") { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>* smap; std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; if (id.length() == 0) { smap = &GetAllEvalAutomorphismKeys(); } else { auto k = GetAllEvalAutomorphismKeys().find(id); if (k == GetAllEvalAutomorphismKeys().end()) return false; // no such id smap = &omap; omap[k->first] = k->second; } Serial::Serialize(*smap, ser, sertype); return true; } /** * SerializeEvalAutomorphismKey for all of the EvalAuto keys for a context * * @param ser - stream to serialize to * @param sertype - type of serialization * @param cc - context * @return true on success */ template <typename ST> static bool SerializeEvalAutomorphismKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; for (const auto& k : GetAllEvalAutomorphismKeys()) { if (k.second->begin()->second->GetCryptoContext() == cc) { omap[k.first] = k.second; } } if (omap.size() == 0) return false; Serial::Serialize(omap, ser, sertype); return true; } /** * DeserializeEvalAutomorphismKey deserialize all keys in the serialization * deserialized keys silently replace any existing matching keys * deserialization will create CryptoContextImpl if necessary * * @param ser - stream to serialize from * @param sertype - type of serialization * @return true on success */ template <typename ST> static bool DeserializeEvalAutomorphismKey(std::istream& ser, const ST& sertype) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> evalSumKeys; Serial::Deserialize(evalSumKeys, ser, sertype); // The deserialize call created any contexts that needed to be created.... 
    // so all we need to do is put the keys into the maps for their context
    for (auto k : evalSumKeys) {
      GetAllEvalAutomorphismKeys()[k.first] = k.second;
    }
    return true;
  }

  /**
   * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache
   */
  static void ClearEvalAutomorphismKeys();

  /**
   * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache for a given id
   * @param id
   */
  static void ClearEvalAutomorphismKeys(const string& id);

  /**
   * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache for a given
   * context
   * @param cc
   */
  static void ClearEvalAutomorphismKeys(const CryptoContext<Element> cc);

  /**
   * InsertEvalAutomorphismKey - add the given map of keys to the map, replacing
   * the existing map if there
   * @param mapToInsert
   */
  static void InsertEvalAutomorphismKey(
      const shared_ptr<std::map<usint, LPEvalKey<Element>>> mapToInsert);

  // TURN FEATURES ON
  /**
   * Enable a particular feature for use with this CryptoContextImpl
   * @param feature - the feature that should be enabled
   */
  void Enable(PKESchemeFeature feature) { scheme->Enable(feature); }

  /**
   * Enable several features at once
   * @param featureMask - bitwise or of several PKESchemeFeatures
   */
  void Enable(usint featureMask) { scheme->Enable(featureMask); }

  // GETTERS
  /**
   * Getter for Scheme
   * @return scheme
   */
  const shared_ptr<LPPublicKeyEncryptionScheme<Element>>
  GetEncryptionAlgorithm() const {
    return scheme;
  }

  /**
   * Getter for CryptoParams
   * @return params
   */
  const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const {
    return params;
  }

  size_t GetKeyGenLevel() const { return m_keyGenLevel; }

  void SetKeyGenLevel(size_t level) { m_keyGenLevel = level; }

  /**
   * Getter for element params
   * @return
   */
  const shared_ptr<ParmType> GetElementParams() const {
    return params->GetElementParams();
  }

  /**
   * Getter for encoding params
   * @return
   */
  const EncodingParams GetEncodingParams() const {
    return params->GetEncodingParams();
  }

  /**
   * Get the cyclotomic order used for this context
   *
   * @return
   */
  usint GetCyclotomicOrder() const {
    return params->GetElementParams()->GetCyclotomicOrder();
  }

  /**
   * Get the ring dimension used for this context
   *
   * @return
   */
  usint GetRingDimension() const {
    return params->GetElementParams()->GetRingDimension();
  }

  /**
   * Get the ciphertext modulus used for this context
   *
   * @return
   */
  const IntType& GetModulus() const {
    return params->GetElementParams()->GetModulus();
  }

  /**
   * Get the root of unity used for this context
   *
   * @return
   */
  const IntType& GetRootOfUnity() const {
    return params->GetElementParams()->GetRootOfUnity();
  }

  /**
   * KeyGen generates a key pair using this algorithm's KeyGen method
   * @return a public/secret key pair
   */
  LPKeyPair<Element> KeyGen() {
    TimeVar t;
    if (doTiming) TIC(t);
    auto r = GetEncryptionAlgorithm()->KeyGen(
        CryptoContextFactory<Element>::GetContextForPointer(this), false);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpKeyGen, TOC_US(t)));
    }
    return r;
  }
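  /*
   * Minimal usage sketch, assuming a context "cc" obtained from
   * CryptoContextFactory (parameter setup omitted):
   *
   *   cc->Enable(ENCRYPTION);
   *   cc->Enable(SHE);
   *   LPKeyPair<DCRTPoly> kp = cc->KeyGen();
   *   // kp.publicKey is used to encrypt, kp.secretKey to decrypt
   */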
  /**
   * Threshold FHE: Generation of a public key derived
   * from a previous joined public key (for prior secret shares) and the secret
   * key share of the current party.
   *
   * @param pk joined public key from prior parties.
   * @param makeSparse set to true if ring reduce by a factor of 2 is to be
   * used. NOT SUPPORTED BY ANY SCHEME ANYMORE.
   * @param fresh set to true if proxy re-encryption is used in the multi-party
   * protocol or star topology is used
   * @return key pair including the secret share for the current party and
   * joined public key
   */
  LPKeyPair<Element> MultipartyKeyGen(const LPPublicKey<Element> pk,
                                      bool makeSparse = false,
                                      bool fresh = false) {
    if (!pk) PALISADE_THROW(config_error, "Input public key is empty");
    TimeVar t;
    if (doTiming) TIC(t);
    auto r = GetEncryptionAlgorithm()->MultipartyKeyGen(
        CryptoContextFactory<Element>::GetContextForPointer(this), pk,
        makeSparse, fresh);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpMultiPartyKeyGenKey, TOC_US(t)));
    }
    return r;
  }

  /**
   * Threshold FHE: Generates a public key from a vector of secret shares.
   * ONLY FOR DEBUGGING PURPOSES. SHOULD NOT BE USED IN PRODUCTION.
   *
   * @param secretKeys secret key shares.
   * @return key pair including the private key for the current party and
   * joined public key
   */
  LPKeyPair<Element> MultipartyKeyGen(
      const vector<LPPrivateKey<Element>>& secretKeys) {
    if (!secretKeys.size())
      PALISADE_THROW(config_error, "Input private key vector is empty");
    TimeVar t;
    if (doTiming) TIC(t);
    auto r = GetEncryptionAlgorithm()->MultipartyKeyGen(
        CryptoContextFactory<Element>::GetContextForPointer(this), secretKeys,
        false);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpMultiPartyKeyGenKeyvec, TOC_US(t)));
    }
    return r;
  }

  /**
   * Threshold FHE: Method for decryption operation run by the lead decryption
   * client
   *
   * @param privateKey secret key share used for decryption.
   * @param ciphertext ciphertext to be decrypted.
   */
  vector<Ciphertext<Element>> MultipartyDecryptLead(
      const LPPrivateKey<Element> privateKey,
      const vector<Ciphertext<Element>>& ciphertext) const {
    if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "Information passed to MultipartyDecryptLead was not "
                     "generated with this crypto context");

    vector<Ciphertext<Element>> newCiphertext;

    TimeVar t;
    if (doTiming) TIC(t);

    for (size_t i = 0; i < ciphertext.size(); i++) {
      if (ciphertext[i] == nullptr ||
          Mismatched(ciphertext[i]->GetCryptoContext()))
        PALISADE_THROW(config_error,
                       "A ciphertext passed to MultipartyDecryptLead was not "
                       "generated with this crypto context");

      newCiphertext.push_back(GetEncryptionAlgorithm()->MultipartyDecryptLead(
          privateKey, ciphertext[i]));
    }

    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpMultiPartyDecryptLead, TOC_US(t)));
    }
    return newCiphertext;
  }

  /**
   * Threshold FHE: "Partial" decryption computed by all parties except for the
   * lead one
   *
   * @param privateKey secret key share used for decryption.
   * @param ciphertext ciphertext that is being decrypted.
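   *
   * An illustrative sketch of the full threshold-decryption flow (the party
   * count, key and ciphertext names are assumptions for the example): the
   * lead party calls MultipartyDecryptLead, every other party calls
   * MultipartyDecryptMain, and the partial decryptions are then fused:
   * @code
   *   auto lead  = cc->MultipartyDecryptLead(skLead, cts);
   *   auto part2 = cc->MultipartyDecryptMain(sk2, cts);
   *   vector<Ciphertext<Element>> partials{lead[0], part2[0]};
   *   Plaintext result;
   *   cc->MultipartyDecryptFusion(partials, &result);
   * @endcode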
*/ vector<Ciphertext<Element>> MultipartyDecryptMain( const LPPrivateKey<Element> privateKey, const vector<Ciphertext<Element>>& ciphertext) const { if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to MultipartyDecryptMain was not " "generated with this crypto context"); vector<Ciphertext<Element>> newCiphertext; TimeVar t; if (doTiming) TIC(t); for (size_t i = 0; i < ciphertext.size(); i++) { if (ciphertext[i] == nullptr || Mismatched(ciphertext[i]->GetCryptoContext())) PALISADE_THROW(config_error, "A ciphertext passed to MultipartyDecryptMain was not " "generated with this crypto context"); newCiphertext.push_back(GetEncryptionAlgorithm()->MultipartyDecryptMain( privateKey, ciphertext[i])); } if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiPartyDecryptMain, TOC_US(t))); } return newCiphertext; } /** * Threshold FHE: Method for combining the partially decrypted ciphertexts * and getting the final decryption in the clear. * * @param &partialCiphertextVec vector of "partial" decryptions. * @param *plaintext the plaintext output. * @return the decoding result. */ DecryptResult MultipartyDecryptFusion( const vector<Ciphertext<Element>>& partialCiphertextVec, Plaintext* plaintext) const; /** * Threshold FHE: Generates a joined evaluation key * from the current secret share and a prior joined * evaluation key * * @param originalPrivateKey secret key transformed from. * @param newPrivateKey secret key transformed to. * @param ek the prior joined evaluation key. * @return the new joined evaluation key. */ LPEvalKey<Element> MultiKeySwitchGen( const LPPrivateKey<Element> originalPrivateKey, const LPPrivateKey<Element> newPrivateKey, const LPEvalKey<Element> ek) const { if (!originalPrivateKey) PALISADE_THROW(config_error, "Input first private key is nullptr"); if (!newPrivateKey) PALISADE_THROW(config_error, "Input second private key is nullptr"); if (!ek) PALISADE_THROW(config_error, "Input evaluation key is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiKeySwitchGen(originalPrivateKey, newPrivateKey, ek); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiKeySwitchGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generates joined automorphism keys * from the current secret share and prior joined * automorphism keys * * @param privateKey secret key share. * @param eAuto a dictionary with prior joined automorphism keys. * @param &indexList a vector of automorphism indices. * @param keyId - new key identifier used for the resulting evaluation key * @return a dictionary with new joined automorphism keys. 
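   *
   * A hedged sketch of chaining shares (EvalAutomorphismKeyGen and the party
   * variables are assumptions for the example): the first party generates the
   * initial key map, and each later party joins its own share into it:
   * @code
   *   auto eAuto1 = cc->EvalAutomorphismKeyGen(sk1, indexList);
   *   auto eAuto2 = cc->MultiEvalAutomorphismKeyGen(sk2, eAuto1, indexList,
   *                                                 keyId);
   * @endcode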
*/ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiEvalAutomorphismKeyGen( const LPPrivateKey<Element> privateKey, const shared_ptr<std::map<usint, LPEvalKey<Element>>> eAuto, const std::vector<usint>& indexList, const std::string& keyId = "") { if (!privateKey) PALISADE_THROW(config_error, "Input private key is nullptr"); if (!eAuto) PALISADE_THROW(config_error, "Input evaluation key map is nullptr"); if (!indexList.size()) PALISADE_THROW(config_error, "Input index vector is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiEvalAutomorphismKeyGen( privateKey, eAuto, indexList, keyId); if (doTiming) { timeSamples->push_back( TimingInfo(OpMultiEvalAutomorphismKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generates joined rotation keys * from the current secret share and prior joined * rotation keys * * @param privateKey secret key share. * @param eAuto a dictionary with prior joined rotation keys. * @param &indexList a vector of rotation indices. * @param keyId - new key identifier used for the resulting evaluation key * @return a dictionary with new joined rotation keys. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiEvalAtIndexKeyGen( const LPPrivateKey<Element> privateKey, const shared_ptr<std::map<usint, LPEvalKey<Element>>> eAuto, const std::vector<int32_t>& indexList, const std::string& keyId = "") { if (!privateKey) PALISADE_THROW(config_error, "Input private key is nullptr"); if (!eAuto) PALISADE_THROW(config_error, "Input evaluation key map is nullptr"); if (!indexList.size()) PALISADE_THROW(config_error, "Input index vector is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiEvalAtIndexKeyGen(privateKey, eAuto, indexList, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiEvalAtIndexKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generates joined summation evaluation keys * from the current secret share and prior joined * summation keys * * @param privateKey secret key share. * @param eSum a dictionary with prior joined summation keys. * @param keyId - new key identifier used for the resulting evaluation key * @return new joined summation keys. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiEvalSumKeyGen( const LPPrivateKey<Element> privateKey, const shared_ptr<std::map<usint, LPEvalKey<Element>>> eSum, const std::string& keyId = "") { if (!privateKey) PALISADE_THROW(config_error, "Input private key is nullptr"); if (!eSum) PALISADE_THROW(config_error, "Input evaluation key map is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiEvalSumKeyGen(privateKey, eSum, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiEvalSumKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Adds two prior evaluation keys * * @param a first evaluation key. * @param b second evaluation key. * @param keyId - new key identifier used for the resulting evaluation key * @return the new joined key. 
   */
  LPEvalKey<Element> MultiAddEvalKeys(LPEvalKey<Element> a,
                                      LPEvalKey<Element> b,
                                      const std::string& keyId = "") {
    if (!a)
      PALISADE_THROW(config_error, "Input first evaluation key is nullptr");
    if (!b)
      PALISADE_THROW(config_error, "Input second evaluation key is nullptr");

    TimeVar t;
    if (doTiming) TIC(t);
    auto r = GetEncryptionAlgorithm()->MultiAddEvalKeys(a, b, keyId);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpMultiAddEvalKeys, TOC_US(t)));
    }
    return r;
  }

  /**
   * Threshold FHE: Generates a partial evaluation key for homomorphic
   * multiplication based on the current secret share and an existing partial
   * evaluation key
   *
   * @param evalKey prior evaluation key.
   * @param sk current secret share.
   * @param keyId - new key identifier used for the resulting evaluation key
   * @return the new joined key.
   */
  LPEvalKey<Element> MultiMultEvalKey(LPEvalKey<Element> evalKey,
                                      LPPrivateKey<Element> sk,
                                      const std::string& keyId = "") {
    if (!evalKey)
      PALISADE_THROW(config_error, "Input evaluation key is nullptr");
    if (!sk) PALISADE_THROW(config_error, "Input private key is nullptr");

    TimeVar t;
    if (doTiming) TIC(t);
    auto r = GetEncryptionAlgorithm()->MultiMultEvalKey(evalKey, sk, keyId);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpMultiMultEvalKey, TOC_US(t)));
    }
    return r;
  }

  /**
   * Threshold FHE: Adds two prior evaluation key sets for summation
   *
   * @param es1 first summation key set.
   * @param es2 second summation key set.
   * @param keyId - new key identifier used for the resulting evaluation key
   * @return the new joined key set for summation.
   */
  shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiAddEvalSumKeys(
      const shared_ptr<std::map<usint, LPEvalKey<Element>>> es1,
      const shared_ptr<std::map<usint, LPEvalKey<Element>>> es2,
      const std::string& keyId = "") {
    if (!es1)
      PALISADE_THROW(config_error, "Input first evaluation key map is nullptr");
    if (!es2)
      PALISADE_THROW(config_error,
                     "Input second evaluation key map is nullptr");

    TimeVar t;
    if (doTiming) TIC(t);
    auto r = GetEncryptionAlgorithm()->MultiAddEvalSumKeys(es1, es2, keyId);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpMultiAddEvalSumKeys, TOC_US(t)));
    }
    return r;
  }

  /**
   * Threshold FHE: Adds two prior evaluation key sets for automorphisms
   *
   * @param es1 first automorphism key set.
   * @param es2 second automorphism key set.
   * @param keyId - new key identifier used for the resulting evaluation key.
   * @return the new joined key set for automorphisms.
   */
  shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiAddEvalAutomorphismKeys(
      const shared_ptr<std::map<usint, LPEvalKey<Element>>> es1,
      const shared_ptr<std::map<usint, LPEvalKey<Element>>> es2,
      const std::string& keyId = "") {
    if (!es1)
      PALISADE_THROW(config_error, "Input first evaluation key map is nullptr");
    if (!es2)
      PALISADE_THROW(config_error,
                     "Input second evaluation key map is nullptr");

    TimeVar t;
    if (doTiming) TIC(t);
    auto r = GetEncryptionAlgorithm()->MultiAddEvalAutomorphismKeys(es1, es2,
                                                                    keyId);
    if (doTiming) {
      timeSamples->push_back(
          TimingInfo(OpMultiAddEvalAutomorphismKeys, TOC_US(t)));
    }
    return r;
  }

  /**
   * Threshold FHE: Adds two partial public keys
   *
   * @param pubKey1 first public key.
   * @param pubKey2 second public key.
   * @param keyId - new key identifier used for the resulting evaluation key.
   * @return the new joined key.
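   *
   * Sketch of accumulating a joined public key across three parties (the key
   * variables and keyId are illustrative assumptions): each call folds one
   * more partial key into the joined key:
   * @code
   *   auto joined12 = cc->MultiAddPubKeys(pk1, pk2, keyId);
   *   auto joined   = cc->MultiAddPubKeys(joined12, pk3, keyId);
   * @endcode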
   */
  LPPublicKey<Element> MultiAddPubKeys(LPPublicKey<Element> pubKey1,
                                       LPPublicKey<Element> pubKey2,
                                       const std::string& keyId = "") {
    if (!pubKey1)
      PALISADE_THROW(config_error, "Input first public key is nullptr");
    if (!pubKey2)
      PALISADE_THROW(config_error, "Input second public key is nullptr");

    TimeVar t;
    if (doTiming) TIC(t);
    auto r =
        GetEncryptionAlgorithm()->MultiAddPubKeys(pubKey1, pubKey2, keyId);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpMultiAddPubKeys, TOC_US(t)));
    }
    return r;
  }

  /**
   * Threshold FHE: Adds two partial evaluation keys for multiplication
   *
   * @param evalKey1 first evaluation key.
   * @param evalKey2 second evaluation key.
   * @param keyId - new key identifier used for the resulting evaluation key.
   * @return the new joined key.
   */
  LPEvalKey<Element> MultiAddEvalMultKeys(LPEvalKey<Element> evalKey1,
                                          LPEvalKey<Element> evalKey2,
                                          const std::string& keyId = "") {
    if (!evalKey1)
      PALISADE_THROW(config_error, "Input first evaluation key is nullptr");
    if (!evalKey2)
      PALISADE_THROW(config_error, "Input second evaluation key is nullptr");

    TimeVar t;
    if (doTiming) TIC(t);
    auto r = GetEncryptionAlgorithm()->MultiAddEvalMultKeys(evalKey1, evalKey2,
                                                            keyId);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpMultiAddEvalMultKeys, TOC_US(t)));
    }
    return r;
  }

  /**
   * SparseKeyGen generates a key pair with special structure, and without full
   * entropy, for use in special cases like Ring Reduction
   * @return a public/secret key pair
   */
  LPKeyPair<Element> SparseKeyGen() {
    TimeVar t;
    if (doTiming) TIC(t);
    auto r = GetEncryptionAlgorithm()->KeyGen(
        CryptoContextFactory<Element>::GetContextForPointer(this), true);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpSparseKeyGen, TOC_US(t)));
    }
    return r;
  }

  /**
   * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption
   * @param newKey (public)
   * @param oldKey (private)
   * @return new evaluation key
   */
  LPEvalKey<Element> ReKeyGen(const LPPublicKey<Element> newKey,
                              const LPPrivateKey<Element> oldKey) const {
    if (newKey == nullptr || oldKey == nullptr ||
        Mismatched(newKey->GetCryptoContext()) ||
        Mismatched(oldKey->GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "Keys passed to ReKeyGen were not generated with this "
                     "crypto context");

    TimeVar t;
    if (doTiming) TIC(t);
    auto r = GetEncryptionAlgorithm()->ReKeyGen(newKey, oldKey);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpReKeyGenPubPri, TOC_US(t)));
    }
    return r;
  }

  /**
   * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption
   * NOTE this functionality has been completely removed from PALISADE
   * @param newKey (private)
   * @param oldKey (private)
   * @return new evaluation key
   */
  LPEvalKey<Element> ReKeyGen(const LPPrivateKey<Element> newKey,
                              const LPPrivateKey<Element> oldKey) const
      __attribute__((deprecated("functionality removed from PALISADE")));

  /**
   * EvalMultKeyGen creates a key that can be used with the PALISADE EvalMult
   * operator
   * the new evaluation key is stored in cryptocontext
   * @param key
   */
  void EvalMultKeyGen(const LPPrivateKey<Element> key);
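  /*
   * Illustrative sketch (EvalMult is part of this class but appears later in
   * this header; the ciphertext names are assumptions): the relinearization
   * key must be generated, and is cached in the context, before the first
   * multiplication:
   *
   *   auto kp = cc->KeyGen();
   *   cc->EvalMultKeyGen(kp.secretKey);
   *   auto ctProd = cc->EvalMult(ct1, ct2);
   */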
  /**
   * EvalMultKeysGen creates a vector of evalmult keys that can be used with the
   * PALISADE EvalMult operator; 1st key (for s^2) is used for multiplication of
   * ciphertexts of depth 1, 2nd key (for s^3) is used for multiplication of
   * ciphertexts of depth 2, etc.
   * a vector of new evaluation keys is stored in cryptocontext
   *
   * @param key
   */
  void EvalMultKeysGen(const LPPrivateKey<Element> key);

  /**
   * GetEvalMultKeyVector fetches the eval mult keys for a given KeyID
   * @param keyID
   * @return key vector from ID
   */
  static const vector<LPEvalKey<Element>>& GetEvalMultKeyVector(
      const string& keyID);

  /**
   * GetEvalMultKeys
   * @return map of all the keys
   */
  static std::map<string, std::vector<LPEvalKey<Element>>>&
  GetAllEvalMultKeys();

  /**
   * KeySwitchGen creates a key that can be used with the PALISADE KeySwitch
   * operation
   * @param key1
   * @param key2
   * @return new evaluation key
   */
  LPEvalKey<Element> KeySwitchGen(const LPPrivateKey<Element> key1,
                                  const LPPrivateKey<Element> key2) const {
    if (key1 == nullptr || key2 == nullptr ||
        Mismatched(key1->GetCryptoContext()) ||
        Mismatched(key2->GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "Keys passed to KeySwitchGen were not generated with this "
                     "crypto context");

    TimeVar t;
    if (doTiming) TIC(t);
    auto r = GetEncryptionAlgorithm()->KeySwitchGen(key1, key2);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpKeySwitchGen, TOC_US(t)));
    }
    return r;
  }

  /**
   * Encrypt a plaintext using a given public key
   * @param publicKey
   * @param plaintext
   * @return ciphertext (or null on failure)
   */
  Ciphertext<Element> Encrypt(const LPPublicKey<Element> publicKey,
                              Plaintext plaintext) {
    if (publicKey == nullptr)
      PALISADE_THROW(type_error, "null key passed to Encrypt");
    if (plaintext == nullptr)
      PALISADE_THROW(type_error, "Input plaintext is nullptr");
    if (Mismatched(publicKey->GetCryptoContext()))
      PALISADE_THROW(
          config_error,
          "key passed to Encrypt was not generated with this crypto context");

    TimeVar t;
    if (doTiming) TIC(t);

    Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(
        publicKey, plaintext->GetElement<Element>());

    if (ciphertext) {
      ciphertext->SetEncodingType(plaintext->GetEncodingType());
      ciphertext->SetScalingFactor(plaintext->GetScalingFactor());
      ciphertext->SetDepth(plaintext->GetDepth());
      ciphertext->SetLevel(plaintext->GetLevel());
    }

    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEncryptPub, TOC_US(t)));
    }
    return ciphertext;
  }

  /**
   * Encrypt a plaintext using a given private key
   * @param privateKey
   * @param plaintext
   * @return ciphertext (or null on failure)
   */
  Ciphertext<Element> Encrypt(const LPPrivateKey<Element> privateKey,
                              Plaintext plaintext) const {
    if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext()))
      PALISADE_THROW(
          config_error,
          "key passed to Encrypt was not generated with this crypto context");
    if (plaintext == nullptr)
      PALISADE_THROW(type_error, "Input plaintext is nullptr");

    TimeVar t;
    if (doTiming) TIC(t);

    Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(
        privateKey, plaintext->GetElement<Element>());

    if (ciphertext) {
      ciphertext->SetEncodingType(plaintext->GetEncodingType());
      ciphertext->SetScalingFactor(plaintext->GetScalingFactor());
      ciphertext->SetDepth(plaintext->GetDepth());
      ciphertext->SetLevel(plaintext->GetLevel());
    }

    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEncryptPriv, TOC_US(t)));
    }
    return ciphertext;
  }
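  /*
   * Minimal encrypt/decrypt round-trip sketch (packed encoding and the key
   * pair "kp" are assumptions for the example):
   *
   *   Plaintext pt = cc->MakePackedPlaintext({1, 2, 3});
   *   auto ct = cc->Encrypt(kp.publicKey, pt);
   *   Plaintext out;
   *   cc->Decrypt(kp.secretKey, ct, &out);
   *   out->SetLength(pt->GetLength());
   */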
  /**
   * Encrypt a matrix of Plaintext
   * @param publicKey - for encryption
   * @param plaintext - to encrypt
   * @return a matrix of ciphertexts created by encrypting the plaintext
   */
  shared_ptr<Matrix<RationalCiphertext<Element>>> EncryptMatrix(
      const LPPublicKey<Element> publicKey, Matrix<Plaintext>& plaintext) {
    if (publicKey == nullptr || Mismatched(publicKey->GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "key passed to EncryptMatrix was not generated with this "
                     "crypto context");

    auto zeroAlloc = [=]() {
      return RationalCiphertext<Element>(publicKey->GetCryptoContext(), true);
    };

    auto cipherResults = std::make_shared<Matrix<RationalCiphertext<Element>>>(
        zeroAlloc, plaintext.GetRows(), plaintext.GetCols());

    TimeVar t;
    if (doTiming) TIC(t);
    for (size_t row = 0; row < plaintext.GetRows(); row++) {
      for (size_t col = 0; col < plaintext.GetCols(); col++) {
        if (plaintext(row, col)->Encode() == false) return nullptr;

        Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(
            publicKey, plaintext(row, col)->GetElement<Element>());
        if (ciphertext) {
          ciphertext->SetEncodingType(plaintext(row, col)->GetEncodingType());
        }
        (*cipherResults)(row, col).SetNumerator(ciphertext);
      }
    }
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEncryptMatrixPlain, TOC_US(t)));
    }
    return cipherResults;
  }

  /**
   * Encrypt a matrix of Plaintext
   * @param publicKey - for encryption
   * @param plaintext - to encrypt
   * @return a matrix of ciphertexts created by encrypting the plaintext
   */
  Matrix<Ciphertext<Element>> EncryptMatrixCiphertext(
      const LPPublicKey<Element> publicKey, Matrix<Plaintext>& plaintext) {
    if (publicKey == nullptr || Mismatched(publicKey->GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "key passed to EncryptMatrix was not generated with this "
                     "crypto context");

    auto zeroAlloc = [=]() {
      return Ciphertext<Element>(std::make_shared<CiphertextImpl<Element>>(
          publicKey->GetCryptoContext()));
    };

    Matrix<Ciphertext<Element>> cipherResults(zeroAlloc, plaintext.GetRows(),
                                              plaintext.GetCols());

    TimeVar t;
    if (doTiming) TIC(t);
    for (size_t row = 0; row < plaintext.GetRows(); row++) {
      for (size_t col = 0; col < plaintext.GetCols(); col++) {
        if (plaintext(row, col)->Encode() == false)
          PALISADE_THROW(math_error, "Plaintext is not encoded");

        Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(
            publicKey, plaintext(row, col)->GetElement<Element>());
        if (ciphertext) {
          ciphertext->SetEncodingType(plaintext(row, col)->GetEncodingType());
        }
        cipherResults(row, col) = (ciphertext);
      }
    }
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEncryptMatrixPlain, TOC_US(t)));
    }
    return cipherResults;
  }

  /**
   * Perform an encryption by reading plaintext from a stream, serializing each
   * piece of ciphertext, and writing the serializations to an output stream
   * @param publicKey - the encryption key in use
   * @param instream - where to read the input from
   * @param outstream - where to write the serialization to
   * @return
   */
  void EncryptStream(const LPPublicKey<Element> publicKey,
                     std::istream& instream, std::ostream& outstream) const
      __attribute__((
          deprecated("serialization changed, see wiki for details")));

  // PLAINTEXT FACTORY METHODS
  // FIXME to be deprecated in 2.0
  /**
   * MakeScalarPlaintext constructs a ScalarEncoding in this context
   * @param value
   * @return plaintext
   */
  Plaintext MakeScalarPlaintext(int64_t value) const {
    auto p = PlaintextFactory::MakePlaintext(Scalar, this->GetElementParams(),
                                             this->GetEncodingParams(), value);
    return p;
  }
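  /*
   * The factory methods in this section all funnel into
   * PlaintextFactory::MakePlaintext with this context's element and encoding
   * parameters; a brief sketch (the values are illustrative):
   *
   *   Plaintext a = cc->MakeIntegerPlaintext(42);
   *   Plaintext b = cc->MakeCoefPackedPlaintext({1, 0, -1});
   *   Plaintext c = cc->MakePackedPlaintext({5, 6, 7, 8});
   */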
  /**
   * MakeStringPlaintext constructs a StringEncoding in this context
   * @param str
   * @return plaintext
   */
  Plaintext MakeStringPlaintext(const string& str) const {
    auto p = PlaintextFactory::MakePlaintext(String, this->GetElementParams(),
                                             this->GetEncodingParams(), str);
    return p;
  }

  /**
   * MakeIntegerPlaintext constructs an IntegerEncoding in this context
   * @param value
   * @return plaintext
   */
  Plaintext MakeIntegerPlaintext(int64_t value) const {
    auto p = PlaintextFactory::MakePlaintext(Integer, this->GetElementParams(),
                                             this->GetEncodingParams(), value);
    return p;
  }

  /**
   * MakeFractionalPlaintext constructs a FractionalEncoding in this context
   * @param value
   * @param truncatedBits limit on fractional
   * @return plaintext
   */
  Plaintext MakeFractionalPlaintext(int64_t value,
                                    size_t truncatedBits = 0) const {
    auto p = PlaintextFactory::MakePlaintext(
        Fractional, this->GetElementParams(), this->GetEncodingParams(), value,
        truncatedBits);
    return p;
  }

  /**
   * MakeCoefPackedPlaintext constructs a CoefPackedEncoding in this context
   * @param value
   * @return plaintext
   */
  Plaintext MakeCoefPackedPlaintext(const vector<int64_t>& value) const {
    auto p = PlaintextFactory::MakePlaintext(
        CoefPacked, this->GetElementParams(), this->GetEncodingParams(), value);
    return p;
  }

  /**
   * MakePackedPlaintext constructs a PackedEncoding in this context
   * @param value
   * @return plaintext
   */
  Plaintext MakePackedPlaintext(const vector<int64_t>& value) const {
    auto p = PlaintextFactory::MakePlaintext(Packed, this->GetElementParams(),
                                             this->GetEncodingParams(), value);
    return p;
  }

  /**
   * MakePlaintext static that takes a cc and calls the Plaintext Factory
   * @param encoding
   * @param cc
   * @param value
   * @return
   */
  template <typename Value1>
  static Plaintext MakePlaintext(PlaintextEncodings encoding,
                                 CryptoContext<Element> cc,
                                 const Value1& value) {
    return PlaintextFactory::MakePlaintext(encoding, cc->GetElementParams(),
                                           cc->GetEncodingParams(), value);
  }

  template <typename Value1, typename Value2>
  static Plaintext MakePlaintext(PlaintextEncodings encoding,
                                 CryptoContext<Element> cc, const Value1& value,
                                 const Value2& value2) {
    return PlaintextFactory::MakePlaintext(encoding, cc->GetElementParams(),
                                           cc->GetEncodingParams(), value,
                                           value2);
  }

  /**
   * COMPLEX ARITHMETIC IS NOT AVAILABLE STARTING WITH 1.10.6,
   * AND THIS METHOD WILL BE DEPRECATED. USE THE REAL-NUMBER METHOD INSTEAD.
   * MakeCKKSPackedPlaintext constructs a CKKSPackedEncoding in this context
   * from a vector of complex numbers
   * @param value - input vector
   * @param depth - depth used to encode the vector
   * @param level - level at which the vector will get encrypted
   * @param params - parameters to be used for the ciphertext
   * @return plaintext
   */
  virtual Plaintext MakeCKKSPackedPlaintext(
      const std::vector<std::complex<double>>& value, size_t depth = 1,
      uint32_t level = 0, const shared_ptr<ParmType> params = nullptr) const {
    Plaintext p;
    const auto cryptoParamsCKKS =
        std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>(
            this->GetCryptoParameters());

    double scFact = cryptoParamsCKKS->GetScalingFactorOfLevel(level);

    if (params == nullptr) {
      shared_ptr<ILDCRTParams<DCRTPoly::Integer>> elemParamsPtr;
      if (level != 0) {
        ILDCRTParams<DCRTPoly::Integer> elemParams =
            *(cryptoParamsCKKS->GetElementParams());
        for (uint32_t i = 0; i < level; i++) {
          elemParams.PopLastParam();
        }
        elemParamsPtr =
            std::make_shared<ILDCRTParams<DCRTPoly::Integer>>(elemParams);
      } else {
        elemParamsPtr = cryptoParamsCKKS->GetElementParams();
      }

      p = Plaintext(std::make_shared<CKKSPackedEncoding>(
          elemParamsPtr, this->GetEncodingParams(), value, depth, level,
          scFact));
    } else {
      p = Plaintext(std::make_shared<CKKSPackedEncoding>(
          params, this->GetEncodingParams(), value, depth, level, scFact));
    }

    p->Encode();
    return p;
  }

  /**
   * MakeCKKSPackedPlaintext constructs a CKKSPackedEncoding in this context
   * from a vector of real numbers
   * @param value - input vector
   * @param depth - depth used to encode the vector
   * @param level - level at which the vector will get encrypted
   * @param params - parameters to be used for the ciphertext
   * @return plaintext
   */
  virtual Plaintext MakeCKKSPackedPlaintext(
      const std::vector<double>& value, size_t depth = 1, uint32_t level = 0,
      const shared_ptr<ParmType> params = nullptr) const {
    std::vector<std::complex<double>> complexValue(value.size());
    std::transform(value.begin(), value.end(), complexValue.begin(),
                   [](double da) { return std::complex<double>(da); });

    return MakeCKKSPackedPlaintext(complexValue, depth, level, params);
  }
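  /*
   * CKKS encoding sketch: encode a real vector at the default depth 1 and
   * level 0, then encrypt it (the values and the key pair "kp" are
   * assumptions for the example):
   *
   *   std::vector<double> vals = {0.5, 1.25, -3.0};
   *   Plaintext ckksPt = cc->MakeCKKSPackedPlaintext(vals);
   *   auto ckksCt = cc->Encrypt(kp.publicKey, ckksPt);
   */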
  /**
   * GetPlaintextForDecrypt returns a new Plaintext to be used in decryption.
   *
   * @param pte Type of plaintext we want to return
   * @param evp Element parameters
   * @param ep Encoding parameters
   * @return plaintext
   */
  static Plaintext GetPlaintextForDecrypt(PlaintextEncodings pte,
                                          shared_ptr<ParmType> evp,
                                          EncodingParams ep);

 public:
  /**
   * Decrypt a single ciphertext into the appropriate plaintext
   *
   * @param privateKey - decryption key
   * @param ciphertext - ciphertext to decrypt
   * @param plaintext - pointer where the resulting plaintext is stored
   * @return
   */
  DecryptResult Decrypt(const LPPrivateKey<Element> privateKey,
                        ConstCiphertext<Element> ciphertext,
                        Plaintext* plaintext);

  /**
   * Decrypt method for a matrix of ciphertexts
   * @param privateKey - for decryption
   * @param ciphertext - matrix of encrypted ciphertexts
   * @param numerator - pointer to the destination matrix of numerator
   * plaintexts
   * @param denominator - pointer to the destination matrix of denominator
   * plaintexts
   * @return size of plaintext
   */
  DecryptResult DecryptMatrix(
      const LPPrivateKey<Element> privateKey,
      const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext,
      shared_ptr<Matrix<Plaintext>>* numerator,
      shared_ptr<Matrix<Plaintext>>* denominator) const {
    if (ciphertext == nullptr)
      PALISADE_THROW(type_error, "Input ciphertext is nullptr");
    if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "Information passed to DecryptMatrix was not generated "
                     "with this crypto context");

    // edge case
    if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0))
      return DecryptResult();

    const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator();

    // need to build matrices for the result
    Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(),
                                           this->GetElementParams(),
                                           this->GetEncodingParams());
    auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };

    *numerator = std::make_shared<Matrix<Plaintext>>(
        zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols());
    *denominator = std::make_shared<Matrix<Plaintext>>(
        zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols());

    TimeVar t;
    if (doTiming) TIC(t);
    for (size_t row = 0; row < ciphertext->GetRows(); row++) {
      for (size_t col = 0; col < ciphertext->GetCols(); col++) {
        if (Mismatched((*ciphertext)(row, col).GetCryptoContext()))
          PALISADE_THROW(config_error,
                         "A ciphertext passed to DecryptMatrix was not "
                         "generated with this crypto context");

        const Ciphertext<Element> ctN = (*ciphertext)(row, col).GetNumerator();

        // determine which type of plaintext that you need to decrypt into
        Plaintext decryptedNumerator = GetPlaintextForDecrypt(
            ctN->GetEncodingType(), this->GetElementParams(),
            this->GetEncodingParams());
        DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(
            privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());

        if (resultN.isValid == false) return resultN;

        (**numerator)(row, col) = decryptedNumerator;
        (**numerator)(row, col)->Decode();

        Plaintext decryptedDenominator = GetPlaintextForDecrypt(
            ctN->GetEncodingType(), this->GetElementParams(),
            this->GetEncodingParams());
        if ((*ciphertext)(row, col).GetIntegerFlag() == true) {
          // integer case: the denominator is the constant 1
          decryptedDenominator->GetElement<Poly>().SetValuesToZero();
          decryptedDenominator->GetElement<Poly>().at(0) = 1;
        } else {
          const Ciphertext<Element> ctD =
              (*ciphertext)(row, col).GetDenominator();

          DecryptResult resultD = GetEncryptionAlgorithm()->Decrypt(
              privateKey, ctD,
              &decryptedDenominator->GetElement<NativePoly>());

          if (resultD.isValid == false) return resultD;
        }

        // store the denominator in both the integer and the rational case
        (**denominator)(row, col) = decryptedDenominator;
        (**denominator)(row, col)->Decode();
      }
    }
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpDecryptMatrixPlain, TOC_US(t)));
    }
    return DecryptResult(
        (**numerator)((*numerator)->GetRows() - 1, (*numerator)->GetCols() - 1)
            ->GetLength());
  }

  /**
   * Decrypt method for a matrix of ciphertexts
   * @param privateKey - for decryption
   * @param ciphertext - matrix of encrypted ciphertexts
   * @param numerator - pointer to the destination matrix of plaintexts
   * @return size of plaintext
   */
  DecryptResult DecryptMatrixCiphertext(
      const LPPrivateKey<Element> privateKey,
      const Matrix<Ciphertext<Element>> ciphertext,
      Matrix<Plaintext>* numerator) const {
    if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "Information passed to DecryptMatrix was not generated "
                     "with this crypto context");

    // edge case
    if ((ciphertext.GetCols() == 0) && (ciphertext.GetRows() == 0))
      return DecryptResult();

    const Ciphertext<Element> ctN = (ciphertext)(0, 0);

    // need to build matrices for the result
    // Plaintext ptx =
    //     GetPlaintextForDecrypt(ctN->GetEncodingType(),
    //                            this->GetElementParams(),
    //                            this->GetEncodingParams());

    // auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };
    // numerator = std::make_shared<Matrix<Plaintext>>(zeroPackingAlloc,
    //                                                 ciphertext.GetRows(),
    //                                                 ciphertext.GetCols());

    TimeVar t;
    if (doTiming) TIC(t);
    for (size_t row = 0; row < ciphertext.GetRows(); row++) {
      for (size_t col = 0; col < ciphertext.GetCols(); col++) {
        if (Mismatched((ciphertext(row, col))->GetCryptoContext()))
          PALISADE_THROW(config_error,
                         "A ciphertext passed to DecryptMatrix was not "
                         "generated with this crypto context");

        const Ciphertext<Element> ctN = (ciphertext)(row, col);

        // determine which type of plaintext that you need to decrypt into
        Plaintext decryptedNumerator = GetPlaintextForDecrypt(
            ctN->GetEncodingType(), this->GetElementParams(),
            this->GetEncodingParams());
        DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(
            privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());

        if (resultN.isValid == false) return resultN;
        (*numerator)(row, col) = decryptedNumerator;

        (*numerator)(row, col)->Decode();
      }
    }
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpDecryptMatrixPlain, TOC_US(t)));
    }

    return DecryptResult(
        (*numerator)(numerator->GetRows() - 1, numerator->GetCols() - 1)
            ->GetLength());
  }

  /**
   * Decrypt method for numerators in a matrix of ciphertexts (packed encoding)
   * @param privateKey - for decryption
   * @param ciphertext - matrix of encrypted ciphertexts
   * @param numerator - pointer to the destination matrix of plaintexts
   * @return size of plaintext
   */
  DecryptResult DecryptMatrixNumerator(
      const LPPrivateKey<Element> privateKey,
      const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext,
      shared_ptr<Matrix<Plaintext>>* numerator) const {
    if (ciphertext == nullptr)
      PALISADE_THROW(type_error, "Input ciphertext is nullptr");
    if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "Information passed to DecryptMatrix was not generated "
                     "with this crypto context");

    // edge case
    if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0))
      return DecryptResult();

    TimeVar t;
    if (doTiming) TIC(t);

    // force all precomputations to take place in advance
    if (Mismatched((*ciphertext)(0, 0).GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "A ciphertext passed to DecryptMatrix was not generated "
                     "with this crypto context");

    const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator();

    // need to build a numerator matrix for the result
    Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(),
                                           this->GetElementParams(),
this->GetEncodingParams()); auto zeroPackingAlloc = [=]() { return Plaintext(ptx); }; *numerator = std::make_shared<Matrix<Plaintext>>( zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()); Plaintext decryptedNumerator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt( privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>()); if (resultN.isValid == false) return resultN; (**numerator)(0, 0) = decryptedNumerator; (**numerator)(0, 0)->Decode(); for (size_t row = 0; row < ciphertext->GetRows(); row++) { #pragma omp parallel for for (size_t col = 0; col < ciphertext->GetCols(); col++) { if (row + col > 0) { if (Mismatched((*ciphertext)(row, col).GetCryptoContext())) PALISADE_THROW(config_error, "A ciphertext passed to DecryptMatrix was not " "generated with this crypto context"); const Ciphertext<Element> ctN = (*ciphertext)(row, col).GetNumerator(); Plaintext decryptedNumerator = GetPlaintextForDecrypt( ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); GetEncryptionAlgorithm()->Decrypt( privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>()); (**numerator)(row, col) = decryptedNumerator; (**numerator)(row, col)->Decode(); } } } if (doTiming) { timeSamples->push_back(TimingInfo(OpDecryptMatrixPacked, TOC_US(t))); } return DecryptResult( (**numerator)((*numerator)->GetRows() - 1, (*numerator)->GetCols() - 1) ->GetLength()); } /** * read instream for a sequence of serialized ciphertext; deserialize it, * decrypt it, and write it to outstream * @param privateKey - reference to the decryption key * @param instream - input stream with sequence of serialized ciphertexts * @param outstream - output stream for plaintext * @return total bytes processed */ size_t DecryptStream(const LPPrivateKey<Element> privateKey, std::istream& instream, std::ostream& outstream) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * ReEncrypt - Proxy Re Encryption mechanism for PALISADE * @param evalKey - evaluation key from the PRE keygen method * @param ciphertext - vector of shared pointers to encrypted Ciphertext * @param publicKey the public key of the recipient of the re-encrypted * ciphertext. * @return vector of shared pointers to re-encrypted ciphertexts */ Ciphertext<Element> ReEncrypt( LPEvalKey<Element> evalKey, ConstCiphertext<Element> ciphertext, const LPPublicKey<Element> publicKey = nullptr) const { if (evalKey == nullptr || Mismatched(evalKey->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to ReEncrypt was not generated with " "this crypto context"); if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext())) PALISADE_THROW(config_error, "The ciphertext passed to ReEncrypt was not generated " "with this crypto context"); TimeVar t; if (doTiming) TIC(t); Ciphertext<Element> newCiphertext = GetEncryptionAlgorithm()->ReEncrypt(evalKey, ciphertext, publicKey); if (doTiming) { timeSamples->push_back(TimingInfo(OpReEncrypt, TOC_US(t))); } return newCiphertext; } /** * read instream for a serialized ciphertext. 
deserialize, re-encrypt, * serialize, and write to outstream * @param evalKey - reference to the re-encryption key * @param instream - input stream with sequence of serialized ciphertext * @param outstream - output stream with sequence of serialized re-encrypted * ciphertext */ void ReEncryptStream(const LPEvalKey<Element> evalKey, std::istream& instream, std::ostream& outstream, const LPPublicKey<Element> publicKey = nullptr) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * EvalAdd - PALISADE EvalAdd method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 + ct2 */ Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAdd(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAdd, TOC_US(t))); } return rv; } /** * EvalAdd - PALISADE EvalAddMutable method for a pair of ciphertexts. * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. * * @param ct1 * @param ct2 * @return new ciphertext for ct1 + ct2 */ Ciphertext<Element> EvalAddMutable(Ciphertext<Element>& ct1, Ciphertext<Element>& ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAddMutable(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAdd, TOC_US(t))); } return rv; } /** * EvalAddMatrix - PALISADE EvalAdd method for a pair of matrices of * ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 + ct2 */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalAddMatrix( const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { TypeCheck((*ct1)(0, 0), (*ct2)(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<RationalCiphertext<Element>> rv = *ct1 + *ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddMatrix, TOC_US(t))); } return std::make_shared<Matrix<RationalCiphertext<Element>>>(rv); } /** * EvalAddMatrix - PALISADE EvalAdd method for a pair of matrices of * ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 + ct2 */ Matrix<Ciphertext<Element>> EvalAddMatrix( const Matrix<Ciphertext<Element>>& ct1, const Matrix<Ciphertext<Element>>& ct2) const { TypeCheck(ct1(0, 0), ct2(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<Ciphertext<Element>> rv = ct1 + ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddMatrix, TOC_US(t))); } // Matrix<Ciphertext<Element>> a(rv); return rv; } /** * EvalSub - PALISADE EvalSub method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 - ct2 */ Ciphertext<Element> EvalSub(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalSub(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSub, TOC_US(t))); } return rv; } /** * EvalSub - PALISADE EvalSubMutable method for a pair of ciphertexts * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. 
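   *
   * A hedged sketch contrasting the const and the mutable variants (the
   * ciphertext names are illustrative):
   * @code
   *   auto d1 = cc->EvalSub(ct1, ct2);         // inputs left untouched
   *   auto d2 = cc->EvalSubMutable(ct1, ct2);  // inputs may be rescaled
   * @endcode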
  /**
   * EvalSubMutable - PALISADE EvalSubMutable method for a pair of ciphertexts
   * This is a mutable version - input ciphertexts may get automatically
   * rescaled, or level-reduced.
   *
   * @param ct1
   * @param ct2
   * @return new ciphertext for ct1 - ct2
   */
  Ciphertext<Element> EvalSubMutable(Ciphertext<Element>& ct1,
                                     Ciphertext<Element>& ct2) const {
    TypeCheck(ct1, ct2);

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalSubMutable(ct1, ct2);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalSub, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalSubMatrix - PALISADE EvalSub method for a pair of matrices of
   * ciphertexts
   * @param ct1
   * @param ct2
   * @return new matrix for ct1 - ct2
   */
  shared_ptr<Matrix<RationalCiphertext<Element>>> EvalSubMatrix(
      const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1,
      const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const {
    TypeCheck((*ct1)(0, 0), (*ct2)(0, 0));  // TODO only checking one; when
                                            // Matrix is refactored, this
                                            // should be revisited

    TimeVar t;
    if (doTiming) TIC(t);
    Matrix<RationalCiphertext<Element>> rv = *ct1 - *ct2;
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalSubMatrix, TOC_US(t)));
    }
    return std::make_shared<Matrix<RationalCiphertext<Element>>>(rv);
  }

  /**
   * EvalSubMatrix - PALISADE EvalSub method for a pair of matrices of
   * ciphertexts
   * @param ct1
   * @param ct2
   * @return new matrix for ct1 - ct2
   */
  Matrix<Ciphertext<Element>> EvalSubMatrix(
      const Matrix<Ciphertext<Element>>& ct1,
      const Matrix<Ciphertext<Element>>& ct2) const {
    TypeCheck(ct1(0, 0), ct2(0, 0));  // TODO only checking one; when Matrix is
                                      // refactored, this should be revisited

    TimeVar t;
    if (doTiming) TIC(t);
    Matrix<Ciphertext<Element>> rv = ct1 - ct2;
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalSubMatrix, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalAdd - PALISADE EvalAdd method for a ciphertext and plaintext
   * @param ciphertext
   * @param plaintext
   * @return new ciphertext for ciphertext + plaintext
   */
  Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext,
                              ConstPlaintext plaintext) const {
    TypeCheck(ciphertext, plaintext);

    TimeVar t;
    if (doTiming) TIC(t);
    plaintext->SetFormat(EVALUATION);
    auto rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, plaintext);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalAddPlain, TOC_US(t)));
    }
    return rv;
  }
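  // --- Usage sketch (illustrative only): mixing a ciphertext with a
  // plaintext or scalar operand avoids encrypting the constant side. Names
  // as in the sketches above.
  //
  //   auto cPlusP = cc->EvalAdd(c1, p2);    // ciphertext + plaintext
  //   auto cPlusK = cc->EvalAdd(c1, 3.14);  // ciphertext + double constant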
  /**
   * EvalAddMutable - PALISADE EvalAddMutable method for a ciphertext and
   * plaintext. This is a mutable version - input ciphertexts may get
   * automatically rescaled, or level-reduced.
   *
   * @param ciphertext
   * @param plaintext
   * @return new ciphertext for ciphertext + plaintext
   */
  Ciphertext<Element> EvalAddMutable(Ciphertext<Element>& ciphertext,
                                     Plaintext plaintext) const {
    TypeCheck((ConstCiphertext<Element>)ciphertext, (ConstPlaintext)plaintext);

    TimeVar t;
    if (doTiming) TIC(t);
    plaintext->SetFormat(EVALUATION);
    auto rv = GetEncryptionAlgorithm()->EvalAddMutable(ciphertext, plaintext);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalAddPlain, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalAdd - PALISADE EvalAdd method for a ciphertext and constant
   * @param ciphertext
   * @param constant
   * @return new ciphertext for ciphertext + constant
   */
  Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext,
                              double constant) const {
    TimeVar t;
    Ciphertext<Element> rv;

    if (constant >= 0) {
      if (doTiming) TIC(t);
      rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, constant);
      if (doTiming) {
        timeSamples->push_back(TimingInfo(OpEvalAddConst, TOC_US(t)));
      }
    } else {
      if (doTiming) TIC(t);
      rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, -constant);
      if (doTiming) {
        timeSamples->push_back(TimingInfo(OpEvalAddConst, TOC_US(t)));
      }
    }
    return rv;
  }

  /**
   * EvalLinearWSum - PALISADE EvalLinearWSum method to compute a linear
   * weighted sum
   *
   * @param ciphertexts a list of ciphertexts
   * @param constants a list of weights
   * @return new ciphertext containing the weighted sum
   */
  Ciphertext<Element> EvalLinearWSum(vector<Ciphertext<Element>> ciphertexts,
                                     vector<double> constants) const {
    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalLinearWSum(ciphertexts, constants);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalLinearWSum, TOC_US(t)));
    }
    return rv;
  }
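  // --- Usage sketch (illustrative only): EvalLinearWSum computes
  // w0*c0 + w1*c1 + ... in one call; both argument orders are accepted via
  // the inline overloads below. `c0`, `c1`, `c2` are assumed ciphertexts.
  //
  //   std::vector<Ciphertext<Element>> cts{c0, c1, c2};
  //   std::vector<double> ws{0.5, -1.0, 2.0};
  //   auto cWSum = cc->EvalLinearWSum(cts, ws);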
  /**
   * EvalLinearWSumMutable - method to compute a linear weighted sum.
   * This is a mutable version, meaning the level/depth of input
   * ciphertexts may change in the process.
   *
   * @param ciphertexts a list of ciphertexts
   * @param constants a list of weights
   * @return new ciphertext containing the weighted sum
   */
  Ciphertext<Element> EvalLinearWSumMutable(
      vector<Ciphertext<Element>> ciphertexts,
      vector<double> constants) const {
    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalLinearWSumMutable(ciphertexts,
                                                              constants);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalLinearWSum, TOC_US(t)));
    }
    return rv;
  }

  inline Ciphertext<Element> EvalLinearWSum(
      vector<double> constants,
      vector<Ciphertext<Element>> ciphertexts) const {
    return EvalLinearWSum(ciphertexts, constants);
  }

  inline Ciphertext<Element> EvalLinearWSumMutable(
      vector<double> constants,
      vector<Ciphertext<Element>> ciphertexts) const {
    return EvalLinearWSumMutable(ciphertexts, constants);
  }

  inline Ciphertext<Element> EvalAdd(
      ConstPlaintext plaintext, ConstCiphertext<Element> ciphertext) const {
    return EvalAdd(ciphertext, plaintext);
  }

  inline Ciphertext<Element> EvalAddMutable(
      Plaintext plaintext, Ciphertext<Element>& ciphertext) const {
    return EvalAddMutable(ciphertext, plaintext);
  }

  inline Ciphertext<Element> EvalAdd(
      double constant, ConstCiphertext<Element> ciphertext) const {
    return EvalAdd(ciphertext, constant);
  }

  /**
   * EvalSub - PALISADE EvalSub method for a ciphertext and plaintext
   * @param ciphertext
   * @param plaintext
   * @return new ciphertext for ciphertext - plaintext
   */
  Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext,
                              ConstPlaintext plaintext) const {
    TypeCheck(ciphertext, plaintext);

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, plaintext);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalSubPlain, TOC_US(t)));
    }
    return rv;
  }
  /**
   * EvalSubMutable - PALISADE EvalSubMutable method for a ciphertext and
   * plaintext. This is a mutable version - input ciphertexts may get
   * automatically rescaled, or level-reduced.
   *
   * @param ciphertext
   * @param plaintext
   * @return new ciphertext for ciphertext - plaintext
   */
  Ciphertext<Element> EvalSubMutable(Ciphertext<Element>& ciphertext,
                                     Plaintext plaintext) const {
    TypeCheck((ConstCiphertext<Element>)ciphertext, (ConstPlaintext)plaintext);

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalSubMutable(ciphertext, plaintext);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalSubPlain, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalSub - PALISADE EvalSub method for a ciphertext and constant
   * @param ciphertext
   * @param constant
   * @return new ciphertext for ciphertext - constant
   */
  Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext,
                              double constant) const {
    TimeVar t;
    Ciphertext<Element> rv;

    if (constant >= 0) {
      if (doTiming) TIC(t);
      rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, constant);
      if (doTiming) {
        timeSamples->push_back(TimingInfo(OpEvalSubConst, TOC_US(t)));
      }
    } else {
      if (doTiming) TIC(t);
      rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, -constant);
      if (doTiming) {
        timeSamples->push_back(TimingInfo(OpEvalSubConst, TOC_US(t)));
      }
    }
    return rv;
  }

  inline Ciphertext<Element> EvalSub(
      ConstPlaintext plaintext, ConstCiphertext<Element> ciphertext) const {
    return EvalAdd(EvalNegate(ciphertext), plaintext);
  }

  inline Ciphertext<Element> EvalSubMutable(
      Plaintext plaintext, Ciphertext<Element>& ciphertext) const {
    Ciphertext<Element> negated = EvalNegate(ciphertext);
    Ciphertext<Element> result = EvalAddMutable(negated, plaintext);
    ciphertext = EvalNegate(negated);
    return result;
  }

  inline Ciphertext<Element> EvalSub(
      double constant, ConstCiphertext<Element> ciphertext) const {
    return EvalAdd(EvalNegate(ciphertext), constant);
  }

  /**
   * EvalMult - PALISADE EvalMult method for a pair of ciphertexts - with key
   * switching
   * @param ct1
   * @param ct2
   * @return new ciphertext for ct1 * ct2
   */
  Ciphertext<Element> EvalMult(ConstCiphertext<Element> ct1,
                               ConstCiphertext<Element> ct2) const {
    TypeCheck(ct1, ct2);

    auto ek = GetEvalMultKeyVector(ct1->GetKeyTag());
    if (!ek.size()) {
      PALISADE_THROW(type_error,
                     "Evaluation key has not been generated for EvalMult");
    }

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2, ek[0]);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t)));
    }
    return rv;
  }
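  // --- Usage sketch (illustrative only): EvalMult with relinearization
  // requires that EvalMultKeyGen was called for the secret key first;
  // otherwise the type_error above is thrown. Names as in earlier sketches.
  //
  //   cc->EvalMultKeyGen(kp.secretKey);     // once, after KeyGen
  //   auto cProd = cc->EvalMult(c1, c2);    // relinearized product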
  /**
   * EvalMultMutable - PALISADE EvalMultMutable method for a pair of
   * ciphertexts - with key switching. This is a mutable version - input
   * ciphertexts may get automatically rescaled, or level-reduced.
   *
   * @param ct1
   * @param ct2
   * @return new ciphertext for ct1 * ct2
   */
  Ciphertext<Element> EvalMultMutable(Ciphertext<Element>& ct1,
                                      Ciphertext<Element>& ct2) const {
    TypeCheck(ct1, ct2);

    auto ek = GetEvalMultKeyVector(ct1->GetKeyTag());
    if (!ek.size()) {
      PALISADE_THROW(type_error,
                     "Evaluation key has not been generated for EvalMult");
    }

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ct1, ct2, ek[0]);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalMultNoRelin - PALISADE EvalMult method for a pair of ciphertexts -
   * no key switching (relinearization)
   * @param ct1
   * @param ct2
   * @return new ciphertext for ct1 * ct2
   */
  Ciphertext<Element> EvalMultNoRelin(ConstCiphertext<Element> ct1,
                                      ConstCiphertext<Element> ct2) const {
    TypeCheck(ct1, ct2);

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalMultMany - PALISADE function for evaluating multiplication on
   * ciphertext followed by relinearization operation (at the end). It
   * computes the multiplication in a binary tree manner. Also, it reduces
   * the number of elements in the ciphertext to two after each
   * multiplication. Currently it assumes that the consecutive two input
   * arguments have total depth smaller than the supported depth. Otherwise,
   * it throws an error.
   *
   * @param ct is the ciphertext list.
   *
   * @return new ciphertext.
   */
  Ciphertext<Element> EvalMultMany(
      const vector<Ciphertext<Element>>& ct) const {
    // input parameter check
    if (!ct.size()) PALISADE_THROW(type_error, "Empty input ciphertext vector");

    const auto ek = GetEvalMultKeyVector(ct[0]->GetKeyTag());
    if (ek.size() < (ct[0]->GetElements().size() - 2)) {
      PALISADE_THROW(type_error,
                     "Insufficient value was used for maxDepth to generate "
                     "keys for EvalMult");
    }

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalMultMany(ct, ek);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalMultMany, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalAddMany - Evaluate addition on a vector of ciphertexts.
   * It computes the addition in a binary tree manner.
   *
   * @param ctList is the list of ciphertexts.
   *
   * @return new ciphertext.
   */
  Ciphertext<Element> EvalAddMany(
      const vector<Ciphertext<Element>>& ctList) const {
    // input parameter check
    if (!ctList.size())
      PALISADE_THROW(type_error, "Empty input ciphertext vector");

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalAddMany(ctList);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalAddMany, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalAddManyInPlace - Evaluate addition on a vector of ciphertexts.
   * Addition is computed in a binary tree manner. The difference with
   * EvalAddMany is that EvalAddManyInPlace uses the input ciphertext vector
   * to store intermediate results, to avoid the overhead of using extra
   * temporary space.
   *
   * @param ctList is the list of ciphertexts.
   *
   * @return new ciphertext.
   */
  Ciphertext<Element> EvalAddManyInPlace(
      vector<Ciphertext<Element>>& ctList) const {
    // input parameter check
    if (!ctList.size())
      PALISADE_THROW(type_error, "Empty input ciphertext vector");

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalAddManyInPlace(ctList);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalAddManyInPlace, TOC_US(t)));
    }
    return rv;
  }
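  // --- Usage sketch (illustrative only): binary-tree reductions over a
  // vector of ciphertexts. EvalMultMany needs relinearization keys generated
  // with a sufficient maxDepth (see the checks above). `c0`..`c3` assumed.
  //
  //   std::vector<Ciphertext<Element>> v{c0, c1, c2, c3};
  //   auto cAll  = cc->EvalMultMany(v);       // c0*c1*c2*c3
  //   auto cSum4 = cc->EvalAddMany(v);        // c0+c1+c2+c3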
  /**
   * Function for evaluating multiplication on ciphertext followed by
   * relinearization operation. Currently it assumes that the input arguments
   * have total depth smaller than the supported depth. Otherwise, it throws
   * an error.
   *
   * @param ct1 first input ciphertext.
   * @param ct2 second input ciphertext.
   *
   * @return new ciphertext
   */
  Ciphertext<Element> EvalMultAndRelinearize(
      ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const {
    // input parameter check
    if (!ct1 || !ct2) PALISADE_THROW(type_error, "Input ciphertext is nullptr");

    const auto ek = GetEvalMultKeyVector(ct1->GetKeyTag());
    if (ek.size() <
        (ct1->GetElements().size() + ct2->GetElements().size() - 3)) {
      PALISADE_THROW(type_error,
                     "Insufficient value was used for maxDepth to generate "
                     "keys for EvalMult");
    }

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalMultAndRelinearize(ct1, ct2, ek);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t)));
    }
    return rv;
  }

  /**
   * Function for relinearization of a ciphertext.
   *
   * @param ct input ciphertext.
   *
   * @return relinearized ciphertext
   */
  Ciphertext<Element> Relinearize(ConstCiphertext<Element> ct) const {
    // input parameter check
    if (!ct) PALISADE_THROW(type_error, "Input ciphertext is nullptr");

    const auto ek = GetEvalMultKeyVector(ct->GetKeyTag());
    if (ek.size() < (ct->GetElements().size() - 2)) {
      PALISADE_THROW(type_error,
                     "Insufficient value was used for maxDepth to generate "
                     "keys for EvalMult");
    }

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->Relinearize(ct, ek);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalRelin, TOC_US(t)));
    }
    return rv;
  }
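  // --- Usage sketch (illustrative only): deferring relinearization. A chain
  // of EvalMultNoRelin calls grows the ciphertext; one Relinearize at the
  // end shrinks it back to two elements (given suitable maxDepth keys).
  //
  //   auto wide = cc->EvalMultNoRelin(c1, c2);  // 3-element ciphertext
  //   auto slim = cc->Relinearize(wide);        // back to 2 elements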
  /**
   * EvalMult - PALISADE EvalMult method for plaintext * ciphertext
   * @param pt2
   * @param ct1
   * @return new ciphertext for ct1 * pt2
   */
  inline Ciphertext<Element> EvalMult(ConstPlaintext pt2,
                                      ConstCiphertext<Element> ct1) const {
    return EvalMult(ct1, pt2);
  }

  /**
   * EvalMultMutable - PALISADE EvalMultMutable method for plaintext *
   * ciphertext
   * @param pt2
   * @param ct1
   * @return new ciphertext for ct1 * pt2
   */
  inline Ciphertext<Element> EvalMultMutable(Plaintext pt2,
                                             Ciphertext<Element>& ct1) const {
    return EvalMultMutable(ct1, pt2);
  }

  /**
   * EvalMult - PALISADE EvalMult method for constant * ciphertext
   * @param constant
   * @param ct1
   * @return new ciphertext for ct1 * constant
   */
  inline Ciphertext<Element> EvalMult(double constant,
                                      ConstCiphertext<Element> ct1) const {
    return EvalMult(ct1, constant);
  }

  inline Ciphertext<Element> EvalMultMutable(double constant,
                                             Ciphertext<Element>& ct1) const {
    return EvalMultMutable(ct1, constant);
  }

  /**
   * EvalRightShift - works only for Fractional Encoding
   * @param ct1
   * @param divisor - the shift amount
   * @return new ciphertext for ct1 shifted right by divisor
   */
  Ciphertext<Element> EvalRightShift(ConstCiphertext<Element> ct1,
                                     size_t divisor) const {
    if (ct1 && ct1->GetEncodingType() != Fractional) {
      std::stringstream ss;
      ss << "A " << Fractional
         << " encoded ciphertext is required for the EvalRightShift operation";
      PALISADE_THROW(type_error, ss.str());
    }

    Plaintext plaintextShift = MakeFractionalPlaintext(0, divisor);
    TypeCheck(ct1, plaintextShift);

    double start = 0;
    if (doTiming) start = currentDateTime();
    auto rv = EvalMult(ct1, plaintextShift);
    if (doTiming) {
      timeSamples->push_back(
          TimingInfo(OpEvalRightShift, currentDateTime() - start));
    }
    return rv;
  }

  /**
   * EvalMult - PALISADE EvalMult method for ciphertext * plaintext
   * @param ct1
   * @param pt2
   * @return new ciphertext for ct1 * pt2
   */
  Ciphertext<Element> EvalMult(ConstCiphertext<Element> ct1,
                               ConstPlaintext pt2) const {
    TypeCheck(ct1, pt2);

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, pt2);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalMultMutable - PALISADE EvalMultMutable method for ciphertext *
   * plaintext. This is a mutable version - input ciphertexts may get
   * automatically rescaled, or level-reduced.
   *
   * @param ct1
   * @param pt2
   * @return new ciphertext for ct1 * pt2
   */
  Ciphertext<Element> EvalMultMutable(Ciphertext<Element>& ct1,
                                      Plaintext pt2) const {
    TypeCheck(ct1, pt2);

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ct1, pt2);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalMult - PALISADE EvalMult method for a ciphertext and constant
   * @param ciphertext
   * @param constant
   * @return new ciphertext for ciphertext * constant
   */
  Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext,
                               double constant) const {
    // input parameter check
    if (!ciphertext) {
      PALISADE_THROW(type_error, "Input ciphertext is nullptr");
    }

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalMult(ciphertext, constant);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalMultConst, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalMultMutable - PALISADE EvalMult method for a ciphertext and constant
   * This is a mutable version - input ciphertexts may get automatically
   * rescaled, or level-reduced.
   *
   * @param ciphertext
   * @param constant
   * @return new ciphertext for ciphertext * constant
   */
  Ciphertext<Element> EvalMultMutable(Ciphertext<Element>& ciphertext,
                                      double constant) const {
    // input parameter check
    if (!ciphertext) {
      PALISADE_THROW(type_error, "Input ciphertext is nullptr");
    }

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ciphertext, constant);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalMultConst, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalMultMatrix - PALISADE EvalMult method for two matrices of ciphertext
   * @param ct1
   * @param ct2
   * @return new matrix for ct1 * ct2
   */
  shared_ptr<Matrix<RationalCiphertext<Element>>> EvalMultMatrix(
      const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1,
      const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const {
    TypeCheck((*ct1)(0, 0), (*ct2)(0, 0));  // TODO only checking one; when
                                            // Matrix is refactored, this
                                            // should be revisited

    TimeVar t;
    if (doTiming) TIC(t);
    Matrix<RationalCiphertext<Element>> rv = *ct1 * *ct2;
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalMultMatrix, TOC_US(t)));
    }
    return std::make_shared<Matrix<RationalCiphertext<Element>>>(rv);
  }

  /**
   * EvalNegate - PALISADE Negate method for a ciphertext
   * @param ct
   * @return new ciphertext -ct
   */
  Ciphertext<Element> EvalNegate(ConstCiphertext<Element> ct) const {
    if (ct == nullptr || Mismatched(ct->GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "Information passed to EvalNegate was not generated with "
                     "this crypto context");

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalNegate(ct);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalNeg, TOC_US(t)));
    }
    return rv;
  }
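  // --- Usage sketch (illustrative only): scalar scaling and negation need
  // no evaluation keys.
  //
  //   auto cScaled = cc->EvalMult(c1, 2.0);  // encrypts 2 * p1
  //   auto cNeg    = cc->EvalNegate(c1);     // encrypts -p1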
  /**
   * EvalNegateMatrix - PALISADE Negate method for a matrix of ciphertexts
   * @param ct
   * @return new matrix -ct
   */
  shared_ptr<Matrix<RationalCiphertext<Element>>> EvalNegateMatrix(
      const shared_ptr<Matrix<RationalCiphertext<Element>>> ct) const {
    if (ct == nullptr || Mismatched((*ct)(0, 0).GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "Information passed to EvalNegateMatrix was not generated "
                     "with this crypto context");

    TimeVar t;
    if (doTiming) TIC(t);
    auto m = std::make_shared<Matrix<RationalCiphertext<Element>>>(
        ct->GetAllocator(), ct->GetRows(), ct->GetCols());
    for (size_t r = 0; r < m->GetRows(); r++)
      for (size_t c = 0; c < m->GetCols(); c++) (*m)(r, c) = -((*ct)(r, c));
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalNegMatrix, TOC_US(t)));
    }
    return m;
  }

  /**
   * Generate automorphism keys for a given private key
   *
   * @param publicKey original public key.
   * @param origPrivateKey original private key.
   * @param indexList list of automorphism indices to be computed
   * @return returns the evaluation keys; index 0 of the vector corresponds to
   * plaintext index 2, index 1 to plaintext index 3, etc.
   */
  shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(
      const LPPublicKey<Element> publicKey,
      const LPPrivateKey<Element> origPrivateKey,
      const std::vector<usint>& indexList) const {
    if (publicKey == nullptr || origPrivateKey == nullptr)
      PALISADE_THROW(type_error, "Null Keys");
    if (!indexList.size())
      PALISADE_THROW(config_error, "Input index vector is empty");
    if (publicKey->GetCryptoContext().get() != this)
      PALISADE_THROW(type_error,
                     "Key was not created in this CryptoContextImpl");
    if (publicKey->GetCryptoContext() != origPrivateKey->GetCryptoContext())
      PALISADE_THROW(type_error,
                     "Keys were not created in the same CryptoContextImpl");

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(
        publicKey, origPrivateKey, indexList);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalAutomorphismKeyGen, TOC_US(t)));
    }
    return rv;
  }

  /**
   * Function for evaluating automorphism of ciphertext at index i
   *
   * @param ciphertext the input ciphertext.
   * @param i automorphism index
   * @param &evalKeys - reference to the vector of evaluation keys generated
   * by EvalAutomorphismKeyGen.
   * @return resulting ciphertext
   */
  Ciphertext<Element> EvalAutomorphism(
      ConstCiphertext<Element> ciphertext, usint i,
      const std::map<usint, LPEvalKey<Element>>& evalKeys,
      CALLER_INFO_ARGS_HDR) const {
    if (nullptr == ciphertext) {
      std::string errorMsg(std::string("Input ciphertext is nullptr") +
                           CALLER_INFO);
      PALISADE_THROW(type_error, errorMsg);
    }

    if (evalKeys.empty()) {
      std::string errorMsg(std::string("Empty input key map") + CALLER_INFO);
      PALISADE_THROW(type_error, errorMsg);
    }

    auto tk = evalKeys.begin()->second;
    if (nullptr == tk) {
      std::string errorMsg(std::string("Invalid evalKey") + CALLER_INFO);
      PALISADE_THROW(type_error, errorMsg);
    }

    if (ciphertext->GetCryptoContext().get() != this) {
      std::string errorMsg(
          std::string("Ciphertext was not created in this CryptoContextImpl") +
          CALLER_INFO);
      PALISADE_THROW(type_error, errorMsg);
    }

    if (ciphertext->GetCryptoContext() != tk->GetCryptoContext()) {
      std::string errorMsg(
          std::string("Items were not created in the same CryptoContextImpl") +
          CALLER_INFO);
      PALISADE_THROW(type_error, errorMsg);
    }

    if (ciphertext->GetKeyTag() != tk->GetKeyTag()) {
      std::string errorMsg(
          std::string("Items were not encrypted with same keys") +
          CALLER_INFO);
      PALISADE_THROW(type_error, errorMsg);
    }

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv =
        GetEncryptionAlgorithm()->EvalAutomorphism(ciphertext, i, evalKeys);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalAutomorphismI, TOC_US(t)));
    }
    return rv;
  }
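  // --- Usage sketch (illustrative only): applying a raw automorphism. Keys
  // must be generated for exactly the indices used; valid index values
  // depend on the cyclotomic ring (an assumption of this sketch).
  //
  //   std::vector<usint> idx{3, 5};   // assumed valid indices
  //   auto aKeys = cc->EvalAutomorphismKeyGen(kp.secretKey, idx);
  //   auto cAuto = cc->EvalAutomorphism(c1, 3, *aKeys);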
  /**
   * Generate automorphism keys for a given private key; uses the private key
   * for encryption
   *
   * @param privateKey private key.
   * @param indexList list of automorphism indices to be computed
   * @return returns the evaluation keys
   */
  shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(
      const LPPrivateKey<Element> privateKey,
      const std::vector<usint>& indexList) const {
    if (privateKey == nullptr) PALISADE_THROW(type_error, "Null input");
    if (!indexList.size())
      PALISADE_THROW(config_error, "Input index vector is empty");
    if (privateKey->GetCryptoContext().get() != this)
      PALISADE_THROW(type_error,
                     "Key was not created in this CryptoContextImpl");

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(privateKey,
                                                               indexList);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpEvalAutomorphismK, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalSumKeyGen Generates the key map to be used by evalsum
   *
   * @param privateKey private key.
   * @param publicKey public key (used in NTRU schemes).
   */
  void EvalSumKeyGen(const LPPrivateKey<Element> privateKey,
                     const LPPublicKey<Element> publicKey = nullptr);

  shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumRowsKeyGen(
      const LPPrivateKey<Element> privateKey,
      const LPPublicKey<Element> publicKey = nullptr, usint rowSize = 0);

  shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumColsKeyGen(
      const LPPrivateKey<Element> privateKey,
      const LPPublicKey<Element> publicKey = nullptr);

  /**
   * GetEvalSumKey returns the map
   *
   * @return the EvalSum key map
   */
  static const std::map<usint, LPEvalKey<Element>>& GetEvalSumKeyMap(
      const string& id);

  static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>&
  GetAllEvalSumKeys();

  /**
   * Function for evaluating a sum of all components
   *
   * @param ciphertext the input ciphertext.
   * @param batchSize size of the batch
   * @return resulting ciphertext
   */
  Ciphertext<Element> EvalSum(ConstCiphertext<Element> ciphertext,
                              usint batchSize) const;

  Ciphertext<Element> EvalSumRows(
      ConstCiphertext<Element> ciphertext, usint rowSize,
      const std::map<usint, LPEvalKey<Element>>& evalKeys) const;

  Ciphertext<Element> EvalSumCols(
      ConstCiphertext<Element> ciphertext, usint rowSize,
      const std::map<usint, LPEvalKey<Element>>& evalKeys) const;

  /**
   * EvalAtIndexKeyGen Generates the key map to be used by EvalAtIndex
   *
   * @param privateKey private key.
   * @param indexList list of indices.
   * @param publicKey public key (used in NTRU schemes).
   */
  void EvalAtIndexKeyGen(const LPPrivateKey<Element> privateKey,
                         const std::vector<int32_t>& indexList,
                         const LPPublicKey<Element> publicKey = nullptr);
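  // --- Usage sketch (illustrative only): batched sums and rotations. Key
  // generation is per secret key and, for EvalAtIndex, per rotation index;
  // `batchSize` is an assumed packing parameter.
  //
  //   cc->EvalSumKeyGen(kp.secretKey);
  //   auto cTotal = cc->EvalSum(c1, batchSize);     // sum over the slots
  //   cc->EvalAtIndexKeyGen(kp.secretKey, {1, -2});
  //   auto cRot = cc->EvalAtIndex(c1, 1);           // rotate left by 1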
  /**
   * EvalFastRotationPrecompute implements the precomputation step of
   * hoisted automorphisms.
   *
   * Please refer to Section 5 of Halevi and Shoup, "Faster Homomorphic
   * linear transformations in HELib." for more details, link:
   * https://eprint.iacr.org/2018/244.
   *
   * Generally, automorphisms are performed with three steps: (1) the
   * automorphism is applied on the ciphertext, (2) the automorphed values are
   * decomposed into digits, and (3) key switching is applied to make it
   * possible to further compute on the ciphertext.
   *
   * Hoisted automorphisms is a technique that performs the digit
   * decomposition for the original ciphertext first, and then performs the
   * automorphism and the key switching on the decomposed digits. The benefit
   * of this is that the digit decomposition is independent of the
   * automorphism rotation index, so it can be reused for multiple different
   * indices. This can greatly improve performance when we have to compute
   * many automorphisms on the same ciphertext. This routinely happens when
   * we do permutations (EvalPermute).
   *
   * EvalFastRotationPrecompute implements the digit decomposition step of
   * hoisted automorphisms.
   *
   * @param ct the input ciphertext on which to do the precomputation (digit
   * decomposition)
   */
  shared_ptr<vector<Element>> EvalFastRotationPrecompute(
      ConstCiphertext<Element> ct) const {
    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalFastRotationPrecompute(ct);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpFastRotPrecomp, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalFastRotation implements the automorphism and key switching step of
   * hoisted automorphisms.
   *
   * Please refer to Section 5 of Halevi and Shoup, "Faster Homomorphic
   * linear transformations in HELib." for more details, link:
   * https://eprint.iacr.org/2018/244.
   *
   * Generally, automorphisms are performed with three steps: (1) the
   * automorphism is applied on the ciphertext, (2) the automorphed values are
   * decomposed into digits, and (3) key switching is applied to make it
   * possible to further compute on the ciphertext.
   *
   * Hoisted automorphisms is a technique that performs the digit
   * decomposition for the original ciphertext first, and then performs the
   * automorphism and the key switching on the decomposed digits. The benefit
   * of this is that the digit decomposition is independent of the
   * automorphism rotation index, so it can be reused for multiple different
   * indices. This can greatly improve performance when we have to compute
   * many automorphisms on the same ciphertext. This routinely happens when
   * we do permutations (EvalPermute).
   *
   * EvalFastRotation implements the automorphism and key switching step of
   * hoisted automorphisms.
   *
   * This method assumes that all required rotation keys exist. This may not
   * be true if we are using baby-step/giant-step key switching. Please refer
   * to Section 5.1 of the above reference and EvalPermuteBGStepHoisted to
   * see how to deal with this issue.
   *
   * @param ct the input ciphertext to perform the automorphism on
   * @param index the index of the rotation. Positive indices correspond to
   * left rotations and negative indices correspond to right rotations.
   * @param m is the cyclotomic order
   * @param digits the digit decomposition created by
   * EvalFastRotationPrecompute at the precomputation step.
   */
  Ciphertext<Element> EvalFastRotation(
      ConstCiphertext<Element> ct, const usint index, const usint m,
      const shared_ptr<vector<Element>> digits) const {
    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalFastRotation(ct, index, m, digits);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpFastRot, TOC_US(t)));
    }
    return rv;
  }

  /**
   * Merges multiple ciphertexts with encrypted results in slot 0 into a
   * single ciphertext. The slot assignment is done based on the order of
   * ciphertexts in the vector.
   *
   * @param ciphertextVector vector of ciphertexts to be merged.
   * @param &evalKeys - reference to the map of evaluation keys generated by
   * EvalAutomorphismKeyGen.
   * @return resulting ciphertext
   */
  Ciphertext<Element> EvalMerge(
      const vector<Ciphertext<Element>>& ciphertextVector) const;

  /**
   * GetEvalAutomorphismKey returns the map
   *
   * @return the EvalAutomorphism key map
   */
  static const std::map<usint, LPEvalKey<Element>>& GetEvalAutomorphismKeyMap(
      const string& id);

  static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>&
  GetAllEvalAutomorphismKeys();
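  // --- Usage sketch (illustrative only): hoisted rotations. The digit
  // decomposition is computed once and reused across several rotation
  // indices; `m` is assumed to be the cyclotomic order of the scheme's ring.
  //
  //   auto digits = cc->EvalFastRotationPrecompute(c1);
  //   auto r1 = cc->EvalFastRotation(c1, 1, m, digits);
  //   auto r2 = cc->EvalFastRotation(c1, 2, m, digits);  // reuses digits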
  /**
   * Moves i-th slot to slot 0
   *
   * @param ciphertext the input ciphertext.
   * @param index the index.
   * @return resulting ciphertext
   */
  Ciphertext<Element> EvalAtIndex(ConstCiphertext<Element> ciphertext,
                                  int32_t index) const;

  /**
   * Evaluates inner product in batched encoding
   *
   * @param ciphertext1 first vector.
   * @param ciphertext2 second vector.
   * @param batchSize size of the batch to be summed up
   * @return resulting ciphertext
   */
  Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1,
                                       ConstCiphertext<Element> ciphertext2,
                                       usint batchSize) const;

  /**
   * Evaluates inner product in batched encoding
   *
   * @param ciphertext1 first vector - ciphertext.
   * @param plaintext second vector - plaintext.
   * @param batchSize size of the batch to be summed up
   * @return resulting ciphertext
   */
  Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1,
                                       ConstPlaintext plaintext,
                                       usint batchSize) const;

  /**
   * EvalCrossCorrelation - Computes the sliding sum of inner products (known
   * as cross-correlation, sliding inner product, or sliding dot product in
   * image processing)
   * @param x - first vector of row vectors
   * @param y - second vector of row vectors
   * @param batchSize - batch size for packed encoding
   * @param indexStart - starting index in the vectors of row vectors
   * @param length - length of the slice in the vectors of row vectors;
   * default is 0 meaning to use the full length of the vector
   * @return sum(x_i*y_i), i.e., a sum of inner products
   */
  Ciphertext<Element> EvalCrossCorrelation(
      const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
      const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize,
      usint indexStart = 0, usint length = 0) const;
  /**
   * Method for polynomial evaluation for polynomials represented as power
   * series.
   *
   * @param &cipherText input ciphertext
   * @param &coefficients is the vector of coefficients in the polynomial;
   * the size of the vector is the degree of the polynomial + 1
   * @return the result of polynomial evaluation.
   */
  virtual Ciphertext<Element> EvalPoly(
      ConstCiphertext<Element> ciphertext,
      const std::vector<double>& coefficients) const {
    if (ciphertext == nullptr ||
        this->Mismatched(ciphertext->GetCryptoContext()))
      throw std::logic_error(
          "Information passed to EvalPoly was not generated with this crypto "
          "context");

    TimeVar t;
    if (this->doTiming) TIC(t);
    auto rv = std::static_pointer_cast<LPPublicKeyEncryptionScheme<Element>>(
                  this->GetEncryptionAlgorithm())
                  ->EvalPoly(ciphertext, coefficients);
    if (this->doTiming) {
      this->timeSamples->push_back(TimingInfo(OpEvalPoly, TOC_US(t)));
    }
    return rv;
  }

  /**
   * EvalLinRegressBatched - Computes the parameter vector for linear
   * regression using the least squares method. Supported only in batched
   * mode; currently works only for two regressors
   * @param x - matrix of regressors
   * @param y - vector of dependent variables
   * @return the parameter vector using (x^T x)^{-1} x^T y (using least
   * squares method)
   */
  shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegressBatched(
      const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
      const shared_ptr<Matrix<RationalCiphertext<Element>>> y,
      usint batchSize) const;

  /**
   * EvalLinRegression - Computes the parameter vector for linear regression
   * using the least squares method
   * @param x - matrix of regressors
   * @param y - vector of dependent variables
   * @return the parameter vector using (x^T x)^{-1} x^T y (using least
   * squares method)
   */
  shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegression(
      const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
      const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const {
    TypeCheck((*x)(0, 0), (*y)(0, 0));  // TODO only checking one; when Matrix
                                        // is refactored, this should be
                                        // revisited

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->EvalLinRegression(x, y);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpLinRegression, TOC_US(t)));
    }
    return rv;
  }

  /**
   * KeySwitch - PALISADE KeySwitch method
   * @param keySwitchHint - reference to KeySwitchHint
   * @param ciphertext - vector of ciphertext
   * @return new CiphertextImpl after applying key switch
   */
  Ciphertext<Element> KeySwitch(const LPEvalKey<Element> keySwitchHint,
                                ConstCiphertext<Element> ciphertext) const {
    if (keySwitchHint == nullptr ||
        Mismatched(keySwitchHint->GetCryptoContext()))
      PALISADE_THROW(
          config_error,
          "Key passed to KeySwitch was not generated with this crypto "
          "context");

    if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "Ciphertext passed to KeySwitch was not generated with "
                     "this crypto context");

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->KeySwitch(keySwitchHint, ciphertext);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpKeySwitch, TOC_US(t)));
    }
    return rv;
  }
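  // --- Usage sketch (illustrative only) for EvalPoly above: evaluating
  // 1 + 2x + 3x^2 on an encrypted x. Coefficients are ordered from degree 0
  // upward, so the vector size is the polynomial degree + 1. `cx` is an
  // assumed ciphertext encrypting x.
  //
  //   std::vector<double> coeffs{1.0, 2.0, 3.0};
  //   auto cPoly = cc->EvalPoly(cx, coeffs);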
  /**
   * Rescale - An alias for PALISADE ModReduce method.
   * This is because ModReduce is called Rescale in CKKS.
   *
   * @param ciphertext - vector of ciphertext
   * @return vector of mod reduced ciphertext
   */
  Ciphertext<Element> Rescale(ConstCiphertext<Element> ciphertext) const {
    if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "Information passed to Rescale was not generated with "
                     "this crypto context");

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->ModReduce(ciphertext);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(t)));
    }
    return rv;
  }

  /**
   * ModReduce - PALISADE ModReduce method used only for BGVrns
   * @param ciphertext - vector of ciphertext
   * @return vector of mod reduced ciphertext
   */
  Ciphertext<Element> ModReduce(ConstCiphertext<Element> ciphertext) const {
    if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext()))
      PALISADE_THROW(
          not_available_error,
          "Information passed to ModReduce was not generated with this "
          "crypto context");

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->ModReduce(ciphertext);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(t)));
    }
    return rv;
  }

  /**
   * ModReduceRational - PALISADE ModReduce method applied to the numerator
   * and denominator of a rational ciphertext
   * @param ciphertext - vector of ciphertext
   * @return vector of mod reduced ciphertext
   */
  RationalCiphertext<Element> ModReduceRational(
      RationalCiphertext<Element> ciphertext) const {
    TimeVar t;
    if (doTiming) TIC(t);
    Ciphertext<Element> n =
        GetEncryptionAlgorithm()->ModReduce(ciphertext.GetNumerator());
    Ciphertext<Element> d =
        GetEncryptionAlgorithm()->ModReduce(ciphertext.GetDenominator());
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(t)));
    }
    return RationalCiphertext<Element>(n, d);
  }

  /**
   * ModReduceMatrix - PALISADE ModReduce method applied to a matrix of
   * rational ciphertexts
   * @param ciphertext - vector of ciphertext
   * @return vector of mod reduced ciphertext
   */
  shared_ptr<Matrix<RationalCiphertext<Element>>> ModReduceMatrix(
      shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext) const {
    // needs context check

    TimeVar t;
    if (doTiming) TIC(t);
    auto m = std::make_shared<Matrix<RationalCiphertext<Element>>>(
        ciphertext->GetAllocator(), ciphertext->GetRows(),
        ciphertext->GetCols());
    for (size_t r = 0; r < m->GetRows(); r++)
      for (size_t c = 0; c < m->GetCols(); c++)
        (*m)(r, c) = ModReduceRational((*ciphertext)(r, c));
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpModReduceMatrix, TOC_US(t)));
    }
    return m;
  }

  /**
   * LevelReduce - PALISADE LevelReduce method
   * @param cipherText1
   * @param linearKeySwitchHint
   * @return vector of level reduced ciphertext
   */
  Ciphertext<Element> LevelReduce(
      ConstCiphertext<Element> cipherText1,
      const LPEvalKeyNTRU<Element> linearKeySwitchHint,
      size_t levels = 1) const {
    if (cipherText1 == nullptr ||
        Mismatched(cipherText1->GetCryptoContext())) {
      PALISADE_THROW(config_error,
                     "Information passed to LevelReduce was not generated "
                     "with this crypto context");
    }

    const auto cryptoParams =
        std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>(
            cipherText1->GetCryptoParameters());

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->LevelReduce(
        cipherText1, linearKeySwitchHint, levels);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpLevelReduce, TOC_US(t)));
    }
    return rv;
  }
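  // --- Usage sketch (illustrative only) for Rescale above: in CKKS, each
  // multiplication grows the scaling factor; Rescale (an alias of ModReduce)
  // drops one level to bring it back down.
  //
  //   auto cProd2 = cc->EvalMult(c1, c2);
  //   auto cRs    = cc->Rescale(cProd2);   // one level consumed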
  /**
   * ComposedEvalMult - PALISADE composed evalmult
   * @param ciphertext1 - vector for first cipher text
   * @param ciphertext2 - vector for second cipher text
   * @param quadKeySwitchHint - is the quadratic key switch hint from original
   * private key to the quadratic key
   * @return vector of resulting ciphertext
   */
  Ciphertext<Element> ComposedEvalMult(
      ConstCiphertext<Element> ciphertext1,
      ConstCiphertext<Element> ciphertext2) const {
    if (ciphertext1 == nullptr || ciphertext2 == nullptr ||
        ciphertext1->GetKeyTag() != ciphertext2->GetKeyTag() ||
        Mismatched(ciphertext1->GetCryptoContext()))
      PALISADE_THROW(config_error,
                     "Ciphertexts passed to ComposedEvalMult were not "
                     "generated with this crypto context");

    auto ek = GetEvalMultKeyVector(ciphertext1->GetKeyTag());
    if (!ek.size()) {
      PALISADE_THROW(type_error,
                     "Evaluation key has not been generated for EvalMult");
    }

    TimeVar t;
    if (doTiming) TIC(t);
    auto rv = GetEncryptionAlgorithm()->ComposedEvalMult(ciphertext1,
                                                         ciphertext2, ek[0]);
    if (doTiming) {
      timeSamples->push_back(TimingInfo(OpComposedEvalMult, TOC_US(t)));
    }
    return rv;
  }

  static LPPublicKey<Element> deserializePublicKey(const Serialized& serObj)
      __attribute__((
          deprecated("serialization changed, see wiki for details")));

  static LPPrivateKey<Element> deserializeSecretKey(const Serialized& serObj)
      __attribute__((
          deprecated("serialization changed, see wiki for details")));

  static LPEvalKey<Element> deserializeEvalKey(const Serialized& serObj)
      __attribute__((
          deprecated("serialization changed, see wiki for details")));

  static LPEvalKey<Element> deserializeEvalKeyInContext(
      const Serialized& serObj, CryptoContext<Element> cc)
      __attribute__((
          deprecated("serialization changed, see wiki for details")));

  template <class Archive>
  void save(Archive& ar, std::uint32_t const version) const {
    ar(cereal::make_nvp("cc", params));
    ar(cereal::make_nvp("kt", scheme));
    ar(cereal::make_nvp("si", m_schemeId));
  }
  template <class Archive>
  void load(Archive& ar, std::uint32_t const version) {
    if (version > SerializedVersion()) {
      PALISADE_THROW(deserialize_error,
                     "serialized object version " + std::to_string(version) +
                         " is from a later version of the library");
    }
    ar(cereal::make_nvp("cc", params));
    ar(cereal::make_nvp("kt", scheme));
    ar(cereal::make_nvp("si", m_schemeId));

    // NOTE: a pointer to this object will be wrapped in a shared_ptr, and is
    // a "CryptoContext". PALISADE relies on the notion that identical
    // CryptoContextImpls are not duplicated in memory. Once we deserialize
    // this object, we must check to see if there is a matching object for
    // this object that already exists in memory. If it DOES exist, use it;
    // if it does NOT exist, add this to the cache of all contexts.
  }

  virtual std::string SerializedObjectName() const { return "CryptoContext"; }
  static uint32_t SerializedVersion() { return 1; }
};

/**
 * @brief CryptoObject
 *
 * A class to aid in referring to the crypto context that an object belongs to
 */
template <typename Element>
class CryptoObject {
 protected:
  CryptoContext<Element> context;  // crypto context this object belongs to

  // tag used to find the evaluation key needed
  // for SHE/FHE operations
  string keyTag;

 public:
  explicit CryptoObject(CryptoContext<Element> cc = nullptr,
                        const string& tag = "")
      : context(cc), keyTag(tag) {}

  CryptoObject(const CryptoObject& rhs) {
    context = rhs.context;
    keyTag = rhs.keyTag;
  }

  CryptoObject(CryptoObject&& rhs) {
    context = std::move(rhs.context);
    keyTag = std::move(rhs.keyTag);
  }

  virtual ~CryptoObject() {}

  const CryptoObject& operator=(const CryptoObject& rhs) {
    this->context = rhs.context;
    this->keyTag = rhs.keyTag;
    return *this;
  }

  const CryptoObject& operator=(CryptoObject&& rhs) {
    this->context = std::move(rhs.context);
    this->keyTag = std::move(rhs.keyTag);
    return *this;
  }

  bool operator==(const CryptoObject& rhs) const {
    return context.get() == rhs.context.get() && keyTag == rhs.keyTag;
  }

  CryptoContext<Element> GetCryptoContext() const { return context; }

  const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const {
    return context->GetCryptoParameters();
  }

  const EncodingParams GetEncodingParameters() const {
    return context->GetCryptoParameters()->GetEncodingParams();
  }

  const string GetKeyTag() const { return keyTag; }

  void SetKeyTag(const string& tag) { keyTag = tag; }

  template <class Archive>
  void save(Archive& ar, std::uint32_t const version) const {
    ar(::cereal::make_nvp("cc", context));
    ar(::cereal::make_nvp("kt", keyTag));
  }

  template <class Archive>
  void load(Archive& ar, std::uint32_t const version) {
    if (version > SerializedVersion()) {
      PALISADE_THROW(deserialize_error,
                     "serialized object version " + std::to_string(version) +
                         " is from a later version of the library");
    }
    ar(::cereal::make_nvp("cc", context));
    ar(::cereal::make_nvp("kt", keyTag));

    context = CryptoContextFactory<Element>::GetContext(
        context->GetCryptoParameters(), context->GetEncryptionAlgorithm());
  }

  std::string SerializedObjectName() const { return "CryptoObject"; }
  static uint32_t SerializedVersion() { return 1; }
};

/**
 * @brief CryptoContextFactory
 *
 * A class that contains static methods to generate new crypto contexts from
 * user parameters
 */
template <typename Element>
class CryptoContextFactory {
  using ParmType = typename Element::Params;
  using IntType = typename Element::Integer;

 protected:
  static vector<CryptoContext<Element>> AllContexts;

 public:
  static void ReleaseAllContexts();

  static int GetContextCount();

  static CryptoContext<Element> GetSingleContext();

  static CryptoContext<Element> GetContext(
      shared_ptr<LPCryptoParameters<Element>> params,
      shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme,
      const string& schemeId = "Not");

  static CryptoContext<Element> GetContextForPointer(
      CryptoContextImpl<Element>* cc);

  static const vector<CryptoContext<Element>>& GetAllContexts();
  /**
   * construct a PALISADE CryptoContextImpl for the BFV Scheme
   * @param params ring parameters
   * @param plaintextModulus plaintext modulus
   * @param relinWindow bits in the base of digits in key
   * switching/relinearization
   * @param stDev sigma - distribution parameter for error distribution
   * @param delta - the plaintext scaling parameter floor(q/t) in BFV
   * @param mode - mode for generating secret keys (RLWE vs OPTIMIZED)
   * @param bigmodulus - large modulus used in tensoring of homomorphic
   * multiplication
   * @param bigrootofunity - root of unity for bigmodulus
   * @param depth of supported computation circuit (not used; for future use)
   * @param assuranceMeasure alpha - effective bound for gaussians:
   * -sqrt{alpha}*sigma..sqrt{alpha}*sigma
   * @param securityLevel - root Hermite factor
   * @param bigmodulusarb - additional large modulus for the case
   * of general (non-power-of-two) cyclotomics
   * @param bigrootofunityarb - root of unity for bigmodulusarb
   * @param maxDepth the maximum power of secret key for which the
   * relinearization key is generated (by default, it is 2); setting it to a
   * value larger than 2 adds support for homomorphic multiplication w/o
   * relinearization
   * @return new context
   */
  static CryptoContext<Element> genCryptoContextBFV(
      shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus,
      usint relinWindow, float stDev, const std::string& delta,
      MODE mode = RLWE, const std::string& bigmodulus = "0",
      const std::string& bigrootofunity = "0", int depth = 0,
      int assuranceMeasure = 0, float securityLevel = 0,
      const std::string& bigmodulusarb = "0",
      const std::string& bigrootofunityarb = "0", int maxDepth = 2);

  /**
   * construct a PALISADE CryptoContextImpl for the BFV Scheme
   * @param params ring parameters
   * @param encodingParams plaintext encoding parameters
   * @param relinWindow bits in the base of digits in key
   * switching/relinearization
   * @param stDev sigma - distribution parameter for error distribution
   * @param delta - the plaintext scaling parameter floor(q/t) in BFV
   * @param mode - mode for generating secret keys (RLWE vs OPTIMIZED)
   * @param bigmodulus - large modulus used in tensoring of homomorphic
   * multiplication
   * @param bigrootofunity - root of unity for bigmodulus
   * @param depth of supported computation circuit (not used; for future use)
   * @param assuranceMeasure alpha - effective bound for gaussians:
   * -sqrt{alpha}*sigma..sqrt{alpha}*sigma
   * @param securityLevel - root Hermite factor
   * @param bigmodulusarb - additional large modulus for the case
   * of general (non-power-of-two) cyclotomics
   * @param bigrootofunityarb - root of unity for bigmodulusarb
   * @param maxDepth the maximum power of secret key for which the
   * relinearization key is generated (by default, it is 2); setting it to a
   * value larger than 2 adds support for homomorphic multiplication w/o
   * relinearization
   * @return new context
   */
  static CryptoContext<Element> genCryptoContextBFV(
      shared_ptr<ParmType> params, EncodingParams encodingParams,
      usint relinWindow, float stDev, const std::string& delta,
      MODE mode = RLWE, const std::string& bigmodulus = "0",
      const std::string& bigrootofunity = "0", int depth = 0,
      int assuranceMeasure = 0, float securityLevel = 0,
      const std::string& bigmodulusarb = "0",
      const std::string& bigrootofunityarb = "0", int maxDepth = 2);
  /**
   * construct a PALISADE CryptoContextImpl for the BFV Scheme using the
   * scheme's ParamsGen methods
   * @param plaintextModulus plaintext modulus
   * @param securityLevel root Hermite factor (lattice security parameter)
   * @param relinWindow bits in the base of digits in key
   * switching/relinearization
   * @param dist distribution parameter for Gaussian noise generation
   * @param numAdds additive depth for homomorphic computations (assumes
   * numMults and numKeySwitches are set to zero)
   * @param numMults multiplicative depth for homomorphic computations
   * (assumes numAdds and numKeySwitches are set to zero)
   * @param numKeyswitches key-switching depth for homomorphic computations
   * (assumes numAdds and numMults are set to zero)
   * @param mode secret key distribution mode (RLWE [Gaussian noise] or
   * OPTIMIZED [ternary uniform distribution])
   * @param maxDepth the maximum power of secret key for which the
   * relinearization key is generated (by default, it is 2); setting it to a
   * value larger than 2 adds support for homomorphic multiplication w/o
   * relinearization
   * @param n ring dimension in case the user wants to use a custom ring
   * dimension
   * @return new context
   */
  static CryptoContext<Element> genCryptoContextBFV(
      const PlaintextModulus plaintextModulus, float securityLevel,
      usint relinWindow, float dist, unsigned int numAdds,
      unsigned int numMults, unsigned int numKeyswitches,
      MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0);

  /**
   * construct a PALISADE CryptoContextImpl for the BFV Scheme using the
   * scheme's ParamsGen methods
   * @param plaintextModulus plaintext modulus
   * @param securityLevel standard security level
   * @param relinWindow bits in the base of digits in key
   * switching/relinearization
   * @param dist distribution parameter for Gaussian noise generation
   * @param numAdds additive depth for homomorphic computations (assumes
   * numMults and numKeySwitches are set to zero)
   * @param numMults multiplicative depth for homomorphic computations
   * (assumes numAdds and numKeySwitches are set to zero)
   * @param numKeyswitches key-switching depth for homomorphic computations
   * (assumes numAdds and numMults are set to zero)
   * @param mode secret key distribution mode (RLWE [Gaussian noise] or
   * OPTIMIZED [ternary uniform distribution])
   * @param maxDepth the maximum power of secret key for which the
   * relinearization key is generated (by default, it is 2); setting it to a
   * value larger than 2 adds support for homomorphic multiplication w/o
   * relinearization
   * @param n ring dimension in case the user wants to use a custom ring
   * dimension
   * @return new context
   */
  static CryptoContext<Element> genCryptoContextBFV(
      const PlaintextModulus plaintextModulus, SecurityLevel securityLevel,
      usint relinWindow, float dist, unsigned int numAdds,
      unsigned int numMults, unsigned int numKeyswitches,
      MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0);
  /**
   * construct a PALISADE CryptoContextImpl for the BFV Scheme using the
   * scheme's ParamsGen methods
   * @param encodingParams plaintext encoding parameters
   * @param securityLevel root Hermite factor (lattice security parameter)
   * @param dist distribution parameter for Gaussian noise generation
   * @param numAdds additive depth for homomorphic computations (assumes
   * numMults and numKeySwitches are set to zero)
   * @param numMults multiplicative depth for homomorphic computations
   * (assumes numAdds and numKeySwitches are set to zero)
   * @param numKeyswitches key-switching depth for homomorphic computations
   * (assumes numAdds and numMults are set to zero)
   * @param mode secret key distribution mode (RLWE [Gaussian noise] or
   * OPTIMIZED [ternary uniform distribution])
   * @param maxDepth the maximum power of secret key for which the
   * relinearization key is generated (by default, it is 2); setting it to a
   * value larger than 2 adds support for homomorphic multiplication w/o
   * relinearization
   * @param n ring dimension in case the user wants to use a custom ring
   * dimension
   * @return new context
   */
  static CryptoContext<Element> genCryptoContextBFV(
      EncodingParams encodingParams, float securityLevel, usint relinWindow,
      float dist, unsigned int numAdds, unsigned int numMults,
      unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2,
      uint32_t n = 0);

  /**
   * construct a PALISADE CryptoContextImpl for the BFV Scheme using the
   * scheme's ParamsGen methods
   * @param encodingParams plaintext encoding parameters
   * @param securityLevel standard security level
   * @param dist distribution parameter for Gaussian noise generation
   * @param numAdds additive depth for homomorphic computations (assumes
   * numMults and numKeySwitches are set to zero)
   * @param numMults multiplicative depth for homomorphic computations
   * (assumes numAdds and numKeySwitches are set to zero)
   * @param numKeyswitches key-switching depth for homomorphic computations
   * (assumes numAdds and numMults are set to zero)
   * @param mode secret key distribution mode (RLWE [Gaussian noise] or
   * OPTIMIZED [ternary uniform distribution])
   * @param maxDepth the maximum power of secret key for which the
   * relinearization key is generated (by default, it is 2); setting it to a
   * value larger than 2 adds support for homomorphic multiplication w/o
   * relinearization
   * @param n ring dimension in case the user wants to use a custom ring
   * dimension
   * @return new context
   */
  static CryptoContext<Element> genCryptoContextBFV(
      EncodingParams encodingParams, SecurityLevel securityLevel,
      usint relinWindow, float dist, unsigned int numAdds,
      unsigned int numMults, unsigned int numKeyswitches,
      MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0);

  /**
   * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the
   * scheme's ParamsGen methods
   * @param plaintextModulus plaintext modulus
   * @param securityLevel root Hermite factor (lattice security parameter)
   * @param dist distribution parameter for Gaussian noise generation
   * @param numAdds additive depth for homomorphic computations (assumes
   * numMults and numKeySwitches are set to zero)
   * @param numMults multiplicative depth for homomorphic computations
   * (assumes numAdds and numKeySwitches are set to zero)
   * @param numKeyswitches key-switching depth for homomorphic computations
   * (assumes numAdds and numMults are set to zero)
   * @param mode secret key distribution mode (RLWE [Gaussian noise] or
   * OPTIMIZED [ternary uniform distribution])
   * @param maxDepth the maximum power of secret key for which the
   * relinearization key is generated (by default, it is 2); setting it to a
   * value larger than 2 adds support for homomorphic multiplication w/o
   * relinearization
   * @param relinWindow the key switching window (bits in the base for
   * digits) used for digit decomposition (0 - means to use only CRT
   * decomposition)
   * @param dcrtBits size of "small" CRT moduli
   * @param n ring dimension in case the user wants to use a custom ring
   * dimension
   * @return new context
   */
  static CryptoContext<Element> genCryptoContextBFVrns(
      const PlaintextModulus plaintextModulus, float securityLevel, float dist,
      unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches,
      MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0,
      size_t dcrtBits = 60, uint32_t n = 0);
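  // --- Usage sketch (illustrative only): constructing a typical BFVrns
  // context with the overload above. The parameter values here are
  // placeholders, not recommendations.
  //
  //   auto cc = CryptoContextFactory<DCRTPoly>::genCryptoContextBFVrns(
  //       65537 /*plaintextModulus*/, HEStd_128_classic, 3.2 /*dist*/,
  //       0 /*numAdds*/, 2 /*numMults*/, 0 /*numKeyswitches*/);
  //   cc->Enable(ENCRYPTION);
  //   cc->Enable(SHE);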
  /**
   * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the
   * scheme's ParamsGen methods
   * @param plaintextModulus plaintext modulus
   * @param securityLevel standard security level
   * @param dist distribution parameter for Gaussian noise generation
   * @param numAdds additive depth for homomorphic computations (assumes
   * numMults and numKeySwitches are set to zero)
   * @param numMults multiplicative depth for homomorphic computations
   * (assumes numAdds and numKeySwitches are set to zero)
   * @param numKeyswitches key-switching depth for homomorphic computations
   * (assumes numAdds and numMults are set to zero)
   * @param mode secret key distribution mode (RLWE [Gaussian noise] or
   * OPTIMIZED [ternary uniform distribution])
   * @param maxDepth the maximum power of secret key for which the
   * relinearization key is generated (by default, it is 2); setting it to a
   * value larger than 2 adds support for homomorphic multiplication w/o
   * relinearization
   * @param relinWindow the key switching window (bits in the base for
   * digits) used for digit decomposition (0 - means to use only CRT
   * decomposition)
   * @param dcrtBits size of "small" CRT moduli
   * @param n ring dimension in case the user wants to use a custom ring
   * dimension
   * @return new context
   */
  static CryptoContext<Element> genCryptoContextBFVrns(
      const PlaintextModulus plaintextModulus, SecurityLevel securityLevel,
      float dist, unsigned int numAdds, unsigned int numMults,
      unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2,
      uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0);

  /**
   * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the
   * scheme's ParamsGen methods
   * @param encodingParams plaintext encoding parameters
   * @param securityLevel root Hermite factor (lattice security parameter)
   * @param dist distribution parameter for Gaussian noise generation
   * @param numAdds additive depth for homomorphic computations (assumes
   * numMults and numKeySwitches are set to zero)
   * @param numMults multiplicative depth for homomorphic computations
   * (assumes numAdds and numKeySwitches are set to zero)
   * @param numKeyswitches key-switching depth for homomorphic computations
   * (assumes numAdds and numMults are set to zero)
   * @param mode secret key distribution mode (RLWE [Gaussian noise] or
   * OPTIMIZED [ternary uniform distribution])
   * @param maxDepth the maximum power of secret key for which the
   * relinearization key is generated (by default, it is 2); setting it to a
   * value larger than 2 adds support for homomorphic multiplication w/o
   * relinearization
   * @param relinWindow the key switching window used for digit decomposition
   * (0 - means to use only CRT decomposition)
   * @param dcrtBits size of "small" CRT moduli
   * @param n ring dimension in case the user wants to use a custom ring
   * dimension
   * @return new context
   */
  static CryptoContext<Element> genCryptoContextBFVrns(
      EncodingParams encodingParams, float securityLevel, float dist,
      unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches,
      MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0,
      size_t dcrtBits = 60, uint32_t n = 0);
setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( EncodingParams encodingParams, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( const PlaintextModulus plaintextModulus, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( const 
PlaintextModulus plaintextModulus, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( EncodingParams encodingParams, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( EncodingParams encodingParams, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BGV Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the 
base of digits in key * switching/relinearization * @param stDev sigma - distribution parameter for error distribution * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param depth of supported computation circuit (not used; for future use) * @return new context */ static CryptoContext<Element> genCryptoContextBGV( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContextImpl for the BGV Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stDev sigma - distribution parameter for error distribution * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param depth of supported computation circuit (not used; for future use) * @return new context */ static CryptoContext<Element> genCryptoContextBGV( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContextImpl for the CKKS Scheme * @param params ring parameters * @param plaintextmodulus plaintext modulus * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stDev sigma - distribution parameter for error distribution * @param mode secret key distribution mode * @param depth of supported computation circuit * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, KeySwitchTechnique ksTech = BV, RescalingTechnique rsTech = APPROXRESCALE); /** * construct a PALISADE CryptoContextImpl for the CKKS Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stDev sigma - distribution parameter for error distribution * @param mode secret key distribution mode * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, enum KeySwitchTechnique ksTech = BV, RescalingTechnique rsTech = APPROXRESCALE); /** * Automatically generate the moduli chain and construct a PALISADE * CryptoContextImpl for the CKKS Scheme with it. 
* * @param cyclOrder the cyclotomic order M * @param numPrimes the number of towers/primes to use when building the * moduli chain * @param scaleExp the plaintext scaling factor, which is equal to dcrtBits in * our implementation of CKKS * @param relinWindow the relinearization window (used in BV key switching, * use 0 for RNS decomposition) * @param batchSize the batch size of the ciphertext * @param mode RLWE or OPTIMIZED * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @return new context */ static CryptoContext<Element> genCryptoContextCKKSWithParamsGen( usint cyclOrder, usint numPrimes, usint scaleExp, usint relinWindow, usint batchSize, MODE mode, int depth = 1, int maxDepth = 2, usint firstModSize = 60, enum KeySwitchTechnique ksTech = BV, enum RescalingTechnique rsTech = APPROXRESCALE, uint32_t numLargeDigits = 4); /** * Construct a PALISADE CryptoContextImpl for the CKKS Scheme. * * @param multiplicativeDepth the depth of multiplications supported by the * scheme (equal to number of towers - 1) * @param scalingFactorBits the size of the scaling factor in bits * @param batchSize the number of slots being used in the ciphertext * @param stdLevel the standard security level we want the scheme to satisfy * @param ringDim the ring dimension (if not specified selected automatically * based on stdLevel) * @param ksTech key switching technique to use (e.g., HYBRID, GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param relinWindow the relinearization window (used in BV key switching, * use 0 for RNS decomposition) * @param mode RLWE (gaussian distribution) or OPTIMIZED (ternary * distribution) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS( usint multiplicativeDepth, usint scalingFactorBits, usint batchSize, SecurityLevel stdLevel = HEStd_128_classic, usint ringDim = 0, enum RescalingTechnique rsTech = EXACTRESCALE, enum KeySwitchTechnique ksTech = HYBRID, uint32_t numLargeDigits = 0, int maxDepth = 2, usint firstModSize = 60, usint relinWindow = 0, MODE mode = OPTIMIZED); /** * construct a PALISADE CryptoContextImpl for the BGVrns Scheme * @param params ring parameters * @param plaintextmodulus plaintext modulus * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stDev sigma - distribution parameter for error distribution * @param mode secret key distribution mode * @param depth of supported computation circuit * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrns( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, KeySwitchTechnique ksTech = BV, enum ModSwitchMethod msMethod = MANUAL); /** * construct a PALISADE CryptoContextImpl for the BGVrns Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stDev sigma - distribution parameter for error distribution * @param mode secret key distribution mode * @param maxDepth the maximum power of secret key for which the * 
relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrns( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, enum KeySwitchTechnique ksTech = BV, enum ModSwitchMethod msMethod = MANUAL); /** * Automatically generate the moduli chain and construct a PALISADE * CryptoContextImpl for the BGVrns Scheme with it. * * @param cyclOrder the cyclotomic order M * @param numPrimes the number of towers/primes to use when building the * moduli chain * @param ptm the plaintext modulus * @param mode RLWE or OPTIMIZED * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param dcrtBits the size of the moduli in bits * @param ksTech key switching technique to use (e.g., GHS or BV) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @param batchSize the number of slots being used in the ciphertext * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrnsWithParamsGen( usint cyclOrder, usint numPrimes, usint ptm, usint relinWindow, MODE mode, int depth = 1, int maxDepth = 2, enum KeySwitchTechnique ksTech = BV, usint firstModSize = 0, usint dcrtBits = 0, uint32_t numLargeDigits = 4, usint batchSize = 0, enum ModSwitchMethod msMethod = MANUAL); /** * Construct a PALISADE CryptoContextImpl for the BGVrns Scheme. * * @param multiplicativeDepth the depth of multiplications supported by the * scheme (equal to number of towers - 1) * @param ptm the plaintext modulus * @param stdLevel the standard security level we want the scheme to satisfy * @param stdDev sigma - distribution parameter for error distribution * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param mode RLWE (gaussian distribution) or OPTIMIZED (ternary * distribution) * @param ksTech key switching technique to use (e.g., HYBRID, GHS or BV) * @param ringDim the ring dimension (if not specified selected automatically * based on stdLevel) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @param firstModSize the bit-length of the first modulus * @param dcrtBits the size of the moduli in bits * @param relinWindow the relinearization window (used in BV key switching, * use 0 for RNS decomposition) * @param batchSize the number of slots being used in the ciphertext * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrns( usint multiplicativeDepth, usint ptm, SecurityLevel stdLevel = HEStd_128_classic, float stdDev = 3.19, int maxDepth = 2, MODE mode = OPTIMIZED, enum KeySwitchTechnique ksTech = HYBRID, usint ringDim = 0, uint32_t numLargeDigits = 0, usint firstModSize = 0, usint dcrtBits = 0, usint relinWindow = 0, usint batchSize = 0, enum ModSwitchMethod msMethod = AUTO); /** * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stDev sigma - distribution parameter for error distribution * @param stDevStSt distribution parameter for secret key distribution * @param depth of 
supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param securityLevel root Hermite factor * @return new context */ static CryptoContext<Element> genCryptoContextStehleSteinfeld( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stDev sigma - distribution parameter for error distribution * @param stDevStSt distribution parameter for secret key distribution * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param securityLevel root Hermite factor * @return new context */ static CryptoContext<Element> genCryptoContextStehleSteinfeld( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the Null Scheme * @param m cyclotomic order (ring dimension n = m/2 for power-of-two * cyclotomics) * @param ptModulus plaintext modulus * @return new context */ static CryptoContext<Element> genCryptoContextNull( unsigned int m, const PlaintextModulus ptModulus); /** * construct a PALISADE CryptoContextImpl for the Null Scheme * @param m cyclotomic order (ring dimension n = m/2 for power-of-two * cyclotomics) * @param encodingParams plaintext encoding parameters * @return new context */ static CryptoContext<Element> genCryptoContextNull( unsigned int m, EncodingParams encodingParams); static CryptoContext<Element> DeserializeAndCreateContext( const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); }; } // namespace lbcrypto #endif /* SRC_PKE_CRYPTOCONTEXT_H_ */
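/* Usage sketch (added; not part of the original header). A minimal example of
   how the factory methods declared above are typically invoked, assuming they
   are static members of CryptoContextFactory<DCRTPoly> as in PALISADE; the
   concrete parameter values are illustrative placeholders, not
   recommendations. */
#if 0  // illustration only
#include "palisade.h"
using namespace lbcrypto;

void ContextGenExample() {
  // CKKS: multiplicative depth 2, 50-bit scaling factor, batch size 8;
  // the remaining parameters keep the defaults from the declaration above.
  CryptoContext<DCRTPoly> ckksCC =
      CryptoContextFactory<DCRTPoly>::genCryptoContextCKKS(2, 50, 8);
  ckksCC->Enable(ENCRYPTION);
  ckksCC->Enable(SHE);

  // BFVrns: plaintext modulus 65537 at 128-bit classical security,
  // sigma = 3.2, sized for 0 additions, 2 multiplications, 0 key switches.
  CryptoContext<DCRTPoly> bfvCC =
      CryptoContextFactory<DCRTPoly>::genCryptoContextBFVrns(
          65537, HEStd_128_classic, 3.2, 0, 2, 0);
  bfvCC->Enable(ENCRYPTION);
  bfvCC->Enable(SHE);
}
#endif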
ZQ_CNN_MTCNN.h
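/* Usage sketch (added; not part of the original file). The detector below is
   driven in three steps: Init() loads the P-net/R-net/O-net (and optional
   L-net) models, SetPara() fixes the frame size and thresholds, and Find()
   runs the cascade on a BGR frame. The model file names are placeholders.

     ZQ::ZQ_CNN_MTCNN mtcnn;
     mtcnn.Init("det1.zqparams", "det1.nchwbin",
                "det2.zqparams", "det2.nchwbin",
                "det3.zqparams", "det3.nchwbin", 4);  // 4 worker threads
     mtcnn.SetPara(width, height, 60);                // min face size 60 px
     std::vector<ZQ_CNN_BBox> faces;
     mtcnn.Find(bgr_pixels, width, height, width * 3, faces);
*/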
#ifndef _ZQ_CNN_MTCNN_H_ #define _ZQ_CNN_MTCNN_H_ #pragma once #include "ZQ_CNN_Net.h" #include "ZQ_CNN_BBoxUtils.h" #include <omp.h> namespace ZQ { class ZQ_CNN_MTCNN { public: using string = std::string; ZQ_CNN_MTCNN() { min_size = 60; thresh[0] = 0.6; thresh[1] = 0.7; thresh[2] = 0.7; nms_thresh[0] = 0.6; nms_thresh[1] = 0.7; nms_thresh[2] = 0.7; width = 0; height = 0; factor = 0.709; pnet_overlap_thresh_count = 4; pnet_size = 12; pnet_stride = 2; special_handle_very_big_face = false; force_run_pnet_multithread = false; show_debug_info = false; limit_r_num = 0; limit_o_num = 0; limit_l_num = 0; } ~ZQ_CNN_MTCNN() { } private: #if __ARM_NEON const int BATCH_SIZE = 16; #else const int BATCH_SIZE = 64; #endif std::vector<ZQ_CNN_Net> pnet, rnet, onet, lnet; bool has_lnet; int thread_num; float thresh[3], nms_thresh[3]; int min_size; int width, height; float factor; int pnet_overlap_thresh_count; int pnet_size; int pnet_stride; int rnet_size; int onet_size; int lnet_size; bool special_handle_very_big_face; bool do_landmark; float early_accept_thresh; float nms_thresh_per_scale; bool force_run_pnet_multithread; std::vector<float> scales; std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> pnet_images; ZQ_CNN_Tensor4D_NHW_C_Align128bit ori_input, rnet_image, onet_image; bool show_debug_info; int limit_r_num; int limit_o_num; int limit_l_num; public: void TurnOnShowDebugInfo() { show_debug_info = true; } void TurnOffShowDebugInfo() { show_debug_info = false; } void SetLimit(int limit_r = 0, int limit_o = 0, int limit_l = 0) { limit_r_num = limit_r; limit_o_num = limit_o; limit_l_num = limit_l; } bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model, const string& onet_param, const string& onet_model, int thread_num = 1, bool has_lnet = false, const string& lnet_param = "", const std::string& lnet_model = "") { if (thread_num < 1) force_run_pnet_multithread = true; else force_run_pnet_multithread = false; thread_num = __max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); this->has_lnet = has_lnet; if (has_lnet) { lnet.resize(thread_num); } bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFrom(pnet_param, pnet_model,true,1e-9, true) && rnet[i].LoadFrom(rnet_param, rnet_model, true, 1e-9, true) && onet[i].LoadFrom(onet_param, onet_model, true, 1e-9, true); if (has_lnet && ret) ret = lnet[i].LoadFrom(lnet_param, lnet_model, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); if (has_lnet) lnet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.2f M, onet = %.2f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); if (has_lnet) printf("lnet = %.2f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; if (has_lnet) { lnet[0].GetInputDim(C, H, W); lnet_size = H; } return ret; } bool InitFromBuffer( const char* pnet_param, __int64 pnet_param_len, const char* pnet_model, __int64 pnet_model_len, const char* rnet_param, __int64 rnet_param_len, const char* rnet_model, __int64 rnet_model_len, const char* onet_param, __int64 onet_param_len, const char* onet_model, __int64 onet_model_len, int thread_num = 1, bool has_lnet = false, const char* lnet_param = 0, __int64 lnet_param_len = 0, const char* lnet_model = 0, __int64 lnet_model_len = 0) { if 
(thread_num < 1) force_run_pnet_multithread = true; else force_run_pnet_multithread = false; thread_num = __max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); this->has_lnet = has_lnet; if(has_lnet) lnet.resize(thread_num); bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFromBuffer(pnet_param, pnet_param_len,pnet_model,pnet_model_len, true, 1e-9, true) && rnet[i].LoadFromBuffer(rnet_param, rnet_param_len, rnet_model, rnet_model_len, true, 1e-9, true) && onet[i].LoadFromBuffer(onet_param, onet_param_len, onet_model, onet_model_len, true, 1e-9, true); if (has_lnet && ret) ret = lnet[i].LoadFromBuffer(lnet_param, lnet_param_len, lnet_model, lnet_model_len, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); if (has_lnet) lnet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); if (has_lnet) printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; if (has_lnet) { lnet[0].GetInputDim(C, H, W); lnet_size = H; } return ret; } void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7, float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709, int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2, bool special_handle_very_big_face = false, bool do_landmark = true, float early_accept_thresh = 1.00) { min_size = __max(pnet_size, min_face_size); thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh); nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh); scale_factor = __max(0.5, __min(0.97, scale_factor)); this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count); this->pnet_size = pnet_size; this->pnet_stride = pnet_stride; this->special_handle_very_big_face = special_handle_very_big_face; this->do_landmark = do_landmark; this->early_accept_thresh = early_accept_thresh; if (pnet_size == 20 && pnet_stride == 4) nms_thresh_per_scale = 0.45; else nms_thresh_per_scale = 0.495; if (width != w || height != h || factor != scale_factor) { scales.clear(); pnet_images.clear(); width = w; height = h; factor = scale_factor; /* fix: factor was never updated from the clamped scale_factor, so the pyramid below used a stale value and the rebuild check always fired */ float minside = __min(width, height); int MIN_DET_SIZE = pnet_size; float m = (float)MIN_DET_SIZE / min_size; minside *= m; while (minside > MIN_DET_SIZE) { scales.push_back(m); minside *= factor; m *= factor; } minside = __min(width, height); int count = scales.size(); for (int i = scales.size() - 1; i >= 0; i--) { if (ceil(scales[i] * minside) <= pnet_size) { count--; } } if (special_handle_very_big_face) { if (count > 2) count--; scales.resize(count); if (count > 0) { float last_size = ceil(scales[count - 1] * minside); for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2) { scales.push_back((float)tmp_size / minside); count++; } } scales.push_back((float)pnet_size / minside); count++; } else { scales.push_back((float)pnet_size / minside); count++; } pnet_images.resize(count); } } bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results) { double t1 = omp_get_wtime(); if (width != _width || height != 
_height) return false; if (!ori_input.ConvertFromBGR(bgr_img, width, height, _widthStep)) return false; double t2 = omp_get_wtime(); if (show_debug_info) printf("convert cost: %.3f ms\n", 1000 * (t2 - t1)); return Find(ori_input, results); } bool Find106(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox106>& results) { double t1 = omp_get_wtime(); if (width != _width || height != _height) return false; if (!ori_input.ConvertFromBGR(bgr_img, width, height, _widthStep)) return false; double t2 = omp_get_wtime(); if (show_debug_info) printf("convert cost: %.3f ms\n", 1000 * (t2 - t1)); return Find106(ori_input, results); } bool Find(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& results) { double t1 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox; if (!_Pnet_stage(input, firstBbox)) return false; //results = firstBbox; //return true; if (limit_r_num > 0) { _select(firstBbox, limit_r_num, input.GetW(), input.GetH()); } double t2 = omp_get_wtime(); if (!_Rnet_stage(input, firstBbox, secondBbox)) return false; //results = secondBbox; //return true; if (limit_o_num > 0) { _select(secondBbox, limit_o_num, input.GetW(), input.GetH()); } if (!has_lnet || !do_landmark) { double t3 = omp_get_wtime(); if (!_Onet_stage(input, secondBbox, results)) return false; double t4 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n", 1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3)); } } else { double t3 = omp_get_wtime(); if (!_Onet_stage(input, secondBbox, thirdBbox)) return false; if (limit_l_num > 0) { _select(thirdBbox, limit_l_num, input.GetW(), input.GetH()); } double t4 = omp_get_wtime(); if (!_Lnet_stage(input, thirdBbox, results)) return false; double t5 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n", 1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4)); } } return true; } bool Find106(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox106>& results) { double t1 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox; if (!_Pnet_stage(input, firstBbox)) return false; //results = firstBbox; //return true; if (limit_r_num > 0) { _select(firstBbox, limit_r_num, input.GetW(), input.GetH()); } double t2 = omp_get_wtime(); if (!_Rnet_stage(input, firstBbox, secondBbox)) return false; //results = secondBbox; //return true; if (limit_o_num > 0) { _select(secondBbox, limit_o_num, input.GetW(), input.GetH()); } if (!has_lnet || !do_landmark) { return false; } double t3 = omp_get_wtime(); if (!_Onet_stage(input, secondBbox, thirdBbox)) return false; if (limit_l_num > 0) { _select(thirdBbox, limit_l_num, input.GetW(), input.GetH()); } double t4 = omp_get_wtime(); if (!_Lnet106_stage(input, thirdBbox, results)) return false; double t5 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n", 1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4)); } return true; } private: void _compute_Pnet_single_thread(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW) { int 
scale_num = 0; for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } maps.resize(scale_num); for (int i = 0; i < scale_num; i++) { maps[i].resize(mapH[i] * mapW[i]); } for (int i = 0; i < scale_num; i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); float cur_scale_x = (float)width / changedW; float cur_scale_y = (float)height / changedH; double t10 = omp_get_wtime(); if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } double t11 = omp_get_wtime(); if (scales[i] != 1) pnet[0].Forward(pnet_images[i]); else pnet[0].Forward(input); double t12 = omp_get_wtime(); if (show_debug_info) printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n", i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11)); const ZQ_CNN_Tensor4D* score = pnet[0].GetBlobByName("prob1"); //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if(row < mapH[i] && col < mapW[i]) maps[i][row*mapW[i] + col] = *p; p += scorePixStep; } } } } void _compute_Pnet_multi_thread(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW) { if (thread_num <= 1) { for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic, 1) for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } } } int scale_num = 0; for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } maps.resize(scale_num); for (int i = 0; i < scale_num; i++) { maps[i].resize(mapH[i] * mapW[i]); } std::vector<int> task_rect_off_x; std::vector<int> task_rect_off_y; std::vector<int> task_rect_width; std::vector<int> task_rect_height; std::vector<float> task_scale; std::vector<int> task_scale_id; int stride = pnet_stride; const int block_size = 64 * stride; int cellsize = pnet_size; int border_size = cellsize - stride; int overlap_border_size = cellsize / stride; int jump_size = block_size - border_size; for (int i = 0; i < scales.size(); i++) { int changeH = (int)ceil(height*scales[i]); int changeW = (int)ceil(width*scales[i]); if (changeH < pnet_size || changeW < pnet_size) continue; int block_H_num = 0; int block_W_num = 0; int start = 0; while (start < changeH) { block_H_num++; if (start + block_size >= changeH) break; start += jump_size; } start = 0; while (start < changeW) { block_W_num++; if (start + 
block_size >= changeW) break; start += jump_size; } for (int s = 0; s < block_H_num; s++) { for (int t = 0; t < block_W_num; t++) { int rect_off_x = t * jump_size; int rect_off_y = s * jump_size; int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x; int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y; if (rect_width >= cellsize && rect_height >= cellsize) { task_rect_off_x.push_back(rect_off_x); task_rect_off_y.push_back(rect_off_y); task_rect_width.push_back(rect_width); task_rect_height.push_back(rect_height); task_scale.push_back(scales[i]); task_scale_id.push_back(i); } } } } // int task_num = task_scale.size(); std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_pnet_images(thread_num); if (thread_num <= 1) { for (int i = 0; i < task_num; i++) { int thread_id = 0;// omp_get_thread_num(); int scale_id = task_scale_id[i]; float cur_scale = task_scale[i]; int i_rect_off_x = task_rect_off_x[i]; int i_rect_off_y = task_rect_off_y[i]; int i_rect_width = task_rect_width[i]; int i_rect_height = task_rect_height[i]; if (scale_id == 0 && scales[0] == 1) { if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } if (!pnet[thread_id].Forward(task_pnet_images[thread_id])) continue; const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1"); int task_count = 0; //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } else { #pragma omp parallel for num_threads(thread_num) for (int i = 0; i < task_num; i++) { int thread_id = omp_get_thread_num(); int scale_id = task_scale_id[i]; float cur_scale = task_scale[i]; int i_rect_off_x = task_rect_off_x[i]; int i_rect_off_y = task_rect_off_y[i]; int i_rect_width = task_rect_width[i]; int i_rect_height = task_rect_height[i]; if (scale_id == 0 && scales[0] == 1) { if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } if (!pnet[thread_id].Forward(task_pnet_images[thread_id])) continue; const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1"); int task_count = 0; //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } } bool _Pnet_stage(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& firstBbox) { if (thread_num <= 0) return false; double t1 = omp_get_wtime(); firstBbox.clear(); 
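/* Added summary of the stage below: each pyramid scale is pushed through
   P-net to get a face-probability map (single- or multi-threaded above),
   the maps are thresholded at thresh[0], candidate windows are NMS-ed per
   scale (block-wise when the map is large), scaled back to original-image
   coordinates, and finally merged by a global NMS at nms_thresh[0] followed
   by refine-and-square. */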
std::vector<std::vector<float> > maps; std::vector<int> mapH; std::vector<int> mapW; if (thread_num == 1 && !force_run_pnet_multithread) { pnet[0].TurnOffShowDebugInfo(); //pnet[0].TurnOnShowDebugInfo(); _compute_Pnet_single_thread(input, maps, mapH, mapW); } else { _compute_Pnet_multi_thread(input, maps, mapH, mapW); } ZQ_CNN_OrderScore order; std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(scales.size()); std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(scales.size()); const int block_size = 32; int stride = pnet_stride; int cellsize = pnet_size; int border_size = cellsize / stride; for (int i = 0; i < maps.size(); i++) { double t13 = omp_get_wtime(); int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; float cur_scale_x = (float)width / changedW; float cur_scale_y = (float)height / changedH; int count = 0; //score p int scoreH = mapH[i]; int scoreW = mapW[i]; const float *p = &maps[i][0]; if (scoreW <= block_size && scoreH < block_size) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bounding_boxes[i].push_back(bbox); bounding_scores[i].push_back(order); count++; } p++; } } int before_count = bounding_boxes[i].size(); ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } else { int before_count = 0, after_count = 0; int block_H_num = __max(1, scoreH / block_size); int block_W_num = __max(1, scoreW / block_size); int block_num = block_H_num*block_W_num; int width_per_block = scoreW / block_W_num; int height_per_block = scoreH / block_H_num; std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num); std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num); std::vector<int> block_start_w(block_num), block_end_w(block_num); std::vector<int> block_start_h(block_num), block_end_h(block_num); for (int bh = 0; bh < block_H_num; bh++) { for (int bw = 0; bw < block_W_num; bw++) { int bb = bh * block_W_num + bw; block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size); /* fix: the last block in each direction must be tested against block_W_num/block_H_num (not block_num), otherwise the right/bottom edges of the score map are dropped whenever both grid dimensions exceed 1 */ block_end_w[bb] = (bw == block_W_num - 1) ? scoreW : ((bw + 1)*width_per_block); block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size); block_end_h[bb] = (bh == block_H_num - 1) ? 
scoreH : ((bh + 1)*height_per_block); } } int chunk_size = 1;// ceil((float)block_num / thread_num); if (thread_num <= 1) { for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } else { #pragma omp parallel for schedule(dynamic, chunk_size) num_threads(thread_num) for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { const float* p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } count = 0; for (int bb = 0; bb < block_num; bb++) { std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin(); for (; it != tmp_bounding_boxes[bb].end(); it++) { if ((*it).exist) { bounding_boxes[i].push_back(*it); order.score = (*it).score; order.oriOrder = count; bounding_scores[i].push_back(order); count++; } } } //ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0); after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } } std::vector<ZQ_CNN_OrderScore> firstOrderScore; int count = 0; for (int i = 0; i < scales.size(); i++) { std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin(); for (; it != 
bounding_boxes[i].end(); it++) { if ((*it).exist) { firstBbox.push_back(*it); order.score = (*it).score; order.oriOrder = count; firstOrderScore.push_back(order); count++; } } } //the first stage's nms if (count < 1) return false; double t15 = omp_get_wtime(); ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1); ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height,true); double t16 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms\n", 1000 * (t16 - t15)); if (show_debug_info) printf("first stage candidate count: %d\n", count); double t3 = omp_get_wtime(); if (show_debug_info) printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t1)); return true; } bool _Rnet_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& firstBbox, std::vector<ZQ_CNN_BBox>& secondBbox) { double t3 = omp_get_wtime(); secondBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin(); std::vector<ZQ_CNN_OrderScore> secondScore; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int r_count = 0; for (; it != firstBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); r_count++; secondBbox.push_back(*it); } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)r_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)r_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_rnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_secondBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(r_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_secondBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_secondBbox[i][j] = secondBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[0].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D* score = rnet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = rnet[0].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < 
task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[thread_id].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D* score = rnet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = rnet[thread_id].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_secondBbox[i].size(); } secondBbox.resize(count); secondScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_secondBbox[i].size(); j++) { secondBbox[id] = task_secondBbox[i][j]; secondScore[id].score = secondBbox[id].score; secondScore[id].oriOrder = id; id++; } } //ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Union"); ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min"); ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true); count = secondBbox.size(); double t4 = omp_get_wtime(); if (show_debug_info) printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count); if (show_debug_info) printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3)); return true; } bool _Onet_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& secondBbox, std::vector<ZQ_CNN_BBox>& thirdBbox) { double t4 = omp_get_wtime(); thirdBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin(); std::vector<ZQ_CNN_OrderScore> thirdScore; std::vector<ZQ_CNN_BBox> early_accept_thirdBbox; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int o_count = 0; for (; it != secondBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || 
off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { if (!do_landmark && it->score > early_accept_thresh) { early_accept_thirdBbox.push_back(*it); } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); o_count++; thirdBbox.push_back(*it); } } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)o_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)o_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_onet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_thirdBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(o_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_thirdBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_thirdBbox[i][j] = thirdBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[0].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* score = onet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = onet[0].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D* keyPoint = onet[0].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { 
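/* Added note: task_count counts the boxes in this batch whose O-net score
   passed thresh[2]; if none survived, the whole batch is cleared and
   skipped. */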
task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[thread_id].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* score = onet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = onet[thread_id].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D* keyPoint = onet[thread_id].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_thirdBbox[i].size(); } thirdBbox.resize(count); thirdScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_thirdBbox[i].size(); j++) { thirdBbox[id] = task_thirdBbox[i][j]; thirdScore[id].score = task_thirdBbox[i][j].score; thirdScore[id].oriOrder = id; id++; } } ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false); ZQ_CNN_OrderScore order; for (int i = 0; i < early_accept_thirdBbox.size(); i++) { order.score = early_accept_thirdBbox[i].score; order.oriOrder = count++; thirdScore.push_back(order); thirdBbox.push_back(early_accept_thirdBbox[i]); } ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min"); double t5 = omp_get_wtime(); if (show_debug_info) printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count); if (show_debug_info) printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool _Lnet_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox>& fourthBbox) { double t4 = 
omp_get_wtime(); fourthBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j] = copy_fourthBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], 
task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } fourthBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(fourthBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 10); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool _Lnet106_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox106>& resultBbox) { double t4 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> fourthBbox; std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox106> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = 
src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j].col1 = copy_fourthBbox[st_id + j].col1; task_fourthBbox[i][j].col2 = copy_fourthBbox[st_id + j].col2; task_fourthBbox[i][j].row1 = copy_fourthBbox[st_id + j].row1; task_fourthBbox[i][j].row2 = copy_fourthBbox[st_id + j].row2; task_fourthBbox[i][j].area = copy_fourthBbox[st_id + j].area; task_fourthBbox[i][j].score = copy_fourthBbox[st_id + j].score; task_fourthBbox[i][j].exist = copy_fourthBbox[st_id + j].exist; } } } resultBbox.resize(l_count); for (int i = 0; i < l_count; i++) { resultBbox[i].col1 = fourthBbox[i].col1; resultBbox[i].col2 = fourthBbox[i].col2; resultBbox[i].row1 = fourthBbox[i].row1; resultBbox[i].row2 = fourthBbox[i].row2; resultBbox[i].score = fourthBbox[i].score; resultBbox[i].exist = fourthBbox[i].exist; resultBbox[i].area = fourthBbox[i].area; } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104)) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]/**0.25*/; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]/**0.25*/; } else { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } } else { #pragma omp parallel for num_threads(thread_num) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104)) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]*0.5; 
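// ---------------------------------------------------------------------------
// Illustrative sketch (kept out of compilation): the 106-point head emits
// keypoint_num = C/2 interleaved (x, y) pairs normalized to [0,1]. The index
// test around this point singles out the ranges [33,43), [64,72) and [84,104)
// for an extra damping factor (0.5 in this branch) before each pair is mapped
// back into the candidate box. `decode_106_point` is a hypothetical name.
#if 0
static bool in_damped_range(int num)
{
    return (num >= 33 && num < 43) || (num >= 64 && num < 72) ||
           (num >= 84 && num < 104);
}
static void decode_106_point(float col1, float row1, float col2, float row2,
                             const float* net_out, float* ppoint,
                             int keypoint_num, float damp)
{
    for (int num = 0; num < keypoint_num; num++) {
        float s = in_damped_range(num) ? damp : 1.0f; // damp only the selected ranges
        ppoint[num * 2]     = col1 + (col2 - col1) * net_out[num * 2]     * s;
        ppoint[num * 2 + 1] = row1 + (row2 - row1) * net_out[num * 2 + 1] * s;
    }
}
#endif
// ---------------------------------------------------------------------------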
task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]*0.5; } else { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } resultBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(resultBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 212); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } void _select(std::vector<ZQ_CNN_BBox>& bbox, int limit_num, int width, int height) { int in_num = bbox.size(); if (limit_num >= in_num) return; bbox.resize(limit_num); } }; } #endif
#ifndef _ZQ_CNN_MTCNN_H_ #define _ZQ_CNN_MTCNN_H_ #pragma once #include "ZQ_CNN_Net.h" #include "ZQ_CNN_BBoxUtils.h" #include <omp.h> namespace ZQ { class ZQ_CNN_MTCNN { public: using string = std::string; ZQ_CNN_MTCNN() { min_size = 60; thresh[0] = 0.6; thresh[1] = 0.7; thresh[2] = 0.7; nms_thresh[0] = 0.6; nms_thresh[1] = 0.7; nms_thresh[2] = 0.7; width = 0; height = 0; factor = 0.709; pnet_overlap_thresh_count = 4; pnet_size = 12; pnet_stride = 2; special_handle_very_big_face = false; force_run_pnet_multithread = false; show_debug_info = false; limit_r_num = 0; limit_o_num = 0; limit_l_num = 0; } ~ZQ_CNN_MTCNN() { } private: #if __ARM_NEON const int BATCH_SIZE = 16; #else const int BATCH_SIZE = 64; #endif std::vector<ZQ_CNN_Net> pnet, rnet, onet, lnet; bool has_lnet; int thread_num; float thresh[3], nms_thresh[3]; int min_size; int width, height; float factor; int pnet_overlap_thresh_count; int pnet_size; int pnet_stride; int rnet_size; int onet_size; int lnet_size; bool special_handle_very_big_face; bool do_landmark; float early_accept_thresh; float nms_thresh_per_scale; bool force_run_pnet_multithread; std::vector<float> scales; std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> pnet_images; ZQ_CNN_Tensor4D_NHW_C_Align128bit ori_input, rnet_image, onet_image; bool show_debug_info; int limit_r_num; int limit_o_num; int limit_l_num; public: void TurnOnShowDebugInfo() { show_debug_info = true; } void TurnOffShowDebugInfo() { show_debug_info = false; } void SetLimit(int limit_r = 0, int limit_o = 0, int limit_l = 0) { limit_r_num = limit_r; limit_o_num = limit_o; limit_l_num = limit_l; } bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model, const string& onet_param, const string& onet_model, int thread_num = 1, bool has_lnet = false, const string& lnet_param = "", const std::string& lnet_model = "") { if (thread_num < 1) force_run_pnet_multithread = true; else force_run_pnet_multithread = false; thread_num = __max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); this->has_lnet = has_lnet; if (has_lnet) { lnet.resize(thread_num); } bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFrom(pnet_param, pnet_model,true,1e-9, true) && rnet[i].LoadFrom(rnet_param, rnet_model, true, 1e-9, true) && onet[i].LoadFrom(onet_param, onet_model, true, 1e-9, true); if (has_lnet && ret) ret = lnet[i].LoadFrom(lnet_param, lnet_model, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); if (has_lnet) lnet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.2f M, onet = %.2f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); if (has_lnet) printf("lnet = %.2f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; if (has_lnet) { lnet[0].GetInputDim(C, H, W); lnet_size = H; } return ret; } bool InitFromBuffer( const char* pnet_param, __int64 pnet_param_len, const char* pnet_model, __int64 pnet_model_len, const char* rnet_param, __int64 rnet_param_len, const char* rnet_model, __int64 rnet_model_len, const char* onet_param, __int64 onet_param_len, const char* onet_model, __int64 onet_model_len, int thread_num = 1, bool has_lnet = false, const char* lnet_param = 0, __int64 lnet_param_len = 0, const char* lnet_model = 0, __int64 lnet_model_len = 0) { if 
(thread_num < 1) force_run_pnet_multithread = true; else force_run_pnet_multithread = false; thread_num = __max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); this->has_lnet = has_lnet; if(has_lnet) lnet.resize(thread_num); bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFromBuffer(pnet_param, pnet_param_len,pnet_model,pnet_model_len, true, 1e-9, true) && rnet[i].LoadFromBuffer(rnet_param, rnet_param_len, rnet_model, rnet_model_len, true, 1e-9, true) && onet[i].LoadFromBuffer(onet_param, onet_param_len, onet_model, onet_model_len, true, 1e-9, true); if (has_lnet && ret) ret = lnet[i].LoadFromBuffer(lnet_param, lnet_param_len, lnet_model, lnet_model_len, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); if (has_lnet) lnet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); if (has_lnet) printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; if (has_lnet) { lnet [0].GetInputDim (C, H, W); lnet_size = H; } return ret; } void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7, float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709, int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2, bool special_handle_very_big_face = false, bool do_landmark = true, float early_accept_thresh = 1.00) { min_size = __max(pnet_size, min_face_size); thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh); nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh); scale_factor = __max(0.5, __min(0.97, scale_factor)); this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count); this->pnet_size = pnet_size; this->pnet_stride = pnet_stride; this->special_handle_very_big_face = special_handle_very_big_face; this->do_landmark = do_landmark; this->early_accept_thresh = early_accept_thresh; if (pnet_size == 20 && pnet_stride == 4) nms_thresh_per_scale = 0.45; else nms_thresh_per_scale = 0.495; if (width != w || height != h || factor != scale_factor) { scales.clear(); pnet_images.clear(); width = w; height = h; float minside = __min(width, height); int MIN_DET_SIZE = pnet_size; float m = (float)MIN_DET_SIZE / min_size; minside *= m; while (minside > MIN_DET_SIZE) { scales.push_back(m); minside *= factor; m *= factor; } minside = __min(width, height); int count = scales.size(); for (int i = scales.size() - 1; i >= 0; i--) { if (ceil(scales[i] * minside) <= pnet_size) { count--; } } if (special_handle_very_big_face) { if (count > 2) count--; scales.resize(count); if (count > 0) { float last_size = ceil(scales[count - 1] * minside); for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2) { scales.push_back((float)tmp_size / minside); count++; } } scales.push_back((float)pnet_size / minside); count++; } else { scales.push_back((float)pnet_size / minside); count++; } pnet_images.resize(count); } } bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results) { double t1 = omp_get_wtime(); if (width != _width || height != 
_height) return false; if (!ori_input.ConvertFromBGR(bgr_img, width, height, _widthStep)) return false; double t2 = omp_get_wtime(); if (show_debug_info) printf("convert cost: %.3f ms\n", 1000 * (t2 - t1)); return Find(ori_input, results); } bool Find106(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox106>& results) { double t1 = omp_get_wtime(); if (width != _width || height != _height) return false; if (!ori_input.ConvertFromBGR(bgr_img, width, height, _widthStep)) return false; double t2 = omp_get_wtime(); if (show_debug_info) printf("convert cost: %.3f ms\n", 1000 * (t2 - t1)); return Find106(ori_input, results); } bool Find(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& results) { double t1 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox; if (!_Pnet_stage(input, firstBbox)) return false; //results = firstBbox; //return true; if (limit_r_num > 0) { _select(firstBbox, limit_r_num, input.GetW(), input.GetH()); } double t2 = omp_get_wtime(); if (!_Rnet_stage(input, firstBbox, secondBbox)) return false; //results = secondBbox; //return true; if (limit_o_num > 0) { _select(secondBbox, limit_o_num, input.GetW(), input.GetH()); } if (!has_lnet || !do_landmark) { double t3 = omp_get_wtime(); if (!_Onet_stage(input, secondBbox, results)) return false; double t4 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n", 1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3)); } } else { double t3 = omp_get_wtime(); if (!_Onet_stage(input, secondBbox, thirdBbox)) return false; if (limit_l_num > 0) { _select(thirdBbox, limit_l_num, input.GetW(), input.GetH()); } double t4 = omp_get_wtime(); if (!_Lnet_stage(input, thirdBbox, results)) return false; double t5 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n", 1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4)); } } return true; } bool Find106(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox106>& results) { double t1 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox; if (!_Pnet_stage(input, firstBbox)) return false; //results = firstBbox; //return true; if (limit_r_num > 0) { _select(firstBbox, limit_r_num, input.GetW(), input.GetH()); } double t2 = omp_get_wtime(); if (!_Rnet_stage(input, firstBbox, secondBbox)) return false; //results = secondBbox; //return true; if (limit_o_num > 0) { _select(secondBbox, limit_o_num, input.GetW(), input.GetH()); } if (!has_lnet || !do_landmark) { return false; } double t3 = omp_get_wtime(); if (!_Onet_stage(input, secondBbox, thirdBbox)) return false; if (limit_l_num > 0) { _select(thirdBbox, limit_l_num, input.GetW(), input.GetH()); } double t4 = omp_get_wtime(); if (!_Lnet106_stage(input, thirdBbox, results)) return false; double t5 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n", 1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4)); } return true; } private: void _compute_Pnet_single_thread(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW) { int 
scale_num = 0; for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } maps.resize(scale_num); for (int i = 0; i < scale_num; i++) { maps[i].resize(mapH[i] * mapW[i]); } for (int i = 0; i < scale_num; i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); float cur_scale_x = (float)width / changedW; float cur_scale_y = (float)height / changedH; double t10 = omp_get_wtime(); if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } double t11 = omp_get_wtime(); if (scales[i] != 1) pnet[0].Forward(pnet_images[i]); else pnet[0].Forward(input); double t12 = omp_get_wtime(); if (show_debug_info) printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n", i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11)); const ZQ_CNN_Tensor4D* score = pnet[0].GetBlobByName("prob1"); //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if(row < mapH[i] && col < mapW[i]) maps[i][row*mapW[i] + col] = *p; p += scorePixStep; } } } } void _compute_Pnet_multi_thread(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW) { if (thread_num <= 1) { for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } } } else { for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } } } int scale_num = 0; for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } maps.resize(scale_num); for (int i = 0; i < scale_num; i++) { maps[i].resize(mapH[i] * mapW[i]); } std::vector<int> task_rect_off_x; std::vector<int> task_rect_off_y; std::vector<int> task_rect_width; std::vector<int> task_rect_height; std::vector<float> task_scale; std::vector<int> task_scale_id; int stride = pnet_stride; const int block_size = 64 * stride; int cellsize = pnet_size; int border_size = cellsize - stride; int overlap_border_size = cellsize / stride; int jump_size = block_size - border_size; for (int i = 0; i < scales.size(); i++) { int changeH = (int)ceil(height*scales[i]); int changeW = (int)ceil(width*scales[i]); if (changeH < pnet_size || changeW < pnet_size) continue; int block_H_num = 0; int block_W_num = 0; int start = 0; while (start < changeH) { block_H_num++; if (start + block_size >= changeH) break; start += jump_size; } start = 0; while (start < changeW) { block_W_num++; if (start + block_size >= changeW) break; start += jump_size; } for (int s = 0; s < 
block_H_num; s++) { for (int t = 0; t < block_W_num; t++) { int rect_off_x = t * jump_size; int rect_off_y = s * jump_size; int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x; int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y; if (rect_width >= cellsize && rect_height >= cellsize) { task_rect_off_x.push_back(rect_off_x); task_rect_off_y.push_back(rect_off_y); task_rect_width.push_back(rect_width); task_rect_height.push_back(rect_height); task_scale.push_back(scales[i]); task_scale_id.push_back(i); } } } } // int task_num = task_scale.size(); std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_pnet_images(thread_num); if (thread_num <= 1) { for (int i = 0; i < task_num; i++) { int thread_id = 0;// omp_get_thread_num(); int scale_id = task_scale_id[i]; float cur_scale = task_scale[i]; int i_rect_off_x = task_rect_off_x[i]; int i_rect_off_y = task_rect_off_y[i]; int i_rect_width = task_rect_width[i]; int i_rect_height = task_rect_height[i]; if (scale_id == 0 && scales[0] == 1) { if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } if (!pnet[thread_id].Forward(task_pnet_images[thread_id])) continue; const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1"); int task_count = 0; //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } else { for (int i = 0; i < task_num; i++) { int thread_id = omp_get_thread_num(); int scale_id = task_scale_id[i]; float cur_scale = task_scale[i]; int i_rect_off_x = task_rect_off_x[i]; int i_rect_off_y = task_rect_off_y[i]; int i_rect_width = task_rect_width[i]; int i_rect_height = task_rect_height[i]; if (scale_id == 0 && scales[0] == 1) { if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } if (!pnet[thread_id].Forward(task_pnet_images[thread_id])) continue; const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1"); int task_count = 0; //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } } bool _Pnet_stage(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& firstBbox) { if (thread_num <= 0) return false; double t1 = omp_get_wtime(); firstBbox.clear(); std::vector<std::vector<float> > maps; std::vector<int> mapH; std::vector<int> mapW; if (thread_num == 1 && 
!force_run_pnet_multithread) { pnet[0].TurnOffShowDebugInfo(); //pnet[0].TurnOnShowDebugInfo(); _compute_Pnet_single_thread(input, maps, mapH, mapW); } else { _compute_Pnet_multi_thread(input, maps, mapH, mapW); } ZQ_CNN_OrderScore order; std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(scales.size()); std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(scales.size()); const int block_size = 32; int stride = pnet_stride; int cellsize = pnet_size; int border_size = cellsize / stride; for (int i = 0; i < maps.size(); i++) { double t13 = omp_get_wtime(); int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; float cur_scale_x = (float)width / changedW; float cur_scale_y = (float)height / changedH; int count = 0; //score p int scoreH = mapH[i]; int scoreW = mapW[i]; const float *p = &maps[i][0]; if (scoreW <= block_size && scoreH < block_size) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bounding_boxes[i].push_back(bbox); bounding_scores[i].push_back(order); count++; } p ++; } } int before_count = bounding_boxes[i].size(); ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } else { int before_count = 0, after_count = 0; int block_H_num = __max(1, scoreH / block_size); int block_W_num = __max(1, scoreW / block_size); int block_num = block_H_num*block_W_num; int width_per_block = scoreW / block_W_num; int height_per_block = scoreH / block_H_num; std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num); std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num); std::vector<int> block_start_w(block_num), block_end_w(block_num); std::vector<int> block_start_h(block_num), block_end_h(block_num); for (int bh = 0; bh < block_H_num; bh++) { for (int bw = 0; bw < block_W_num; bw++) { int bb = bh * block_W_num + bw; block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size); /* fix: compare bw/bh against block_W_num/block_H_num (was block_num), so the edge blocks extend to the score-map border */ block_end_w[bb] = (bw == block_W_num - 1) ? scoreW : ((bw + 1)*width_per_block); block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size); block_end_h[bb] = (bh == block_H_num - 1) ?
scoreH : ((bh + 1)*height_per_block); } } int chunk_size = 1;// ceil((float)block_num / thread_num); if (thread_num <= 1) { for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } else { for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { const float* p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } count = 0; for (int bb = 0; bb < block_num; bb++) { std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin(); for (; it != tmp_bounding_boxes[bb].end(); it++) { if ((*it).exist) { bounding_boxes[i].push_back(*it); order.score = (*it).score; order.oriOrder = count; bounding_scores[i].push_back(order); count++; } } } //ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0); after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } } std::vector<ZQ_CNN_OrderScore> firstOrderScore; int count = 0; for (int i = 0; i < scales.size(); i++) { std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin(); for (; it != bounding_boxes[i].end(); it++) { if ((*it).exist) { firstBbox.push_back(*it); 
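// ---------------------------------------------------------------------------
// Illustrative sketch (kept out of compilation): how the per-scale loops above
// turn a Pnet score-map cell (row, col) into an image-space candidate. Each
// cell covers a cellsize x cellsize window anchored at stride*row / stride*col
// in the scaled image, then is mapped back with cur_scale_x / cur_scale_y
// (the scale-back happens after the per-scale NMS). `cell_to_box` is a
// hypothetical name, not part of ZQ_CNN.
#if 0
static void cell_to_box(int row, int col, int stride, int cellsize,
                        float cur_scale_x, float cur_scale_y,
                        float& row1, float& col1, float& row2, float& col2)
{
    row1 = (float)round(stride * row * cur_scale_y);
    col1 = (float)round(stride * col * cur_scale_x);
    row2 = (float)round((stride * row + cellsize) * cur_scale_y);
    col2 = (float)round((stride * col + cellsize) * cur_scale_x);
}
#endif
// ---------------------------------------------------------------------------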
order.score = (*it).score; order.oriOrder = count; firstOrderScore.push_back(order); count++; } } } //the first stage's nms if (count < 1) return false; double t15 = omp_get_wtime(); ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1); ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height,true); double t16 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms\n", 1000 * (t16 - t15)); if (show_debug_info) printf("first stage candidate count: %d\n", count); double t3 = omp_get_wtime(); if (show_debug_info) printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t1)); return true; } bool _Rnet_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& firstBbox, std::vector<ZQ_CNN_BBox>& secondBbox) { double t3 = omp_get_wtime(); secondBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin(); std::vector<ZQ_CNN_OrderScore> secondScore; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int r_count = 0; for (; it != firstBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); r_count++; secondBbox.push_back(*it); } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)r_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)r_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_rnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_secondBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(r_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_secondBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_secondBbox[i][j] = secondBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[0].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D* score = rnet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = rnet[0].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j 
= 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } else { for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[thread_id].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D* score = rnet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = rnet[thread_id].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_secondBbox[i].size(); } secondBbox.resize(count); secondScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_secondBbox[i].size(); j++) { secondBbox[id] = task_secondBbox[i][j]; secondScore[id].score = secondBbox[id].score; secondScore[id].oriOrder = id; id++; } } //ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Union"); ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min"); ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true); count = secondBbox.size(); double t4 = omp_get_wtime(); if (show_debug_info) printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count); if (show_debug_info) printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3)); return true; } bool _Onet_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& secondBbox, std::vector<ZQ_CNN_BBox>& thirdBbox) { double t4 = omp_get_wtime(); thirdBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin(); std::vector<ZQ_CNN_OrderScore> thirdScore; std::vector<ZQ_CNN_BBox> early_accept_thirdBbox; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int o_count = 0; for (; it != secondBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { if (!do_landmark && it->score > 
early_accept_thresh) { early_accept_thirdBbox.push_back(*it); } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); o_count++; thirdBbox.push_back(*it); } } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)o_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)o_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_onet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_thirdBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(o_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_thirdBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_thirdBbox[i][j] = thirdBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[0].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* score = onet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = onet[0].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D* keyPoint = onet[0].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) 
task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } else { for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[thread_id].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* score = onet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = onet[thread_id].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D* keyPoint = onet[thread_id].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_thirdBbox[i].size(); } thirdBbox.resize(count); thirdScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_thirdBbox[i].size(); j++) { thirdBbox[id] = task_thirdBbox[i][j]; thirdScore[id].score = task_thirdBbox[i][j].score; thirdScore[id].oriOrder = id; id++; } } ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false); ZQ_CNN_OrderScore order; for (int i = 0; i < early_accept_thirdBbox.size(); i++) { order.score = early_accept_thirdBbox[i].score; order.oriOrder = count++; thirdScore.push_back(order); thirdBbox.push_back(early_accept_thirdBbox[i]); } ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min"); double t5 = omp_get_wtime(); if (show_debug_info) printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count); if (show_debug_info) printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool _Lnet_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox>& fourthBbox) { double t4 = omp_get_wtime(); fourthBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); 
it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j] = copy_fourthBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } else { for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = 
keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } fourthBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(fourthBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 10); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool _Lnet106_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox106>& resultBbox) { double t4 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> fourthBbox; std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox106> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j].col1 = copy_fourthBbox[st_id + j].col1; task_fourthBbox[i][j].col2 = copy_fourthBbox[st_id + j].col2; task_fourthBbox[i][j].row1 = copy_fourthBbox[st_id 
+ j].row1; task_fourthBbox[i][j].row2 = copy_fourthBbox[st_id + j].row2; task_fourthBbox[i][j].area = copy_fourthBbox[st_id + j].area; task_fourthBbox[i][j].score = copy_fourthBbox[st_id + j].score; task_fourthBbox[i][j].exist = copy_fourthBbox[st_id + j].exist; } } } resultBbox.resize(l_count); for (int i = 0; i < l_count; i++) { resultBbox[i].col1 = fourthBbox[i].col1; resultBbox[i].col2 = fourthBbox[i].col2; resultBbox[i].row1 = fourthBbox[i].row1; resultBbox[i].row2 = fourthBbox[i].row2; resultBbox[i].score = fourthBbox[i].score; resultBbox[i].exist = fourthBbox[i].exist; resultBbox[i].area = fourthBbox[i].area; } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104)) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]/**0.25*/; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]/**0.25*/; } else { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } } else { for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104)) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]*0.5; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]*0.5; } else { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - 
task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } resultBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(resultBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 212); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } void _select(std::vector<ZQ_CNN_BBox>& bbox, int limit_num, int width, int height) { int in_num = bbox.size(); if (limit_num >= in_num) return; bbox.resize(limit_num); } }; } #endif
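The L-net stages in this file all map normalized network outputs back to image space: the landmark blob holds coordinates in [0,1] relative to the square crop, so each point is rescaled by the crop's width/height and offset by its corner. The 5-point variant stores all x values before all y values (ppoint[num], ppoint[num + 5]); the 106-point variant interleaves (x, y) pairs. A minimal sketch of the interleaved case follows; the names Box and denormalize_landmarks are illustrative, not part of ZQ_CNN:

#include <cstddef>

struct Box { float col1, row1, col2, row2; };

// kp holds n interleaved (x, y) pairs in [0,1] relative to the crop;
// out receives the corresponding image-space pairs.
void denormalize_landmarks(const Box& b, const float* kp, float* out, std::size_t n)
{
    const float w = b.col2 - b.col1;
    const float h = b.row2 - b.row1;
    for (std::size_t i = 0; i < n; ++i) {
        out[2 * i]     = b.col1 + w * kp[2 * i];      // x
        out[2 * i + 1] = b.row1 + h * kp[2 * i + 1];  // y
    }
}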
#ifndef _ZQ_CNN_MTCNN_H_ #define _ZQ_CNN_MTCNN_H_ #pragma once #include "ZQ_CNN_Net.h" #include "ZQ_CNN_BBoxUtils.h" #include <omp.h> namespace ZQ { class ZQ_CNN_MTCNN { public: using string = std::string; ZQ_CNN_MTCNN() { min_size = 60; thresh[0] = 0.6; thresh[1] = 0.7; thresh[2] = 0.7; nms_thresh[0] = 0.6; nms_thresh[1] = 0.7; nms_thresh[2] = 0.7; width = 0; height = 0; factor = 0.709; pnet_overlap_thresh_count = 4; pnet_size = 12; pnet_stride = 2; special_handle_very_big_face = false; force_run_pnet_multithread = false; show_debug_info = false; limit_r_num = 0; limit_o_num = 0; limit_l_num = 0; } ~ZQ_CNN_MTCNN() { } private: #if __ARM_NEON const int BATCH_SIZE = 16; #else const int BATCH_SIZE = 64; #endif std::vector<ZQ_CNN_Net> pnet, rnet, onet, lnet; bool has_lnet; int thread_num; float thresh[3], nms_thresh[3]; int min_size; int width, height; float factor; int pnet_overlap_thresh_count; int pnet_size; int pnet_stride; int rnet_size; int onet_size; int lnet_size; bool special_handle_very_big_face; bool do_landmark; float early_accept_thresh; float nms_thresh_per_scale; bool force_run_pnet_multithread; std::vector<float> scales; std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> pnet_images; ZQ_CNN_Tensor4D_NHW_C_Align128bit ori_input, rnet_image, onet_image; bool show_debug_info; int limit_r_num; int limit_o_num; int limit_l_num; public: void TurnOnShowDebugInfo() { show_debug_info = true; } void TurnOffShowDebugInfo() { show_debug_info = false; } void SetLimit(int limit_r = 0, int limit_o = 0, int limit_l = 0) { limit_r_num = limit_r; limit_o_num = limit_o; limit_l_num = limit_l; } bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model, const string& onet_param, const string& onet_model, int thread_num = 1, bool has_lnet = false, const string& lnet_param = "", const std::string& lnet_model = "") { if (thread_num < 1) force_run_pnet_multithread = true; else force_run_pnet_multithread = false; thread_num = __max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); this->has_lnet = has_lnet; if (has_lnet) { lnet.resize(thread_num); } bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFrom(pnet_param, pnet_model,true,1e-9, true) && rnet[i].LoadFrom(rnet_param, rnet_model, true, 1e-9, true) && onet[i].LoadFrom(onet_param, onet_model, true, 1e-9, true); if (has_lnet && ret) ret = lnet[i].LoadFrom(lnet_param, lnet_model, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); if (has_lnet) lnet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.2f M, onet = %.2f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); if (has_lnet) printf("lnet = %.2f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; if (has_lnet) { lnet[0].GetInputDim(C, H, W); lnet_size = H; } return ret; } bool InitFromBuffer( const char* pnet_param, __int64 pnet_param_len, const char* pnet_model, __int64 pnet_model_len, const char* rnet_param, __int64 rnet_param_len, const char* rnet_model, __int64 rnet_model_len, const char* onet_param, __int64 onet_param_len, const char* onet_model, __int64 onet_model_len, int thread_num = 1, bool has_lnet = false, const char* lnet_param = 0, __int64 lnet_param_len = 0, const char* lnet_model = 0, __int64 lnet_model_len = 0) { if 
(thread_num < 1) force_run_pnet_multithread = true; else force_run_pnet_multithread = false; thread_num = __max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); this->has_lnet = has_lnet; if(has_lnet) lnet.resize(thread_num); bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFromBuffer(pnet_param, pnet_param_len,pnet_model,pnet_model_len, true, 1e-9, true) && rnet[i].LoadFromBuffer(rnet_param, rnet_param_len, rnet_model, rnet_model_len, true, 1e-9, true) && onet[i].LoadFromBuffer(onet_param, onet_param_len, onet_model, onet_model_len, true, 1e-9, true); if (has_lnet && ret) ret = lnet[i].LoadFromBuffer(lnet_param, lnet_param_len, lnet_model, lnet_model_len, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); if (has_lnet) lnet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); if (has_lnet) printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; if (has_lnet) { lnet [0].GetInputDim (C, H, W); lnet_size = H; } return ret; } void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7, float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709, int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2, bool special_handle_very_big_face = false, bool do_landmark = true, float early_accept_thresh = 1.00) { min_size = __max(pnet_size, min_face_size); thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh); nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh); scale_factor = __max(0.5, __min(0.97, scale_factor)); this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count); this->pnet_size = pnet_size; this->pnet_stride = pnet_stride; this->special_handle_very_big_face = special_handle_very_big_face; this->do_landmark = do_landmark; this->early_accept_thresh = early_accept_thresh; if (pnet_size == 20 && pnet_stride == 4) nms_thresh_per_scale = 0.45; else nms_thresh_per_scale = 0.495; if (width != w || height != h || factor != scale_factor) { scales.clear(); pnet_images.clear(); width = w; height = h; float minside = __min(width, height); int MIN_DET_SIZE = pnet_size; float m = (float)MIN_DET_SIZE / min_size; minside *= m; while (minside > MIN_DET_SIZE) { scales.push_back(m); minside *= factor; m *= factor; } minside = __min(width, height); int count = scales.size(); for (int i = scales.size() - 1; i >= 0; i--) { if (ceil(scales[i] * minside) <= pnet_size) { count--; } } if (special_handle_very_big_face) { if (count > 2) count--; scales.resize(count); if (count > 0) { float last_size = ceil(scales[count - 1] * minside); for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2) { scales.push_back((float)tmp_size / minside); count++; } } scales.push_back((float)pnet_size / minside); count++; } else { scales.push_back((float)pnet_size / minside); count++; } pnet_images.resize(count); } } bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results) { double t1 = omp_get_wtime(); if (width != _width || height != 
_height) return false; if (!ori_input.ConvertFromBGR(bgr_img, width, height, _widthStep)) return false; double t2 = omp_get_wtime(); if (show_debug_info) printf("convert cost: %.3f ms\n", 1000 * (t2 - t1)); return Find(ori_input, results); } bool Find106(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox106>& results) { double t1 = omp_get_wtime(); if (width != _width || height != _height) return false; if (!ori_input.ConvertFromBGR(bgr_img, width, height, _widthStep)) return false; double t2 = omp_get_wtime(); if (show_debug_info) printf("convert cost: %.3f ms\n", 1000 * (t2 - t1)); return Find106(ori_input, results); } bool Find(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& results) { double t1 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox; if (!_Pnet_stage(input, firstBbox)) return false; //results = firstBbox; //return true; if (limit_r_num > 0) { _select(firstBbox, limit_r_num, input.GetW(), input.GetH()); } double t2 = omp_get_wtime(); if (!_Rnet_stage(input, firstBbox, secondBbox)) return false; //results = secondBbox; //return true; if (limit_o_num > 0) { _select(secondBbox, limit_o_num, input.GetW(), input.GetH()); } if (!has_lnet || !do_landmark) { double t3 = omp_get_wtime(); if (!_Onet_stage(input, secondBbox, results)) return false; double t4 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n", 1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3)); } } else { double t3 = omp_get_wtime(); if (!_Onet_stage(input, secondBbox, thirdBbox)) return false; if (limit_l_num > 0) { _select(thirdBbox, limit_l_num, input.GetW(), input.GetH()); } double t4 = omp_get_wtime(); if (!_Lnet_stage(input, thirdBbox, results)) return false; double t5 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n", 1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4)); } } return true; } bool Find106(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox106>& results) { double t1 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox; if (!_Pnet_stage(input, firstBbox)) return false; //results = firstBbox; //return true; if (limit_r_num > 0) { _select(firstBbox, limit_r_num, input.GetW(), input.GetH()); } double t2 = omp_get_wtime(); if (!_Rnet_stage(input, firstBbox, secondBbox)) return false; //results = secondBbox; //return true; if (limit_o_num > 0) { _select(secondBbox, limit_o_num, input.GetW(), input.GetH()); } if (!has_lnet || !do_landmark) { return false; } double t3 = omp_get_wtime(); if (!_Onet_stage(input, secondBbox, thirdBbox)) return false; if (limit_l_num > 0) { _select(thirdBbox, limit_l_num, input.GetW(), input.GetH()); } double t4 = omp_get_wtime(); if (!_Lnet106_stage(input, thirdBbox, results)) return false; double t5 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n", 1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4)); } return true; } private: void _compute_Pnet_single_thread(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW) { int 
scale_num = 0; for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } maps.resize(scale_num); for (int i = 0; i < scale_num; i++) { maps[i].resize(mapH[i] * mapW[i]); } for (int i = 0; i < scale_num; i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); float cur_scale_x = (float)width / changedW; float cur_scale_y = (float)height / changedH; double t10 = omp_get_wtime(); if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } double t11 = omp_get_wtime(); if (scales[i] != 1) pnet[0].Forward(pnet_images[i]); else pnet[0].Forward(input); double t12 = omp_get_wtime(); if (show_debug_info) printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n", i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11)); const ZQ_CNN_Tensor4D* score = pnet[0].GetBlobByName("prob1"); //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if(row < mapH[i] && col < mapW[i]) maps[i][row*mapW[i] + col] = *p; p += scorePixStep; } } } } void _compute_Pnet_multi_thread(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW) { if (thread_num <= 1) { for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic, 1) for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } } } int scale_num = 0; for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } maps.resize(scale_num); for (int i = 0; i < scale_num; i++) { maps[i].resize(mapH[i] * mapW[i]); } std::vector<int> task_rect_off_x; std::vector<int> task_rect_off_y; std::vector<int> task_rect_width; std::vector<int> task_rect_height; std::vector<float> task_scale; std::vector<int> task_scale_id; int stride = pnet_stride; const int block_size = 64 * stride; int cellsize = pnet_size; int border_size = cellsize - stride; int overlap_border_size = cellsize / stride; int jump_size = block_size - border_size; for (int i = 0; i < scales.size(); i++) { int changeH = (int)ceil(height*scales[i]); int changeW = (int)ceil(width*scales[i]); if (changeH < pnet_size || changeW < pnet_size) continue; int block_H_num = 0; int block_W_num = 0; int start = 0; while (start < changeH) { block_H_num++; if (start + block_size >= changeH) break; start += jump_size; } start = 0; while (start < changeW) { block_W_num++; if (start + 
block_size >= changeW) break; start += jump_size; } for (int s = 0; s < block_H_num; s++) { for (int t = 0; t < block_W_num; t++) { int rect_off_x = t * jump_size; int rect_off_y = s * jump_size; int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x; int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y; if (rect_width >= cellsize && rect_height >= cellsize) { task_rect_off_x.push_back(rect_off_x); task_rect_off_y.push_back(rect_off_y); task_rect_width.push_back(rect_width); task_rect_height.push_back(rect_height); task_scale.push_back(scales[i]); task_scale_id.push_back(i); } } } } // int task_num = task_scale.size(); std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_pnet_images(thread_num); if (thread_num <= 1) { for (int i = 0; i < task_num; i++) { int thread_id = 0;// omp_get_thread_num(); int scale_id = task_scale_id[i]; float cur_scale = task_scale[i]; int i_rect_off_x = task_rect_off_x[i]; int i_rect_off_y = task_rect_off_y[i]; int i_rect_width = task_rect_width[i]; int i_rect_height = task_rect_height[i]; if (scale_id == 0 && scales[0] == 1) { if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } if (!pnet[thread_id].Forward(task_pnet_images[thread_id])) continue; const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1"); int task_count = 0; //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } else { #pragma omp parallel for num_threads(thread_num) for (int i = 0; i < task_num; i++) { int thread_id = omp_get_thread_num(); int scale_id = task_scale_id[i]; float cur_scale = task_scale[i]; int i_rect_off_x = task_rect_off_x[i]; int i_rect_off_y = task_rect_off_y[i]; int i_rect_width = task_rect_width[i]; int i_rect_height = task_rect_height[i]; if (scale_id == 0 && scales[0] == 1) { if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } if (!pnet[thread_id].Forward(task_pnet_images[thread_id])) continue; const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1"); int task_count = 0; //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } } bool _Pnet_stage(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& firstBbox) { if (thread_num <= 0) return false; double t1 = omp_get_wtime(); firstBbox.clear(); 
std::vector<std::vector<float> > maps; std::vector<int> mapH; std::vector<int> mapW; if (thread_num == 1 && !force_run_pnet_multithread) { pnet[0].TurnOffShowDebugInfo(); //pnet[0].TurnOnShowDebugInfo(); _compute_Pnet_single_thread(input, maps, mapH, mapW); } else { _compute_Pnet_multi_thread(input, maps, mapH, mapW); } ZQ_CNN_OrderScore order; std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(scales.size()); std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(scales.size()); const int block_size = 32; int stride = pnet_stride; int cellsize = pnet_size; int border_size = cellsize / stride; for (int i = 0; i < maps.size(); i++) { double t13 = omp_get_wtime(); int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; float cur_scale_x = (float)width / changedW; float cur_scale_y = (float)height / changedH; int count = 0; //score p int scoreH = mapH[i]; int scoreW = mapW[i]; const float *p = &maps[i][0]; if (scoreW <= block_size && scoreH < block_size) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bounding_boxes[i].push_back(bbox); bounding_scores[i].push_back(order); count++; } p ++; } } int before_count = bounding_boxes[i].size(); ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } else { int before_count = 0, after_count = 0; int block_H_num = __max(1, scoreH / block_size); int block_W_num = __max(1, scoreW / block_size); int block_num = block_H_num*block_W_num; int width_per_block = scoreW / block_W_num; int height_per_block = scoreH / block_H_num; std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num); std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num); std::vector<int> block_start_w(block_num), block_end_w(block_num); std::vector<int> block_start_h(block_num), block_end_h(block_num); for (int bh = 0; bh < block_H_num; bh++) { for (int bw = 0; bw < block_W_num; bw++) { int bb = bh * block_W_num + bw; block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size); block_end_w[bb] = (bw == block_num - 1) ? scoreW : ((bw + 1)*width_per_block); block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size); block_end_h[bb] = (bh == block_num - 1) ? 
scoreH : ((bh + 1)*height_per_block); } } int chunk_size = 1;// ceil((float)block_num / thread_num); if (thread_num <= 1) { for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } else { #pragma omp parallel for schedule(dynamic, chunk_size) num_threads(thread_num) for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { const float* p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } count = 0; for (int bb = 0; bb < block_num; bb++) { std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin(); for (; it != tmp_bounding_boxes[bb].end(); it++) { if ((*it).exist) { bounding_boxes[i].push_back(*it); order.score = (*it).score; order.oriOrder = count; bounding_scores[i].push_back(order); count++; } } } //ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0); after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } } std::vector<ZQ_CNN_OrderScore> firstOrderScore; int count = 0; for (int i = 0; i < scales.size(); i++) { std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin(); for (; it != 
bounding_boxes[i].end(); it++) { if ((*it).exist) { firstBbox.push_back(*it); order.score = (*it).score; order.oriOrder = count; firstOrderScore.push_back(order); count++; } } } //the first stage's nms if (count < 1) return false; double t15 = omp_get_wtime(); ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1); ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height,true); double t16 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms\n", 1000 * (t16 - t15)); if (show_debug_info) printf("first stage candidate count: %d\n", count); double t3 = omp_get_wtime(); if (show_debug_info) printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t1)); return true; } bool _Rnet_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& firstBbox, std::vector<ZQ_CNN_BBox>& secondBbox) { double t3 = omp_get_wtime(); secondBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin(); std::vector<ZQ_CNN_OrderScore> secondScore; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int r_count = 0; for (; it != firstBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); r_count++; secondBbox.push_back(*it); } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)r_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)r_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_rnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_secondBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(r_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_secondBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_secondBbox[i][j] = secondBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[0].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D* score = rnet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = rnet[0].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < 
task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[thread_id].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D* score = rnet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = rnet[thread_id].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_secondBbox[i].size(); } secondBbox.resize(count); secondScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_secondBbox[i].size(); j++) { secondBbox[id] = task_secondBbox[i][j]; secondScore[id].score = secondBbox[id].score; secondScore[id].oriOrder = id; id++; } } //ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Union"); ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min"); ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true); count = secondBbox.size(); double t4 = omp_get_wtime(); if (show_debug_info) printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count); if (show_debug_info) printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3)); return true; } bool _Onet_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& secondBbox, std::vector<ZQ_CNN_BBox>& thirdBbox) { double t4 = omp_get_wtime(); thirdBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin(); std::vector<ZQ_CNN_OrderScore> thirdScore; std::vector<ZQ_CNN_BBox> early_accept_thirdBbox; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int o_count = 0; for (; it != secondBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || 
off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { if (!do_landmark && it->score > early_accept_thresh) { early_accept_thirdBbox.push_back(*it); } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); o_count++; thirdBbox.push_back(*it); } } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)o_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)o_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_onet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_thirdBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(o_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_thirdBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_thirdBbox[i][j] = thirdBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[0].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* score = onet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = onet[0].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D* keyPoint = onet[0].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { 
task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[thread_id].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* score = onet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = onet[thread_id].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D* keyPoint = onet[thread_id].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_thirdBbox[i].size(); } thirdBbox.resize(count); thirdScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_thirdBbox[i].size(); j++) { thirdBbox[id] = task_thirdBbox[i][j]; thirdScore[id].score = task_thirdBbox[i][j].score; thirdScore[id].oriOrder = id; id++; } } ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false); ZQ_CNN_OrderScore order; for (int i = 0; i < early_accept_thirdBbox.size(); i++) { order.score = early_accept_thirdBbox[i].score; order.oriOrder = count++; thirdScore.push_back(order); thirdBbox.push_back(early_accept_thirdBbox[i]); } ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min"); double t5 = omp_get_wtime(); if (show_debug_info) printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count); if (show_debug_info) printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool _Lnet_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox>& fourthBbox) { double t4 = 
omp_get_wtime(); fourthBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j] = copy_fourthBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], 
task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } fourthBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(fourthBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 10); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool _Lnet106_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox106>& resultBbox) { double t4 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> fourthBbox; std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox106> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = 
src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j].col1 = copy_fourthBbox[st_id + j].col1; task_fourthBbox[i][j].col2 = copy_fourthBbox[st_id + j].col2; task_fourthBbox[i][j].row1 = copy_fourthBbox[st_id + j].row1; task_fourthBbox[i][j].row2 = copy_fourthBbox[st_id + j].row2; task_fourthBbox[i][j].area = copy_fourthBbox[st_id + j].area; task_fourthBbox[i][j].score = copy_fourthBbox[st_id + j].score; task_fourthBbox[i][j].exist = copy_fourthBbox[st_id + j].exist; } } } resultBbox.resize(l_count); for (int i = 0; i < l_count; i++) { resultBbox[i].col1 = fourthBbox[i].col1; resultBbox[i].col2 = fourthBbox[i].col2; resultBbox[i].row1 = fourthBbox[i].row1; resultBbox[i].row2 = fourthBbox[i].row2; resultBbox[i].score = fourthBbox[i].score; resultBbox[i].exist = fourthBbox[i].exist; resultBbox[i].area = fourthBbox[i].area; } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104)) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]/**0.25*/; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]/**0.25*/; } else { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } } else { #pragma omp parallel for num_threads(thread_num) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104)) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]*0.5; 
task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]*0.5; } else { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } resultBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(resultBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 212); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } void _select(std::vector<ZQ_CNN_BBox>& bbox, int limit_num, int width, int height) { int in_num = bbox.size(); if (limit_num >= in_num) return; bbox.resize(limit_num); } }; } #endif
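The R-net, O-net and L-net stages above share one batching pattern: candidate boxes are split into tasks of at most BATCH_SIZE, each task crops and resizes its boxes with one ResizeBilinearRect call, and a per-thread network copy (rnet[thread_id], onet[thread_id], lnet[thread_id]) runs Forward on the batch, which keeps the parallel loop free of races on network state. A sketch of just the partitioning, assuming thread_num >= 1 and a caller-supplied process(st, en, thread_id) callback standing in for the real per-stage work:

#include <omp.h>
#include <algorithm>
#include <cmath>
#include <functional>

void run_batched(int n, int thread_num, int batch_size,
                 const std::function<void(int, int, int)>& process)
{
    // Start with one task per thread, then cap the batch size and grow the
    // task count, mirroring how the stages compute per_num/need_thread_num.
    int per_num = (int)std::ceil((float)n / thread_num);
    int tasks = thread_num;
    if (per_num > batch_size) {
        tasks = (int)std::ceil((float)n / batch_size);
        per_num = batch_size;
    }
#pragma omp parallel for num_threads(thread_num) schedule(dynamic, 1)
    for (int t = 0; t < tasks; ++t) {
        int st = per_num * t;
        int en = std::min(n, per_num * (t + 1));
        if (st < en)
            process(st, en, omp_get_thread_num());  // one net per thread
    }
}

Two caveats worth noting in the original. In the blocked P-net NMS, before_count and after_count are plain shared ints incremented inside the parallel loop; they only feed the debug printf, but a reduction(+:...) clause would make them exact. And several branches guard with task_src_off_x.size() == 0, where the per-task task_src_off_x[pp].size() is the meaningful check; the O-net stage tests both.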
syncbench.c
/**************************************************************************** * * * OpenMP MicroBenchmark Suite - Version 3.0 * * * * produced by * * * * Mark Bull, Fiona Reid and Nix Mc Donnell * * * * at * * * * Edinburgh Parallel Computing Centre * * * * email: [email protected] or [email protected] * * * * * * This version copyright (c) The University of Edinburgh, 2011. * * * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * ****************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include "common.h" #include "syncbench.h" omp_lock_t lock; int main(int argc, char **argv) { // Start Paraver tracing #ifdef PARAVERTRACE Extrae_init(); #endif init(argc, argv); omp_init_lock(&lock); /* GENERATE REFERENCE TIME */ reference("reference time 1", &refer); /* TEST PARALLEL REGION */ benchmark("PARALLEL", &testpr); /* TEST FOR */ benchmark("FOR", &testfor); /* TEST PARALLEL FOR */ benchmark("PARALLEL FOR", &testpfor); /* TEST BARRIER */ benchmark("BARRIER", &testbar); /* TEST SINGLE */ benchmark("SINGLE", &testsing); /* TEST CRITICAL*/ benchmark("CRITICAL", &testcrit); /* TEST LOCK/UNLOCK */ benchmark("LOCK/UNLOCK", &testlock); /* TEST ORDERED SECTION */ benchmark("ORDERED", &testorder); /* GENERATE NEW REFERENCE TIME */ reference("reference time 2", &referatom); /* TEST ATOMIC */ benchmark("ATOMIC", &testatom); /* GENERATE NEW REFERENCE TIME */ reference("reference time 3", &referred); /* TEST REDUCTION (1 var) */ benchmark("REDUCTION", &testred); #ifdef PARAVERTRACE Extrae_fini(); #endif finalise(); return EXIT_SUCCESS; } void refer() { int j; for (j = 0; j < innerreps; j++) { delay(delaylength); } } void referatom(){ int j; double aaaa = 0.0; double epsilon = 1.0e-15; double b, c; b = 1.0; c = (1.0 + epsilon); for (j = 0; j < innerreps; j++) { aaaa += b; b *= c; } if (aaaa < 0.0) printf("%f\n", aaaa); } void referred() { int j; int aaaa = 0; for (j = 0; j < innerreps; j++) { delay(delaylength); aaaa += 1; } } void testpr() { int j; for (j = 0; j < innerreps; j++) { #pragma omp parallel { delay(delaylength); } } } void testfor() { int i, j; #pragma omp parallel private(j) { for (j = 0; j < innerreps; j++) { #pragma omp for for (i = 0; i < nthreads; i++) { delay(delaylength); } } } } void testpfor() { int i, j; for (j = 0; j < innerreps; j++) { #pragma omp parallel for for (i = 0; i < nthreads; i++) { delay(delaylength); } } } void testbar() { int j; #pragma omp parallel private(j) { for (j = 0; j < innerreps; j++) { delay(delaylength); #pragma omp barrier } } } void testsing() { int j; #pragma omp parallel private(j) { for (j = 0; j < innerreps; j++) { #pragma omp single delay(delaylength); } } } void testcrit() { int j; #pragma omp parallel private(j) { for (j = 0; j < innerreps / nthreads; j++) { #pragma omp critical { delay(delaylength); } } } } void testlock() { int j; #pragma omp parallel private(j) { for (j = 0; j < innerreps / nthreads; j++) { omp_set_lock(&lock); delay(delaylength); 
omp_unset_lock(&lock); } } } void testorder() { int j; #pragma omp parallel for ordered schedule (static,1) for (j = 0; j < (int)innerreps; j++) { #pragma omp ordered delay(delaylength); } } void testatom() { int j; double aaaa = 0.0; double epsilon = 1.0e-15; double b,c; b = 1.0; c = (1.0 + epsilon); #pragma omp parallel private(j) firstprivate(b) { for (j = 0; j < innerreps / nthreads; j++) { #pragma omp atomic aaaa += b; b *= c; } } if (aaaa < 0.0) printf("%f\n", aaaa); } void testred() { int j; int aaaa = 0; for (j = 0; j < innerreps; j++) { #pragma omp parallel reduction(+:aaaa) { delay(delaylength); aaaa += 1; } } }
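The suite's measurement idea is simple: time innerreps executions of each construct wrapped around a fixed delay, time the same delay without the construct (the "reference time"), and report the difference per iteration as the construct's overhead. Below is a minimal, self-contained sketch of that idea for the PARALLEL case only; INNERREPS, DELAYLENGTH, delay_loop and the single-shot timing are stand-ins for the suite's init()/benchmark() plumbing in common.c, which additionally repeats the measurement and computes statistics.

#include <stdio.h>
#include <omp.h>

#define INNERREPS   10000
#define DELAYLENGTH 500

static volatile double sink;   /* defeats dead-code elimination of the delay */

static void delay_loop(int n)
{
    double a = 0.0;
    int i;
    for (i = 0; i < n; i++)
        a += (double) i;
    sink = a;
}

int main(void)
{
    double t0, ref, par;
    int j;

    /* reference: the delay alone */
    t0 = omp_get_wtime();
    for (j = 0; j < INNERREPS; j++)
        delay_loop(DELAYLENGTH);
    ref = omp_get_wtime() - t0;

    /* test: the same delay inside a parallel region */
    t0 = omp_get_wtime();
    for (j = 0; j < INNERREPS; j++) {
#pragma omp parallel
        delay_loop(DELAYLENGTH);
    }
    par = omp_get_wtime() - t0;

    printf("PARALLEL overhead: %g microseconds per region\n",
           1.0e6 * (par - ref) / INNERREPS);
    return 0;
}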
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include "common.h" #include "syncbench.h" omp_lock_t lock; int main(int argc, char **argv) { //Start Paraver tracing #ifdef PARAVERTRACE Extrae_init(); #endif init(argc, argv); omp_init_lock(&lock); /* GENERATE REFERENCE TIME */ reference("reference time 1", &refer); /* TEST PARALLEL REGION */ benchmark("PARALLEL", &testpr); /* TEST FOR */ benchmark("FOR", &testfor); /* TEST PARALLEL FOR */ benchmark("PARALLEL FOR", &testpfor); /* TEST BARRIER */ benchmark("BARRIER", &testbar); /* TEST SINGLE */ benchmark("SINGLE", &testsing); /* TEST CRITICAL */ benchmark("CRITICAL", &testcrit); /* TEST LOCK/UNLOCK */ benchmark("LOCK/UNLOCK", &testlock); /* TEST ORDERED SECTION */ benchmark("ORDERED", &testorder); /* GENERATE NEW REFERENCE TIME */ reference("reference time 2", &referatom); /* TEST ATOMIC */ benchmark("ATOMIC", &testatom); /* GENERATE NEW REFERENCE TIME */ reference("reference time 3", &referred); /* TEST REDUCTION (1 var) */ benchmark("REDUCTION", &testred); #ifdef PARAVERTRACE Extrae_fini(); #endif finalise(); return EXIT_SUCCESS; } void refer() { int j; for (j = 0; j < innerreps; j++) { delay(delaylength); } } void referatom() { int j; double aaaa = 0.0; double epsilon = 1.0e-15; double b, c; b = 1.0; c = (1.0 + epsilon); for (j = 0; j < innerreps; j++) { aaaa += b; b *= c; } if (aaaa < 0.0) printf("%f\n", aaaa); } void referred() { int j; int aaaa = 0; for (j = 0; j < innerreps; j++) { delay(delaylength); aaaa += 1; } } void testpr() { int j; for (j = 0; j < innerreps; j++) { delay(delaylength); } } void testfor() { int i, j; for (j = 0; j < innerreps; j++) { for (i = 0; i < nthreads; i++) { delay(delaylength); } } } void testpfor() { int i, j; for (j = 0; j < innerreps; j++) { for (i = 0; i < nthreads; i++) { delay(delaylength); } } } void testbar() { int j; for (j = 0; j < innerreps; j++) { delay(delaylength); } } void testsing() { int j; for (j = 0; j < innerreps; j++) { delay(delaylength); } } void testcrit() { int j; for (j = 0; j < innerreps / nthreads; j++) { #pragma omp critical { delay(delaylength); } } } void testlock() { int j; for (j = 0; j < innerreps / nthreads; j++) { omp_set_lock(&lock); delay(delaylength); omp_unset_lock(&lock); } } void testorder() { int j; for (j = 0; j < (int)innerreps; j++) { delay(delaylength); } } void testatom() { int j; double aaaa = 0.0; double epsilon = 1.0e-15; double b, c; b = 1.0; c = (1.0 + epsilon); for (j = 0; j < innerreps / nthreads; j++) { aaaa += b; b *= c; } if (aaaa < 0.0) printf("%f\n", aaaa); } void testred() { int j; int aaaa = 0; for (j = 0; j < innerreps; j++) { delay(delaylength); aaaa += 1; } }
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include "common.h" #include "syncbench.h" omp_lock_t lock; int main(int argc, char **argv) { //Start Paraver tracing #ifdef PARAVERTRACE Extrae_init(); #endif init(argc, argv); omp_init_lock(&lock); /* GENERATE REFERENCE TIME */ reference("reference time 1", &refer); /* TEST PARALLEL REGION */ benchmark("PARALLEL", &testpr); /* TEST FOR */ benchmark("FOR", &testfor); /* TEST PARALLEL FOR */ benchmark("PARALLEL FOR", &testpfor); /* TEST BARRIER */ benchmark("BARRIER", &testbar); /* TEST SINGLE */ benchmark("SINGLE", &testsing); /* TEST CRITICAL */ benchmark("CRITICAL", &testcrit); /* TEST LOCK/UNLOCK */ benchmark("LOCK/UNLOCK", &testlock); /* TEST ORDERED SECTION */ benchmark("ORDERED", &testorder); /* GENERATE NEW REFERENCE TIME */ reference("reference time 2", &referatom); /* TEST ATOMIC */ benchmark("ATOMIC", &testatom); /* GENERATE NEW REFERENCE TIME */ reference("reference time 3", &referred); /* TEST REDUCTION (1 var) */ benchmark("REDUCTION", &testred); #ifdef PARAVERTRACE Extrae_fini(); #endif finalise(); return EXIT_SUCCESS; } void refer() { int j; for (j = 0; j < innerreps; j++) { delay(delaylength); } } void referatom() { int j; double aaaa = 0.0; double epsilon = 1.0e-15; double b, c; b = 1.0; c = (1.0 + epsilon); for (j = 0; j < innerreps; j++) { aaaa += b; b *= c; } if (aaaa < 0.0) printf("%f\n", aaaa); } void referred() { int j; int aaaa = 0; for (j = 0; j < innerreps; j++) { delay(delaylength); aaaa += 1; } } void testpr() { int j; for (j = 0; j < innerreps; j++) { #pragma omp parallel { delay(delaylength); } } } void testfor() { int i, j; #pragma omp parallel private(j) { for (j = 0; j < innerreps; j++) { #pragma omp for for (i = 0; i < nthreads; i++) { delay(delaylength); } } } } void testpfor() { int i, j; for (j = 0; j < innerreps; j++) { #pragma omp parallel for for (i = 0; i < nthreads; i++) { delay(delaylength); } } } void testbar() { int j; #pragma omp parallel private(j) { for (j = 0; j < innerreps; j++) { delay(delaylength); #pragma omp barrier } } } void testsing() { int j; #pragma omp parallel private(j) { for (j = 0; j < innerreps; j++) { #pragma omp single delay(delaylength); } } } void testcrit() { int j; #pragma omp parallel private(j) { for (j = 0; j < innerreps / nthreads; j++) { #pragma omp critical { delay(delaylength); } } } } void testlock() { int j; #pragma omp parallel private(j) { for (j = 0; j < innerreps / nthreads; j++) { omp_set_lock(&lock); delay(delaylength); omp_unset_lock(&lock); } } } void testorder() { int j; #pragma omp parallel for ordered schedule (static,1) for (j = 0; j < (int)innerreps; j++) { #pragma omp ordered delay(delaylength); } } void testatom() { int j; double aaaa = 0.0; double epsilon = 1.0e-15; double b, c; b = 1.0; c = (1.0 + epsilon); #pragma omp parallel private(j) firstprivate(b) { for (j = 0; j < innerreps / nthreads; j++) { #pragma omp atomic aaaa += b; b *= c; } } if (aaaa < 0.0) printf("%f\n", aaaa); } void testred() { int j; int aaaa = 0; for (j = 0; j < innerreps; j++) { #pragma omp parallel reduction(+:aaaa) { delay(delaylength); aaaa += 1; } } }
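Note how the three columns relate: original_c carries the pragmas as published, no_omp_formatted strips them (so testpr degenerates to the same loop as refer), and omp_formatted keeps them. One semantic point the ATOMIC and REDUCTION tests rely on, and the reason each gets its own reference time (referatom, referred): reduction(+:var) gives every thread a private copy initialized to zero and combines the copies once at the end of the region, whereas atomic synchronizes on every single update. A minimal sketch, not part of the suite, contrasting the two patterns; both compute the same total when N divides evenly by the thread count.

#include <stdio.h>
#include <omp.h>

#define N 1000000

int main(void)
{
    long atomic_sum = 0;
    long reduced_sum = 0;
    int j;

    /* ATOMIC pattern: every increment is a synchronized update */
#pragma omp parallel private(j)
    {
        for (j = 0; j < N / omp_get_num_threads(); j++) {
#pragma omp atomic
            atomic_sum += 1;
        }
    }

    /* REDUCTION pattern: each thread accumulates a private copy,
       combined exactly once when the region ends */
#pragma omp parallel private(j) reduction(+:reduced_sum)
    {
        for (j = 0; j < N / omp_get_num_threads(); j++)
            reduced_sum += 1;
    }

    printf("atomic: %ld  reduction: %ld\n", atomic_sum, reduced_sum);
    return 0;
}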
origin.h
#pragma once #include <assert.h> #include <iostream> //#include <malloc.h> #include <memory> #include "omp.h" #include <stdlib.h> #include <time.h> #include <vector> using namespace std; void bcxy_kcrs_conv(float *In, float *Ker, float *Out, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh) { int StrideX=uSx; int StrideY=uSy; #pragma omp parallel for collapse(5) for (int b = 0; b < Nb; b++) { for (int t = 0; t < Nt; t++) { for (int x = 0; x < Nx; x++) { for (int y = 0; y < Ny; y++) { for (int s = 0; s < Ns; s++) { for (int w = 0; w < Nw; w++) { for (int h = 0; h < Nh; h++) { Out[b * Nt * Nx * Ny + t * Nx * Ny + x * Ny + y] += In[b * Ns * (StrideX*Nx + Nw - 1) * (StrideY*Ny + Nh - 1) + s * (StrideX* Nx + Nw - 1) * (StrideY*Ny + Nh - 1) + (StrideX*x + w) * (StrideY*Ny + Nh - 1) + (StrideY*y + h)] * Ker[t * Ns * Nw * Nh + s * Nw * Nh + w * Nh + h]; } } } } } } } } void origin_conv(float *In, float *Ker, float *Out, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh) { int StrideX=uSx; int StrideY=uSy; #pragma omp parallel for collapse(5) for (int b = 0; b < Nb; b++) { for (int t = 0; t < Nt; t++) { for (int x = 0; x < Nx; x++) { for (int y = 0; y < Ny; y++) { for (int w = 0; w < Nw; w++) { for (int h = 0; h < Nh; h++) { for (int s = 0; s < Ns; s++) { /* Out[b * Nt * Nx * Ny + t * Nx * Ny + x * Ny + y] += */ /* In[b * Ns * (Nx + Nw - 1) * (Ny + Nh - 1) + */ /* s * (Nx + Nw - 1) * (Ny + Nh - 1) + */ /* (x + w) * (Ny + Nh - 1) + (y + h)] * */ /* Ker[t * Ns * Nw * Nh + s * Nw * Nh + w * Nh + h]; */ int kt1 = t /LKF; int kt2 = t %LKF; int ot1 = t /LOF; int ot2 = t %LOF; int s1 = s / LC; int s2 = s%LC; int Ooffset = b* Nt * Nx * Ny + ot1 * Nx * Ny*LOF + x*Ny*LOF + y*LOF + ot2; int Ioffset = b * Ns * (StrideX*Nx + Nw - 1) * (StrideY*Ny + Nh - 1) + s1 * (StrideX*Nx + Nw - 1) * (StrideY*Ny + Nh - 1) * LC + (StrideX*x + w) * (StrideY*Ny + Nh - 1)*LC + (StrideY*y + h) * LC + s2; int Koffset = kt1 * Ns * Nw * Nh * LKF + s * Nw * Nh*LKF + w * Nh*LKF + h*LKF + kt2; Out[Ooffset] += In[Ioffset]* Ker[Koffset]; // if(Ooffset == 896){ // cout<<"Inoff="<<Ioffset<<", Koff="<<Koffset<<endl; // } } } } } } } } } int compare(float *C1, float *C2, int size) { cout << "comparing" << endl; cout<<C1[0]<<","<<C2[0]<<endl; for (int i = 0; i < size; i++) { if(C2[i]==0){ cout<<"C2 is 0"<<endl; return -1; } if (C1[i] != C2[i]) { cout << "data at " << i << " C1=" << C1[i] << ", C2=" << C2[i] << endl; return -1; } } cout << "fin compare\n"; return 0; }
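Both kernels above address the input as a Nb x Ns x (StrideX*Nx + Nw - 1) x (StrideY*Ny + Nh - 1) tensor: the footprint of a "valid" strided convolution producing an Nx x Ny output from an Nw x Nh kernel, with uSx/uSy supplied as compile-time stride macros. A quick check of that arithmetic, with hypothetical sizes: the last element read along x is StrideX*(Nx-1) + Nw - 1, so the allocation is exactly tight for stride 1 and over-allocates by StrideX - 1 elements for larger strides.

#include <stdio.h>

int main(void)
{
    int Nx = 8, Nw = 3, StrideX = 2;       /* hypothetical sizes */
    int inW   = StrideX * Nx + Nw - 1;     /* allocated width: 18 */
    int needW = StrideX * (Nx - 1) + Nw;   /* last index read + 1: 17 */

    printf("allocated %d, touched %d\n", inW, needW);
    /* inW - needW == StrideX - 1: tight exactly when StrideX == 1 */
    return 0;
}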
#pragma once #include <assert.h> #include <iostream> //#include <malloc.h> #include <memory> #include "omp.h" #include <stdlib.h> #include <time.h> #include <vector> using namespace std; void bcxy_kcrs_conv(float *In, float *Ker, float *Out, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh) { int StrideX=uSx; int StrideY=uSy; for (int b = 0; b < Nb; b++) { for (int t = 0; t < Nt; t++) { for (int x = 0; x < Nx; x++) { for (int y = 0; y < Ny; y++) { for (int s = 0; s < Ns; s++) { for (int w = 0; w < Nw; w++) { for (int h = 0; h < Nh; h++) { Out[b * Nt * Nx * Ny + t * Nx * Ny + x * Ny + y] += In[b * Ns * (StrideX*Nx + Nw - 1) * (StrideY*Ny + Nh - 1) + s * (StrideX* Nx + Nw - 1) * (StrideY*Ny + Nh - 1) + (StrideX*x + w) * (StrideY*Ny + Nh - 1) + (StrideY*y + h)] * Ker[t * Ns * Nw * Nh + s * Nw * Nh + w * Nh + h]; } } } } } } } } void origin_conv(float *In, float *Ker, float *Out, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh) { int StrideX=uSx; int StrideY=uSy; for (int b = 0; b < Nb; b++) { for (int t = 0; t < Nt; t++) { for (int x = 0; x < Nx; x++) { for (int y = 0; y < Ny; y++) { for (int w = 0; w < Nw; w++) { for (int h = 0; h < Nh; h++) { for (int s = 0; s < Ns; s++) { /* Out[b * Nt * Nx * Ny + t * Nx * Ny + x * Ny + y] += */ /* In[b * Ns * (Nx + Nw - 1) * (Ny + Nh - 1) + */ /* s * (Nx + Nw - 1) * (Ny + Nh - 1) + */ /* (x + w) * (Ny + Nh - 1) + (y + h)] * */ /* Ker[t * Ns * Nw * Nh + s * Nw * Nh + w * Nh + h]; */ int kt1 = t /LKF; int kt2 = t %LKF; int ot1 = t /LOF; int ot2 = t %LOF; int s1 = s / LC; int s2 = s%LC; int Ooffset = b* Nt * Nx * Ny + ot1 * Nx * Ny*LOF + x*Ny*LOF + y*LOF + ot2; int Ioffset = b * Ns * (StrideX*Nx + Nw - 1) * (StrideY*Ny + Nh - 1) + s1 * (StrideX*Nx + Nw - 1) * (StrideY*Ny + Nh - 1) * LC + (StrideX*x + w) * (StrideY*Ny + Nh - 1)*LC + (StrideY*y + h) * LC + s2; int Koffset = kt1 * Ns * Nw * Nh * LKF + s * Nw * Nh*LKF + w * Nh*LKF + h*LKF + kt2; Out[Ooffset] += In[Ioffset]* Ker[Koffset]; // if(Ooffset == 896){ // cout<<"Inoff="<<Ioffset<<", Koff="<<Koffset<<endl; // } } } } } } } } } int compare(float *C1, float *C2, int size) { cout << "comparing" << endl; cout<<C1[0]<<","<<C2[0]<<endl; for (int i = 0; i < size; i++) { if(C2[i]==0){ cout<<"C2 is 0"<<endl; return -1; } if (C1[i] != C2[i]) { cout << "data at " << i << " C1=" << C1[i] << ", C2=" << C2[i] << endl; return -1; } } cout << "fin compare\n"; return 0; }
#pragma once #include <assert.h> #include <iostream> //#include <malloc.h> #include <memory> #include "omp.h" #include <stdlib.h> #include <time.h> #include <vector> using namespace std; void bcxy_kcrs_conv(float *In, float *Ker, float *Out, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh) { int StrideX=uSx; int StrideY=uSy; #pragma omp parallel for collapse(5) for (int b = 0; b < Nb; b++) { for (int t = 0; t < Nt; t++) { for (int x = 0; x < Nx; x++) { for (int y = 0; y < Ny; y++) { for (int s = 0; s < Ns; s++) { for (int w = 0; w < Nw; w++) { for (int h = 0; h < Nh; h++) { Out[b * Nt * Nx * Ny + t * Nx * Ny + x * Ny + y] += In[b * Ns * (StrideX*Nx + Nw - 1) * (StrideY*Ny + Nh - 1) + s * (StrideX* Nx + Nw - 1) * (StrideY*Ny + Nh - 1) + (StrideX*x + w) * (StrideY*Ny + Nh - 1) + (StrideY*y + h)] * Ker[t * Ns * Nw * Nh + s * Nw * Nh + w * Nh + h]; } } } } } } } } void origin_conv(float *In, float *Ker, float *Out, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh) { int StrideX=uSx; int StrideY=uSy; #pragma omp parallel for collapse(5) for (int b = 0; b < Nb; b++) { for (int t = 0; t < Nt; t++) { for (int x = 0; x < Nx; x++) { for (int y = 0; y < Ny; y++) { for (int w = 0; w < Nw; w++) { for (int h = 0; h < Nh; h++) { for (int s = 0; s < Ns; s++) { /* Out[b * Nt * Nx * Ny + t * Nx * Ny + x * Ny + y] += */ /* In[b * Ns * (Nx + Nw - 1) * (Ny + Nh - 1) + */ /* s * (Nx + Nw - 1) * (Ny + Nh - 1) + */ /* (x + w) * (Ny + Nh - 1) + (y + h)] * */ /* Ker[t * Ns * Nw * Nh + s * Nw * Nh + w * Nh + h]; */ int kt1 = t /LKF; int kt2 = t %LKF; int ot1 = t /LOF; int ot2 = t %LOF; int s1 = s / LC; int s2 = s%LC; int Ooffset = b* Nt * Nx * Ny + ot1 * Nx * Ny*LOF + x*Ny*LOF + y*LOF + ot2; int Ioffset = b * Ns * (StrideX*Nx + Nw - 1) * (StrideY*Ny + Nh - 1) + s1 * (StrideX*Nx + Nw - 1) * (StrideY*Ny + Nh - 1) * LC + (StrideX*x + w) * (StrideY*Ny + Nh - 1)*LC + (StrideY*y + h) * LC + s2; int Koffset = kt1 * Ns * Nw * Nh * LKF + s * Nw * Nh*LKF + w * Nh*LKF + h*LKF + kt2; Out[Ooffset] += In[Ioffset]* Ker[Koffset]; // if(Ooffset == 896){ // cout<<"Inoff="<<Ioffset<<", Koff="<<Koffset<<endl; // } } } } } } } } } int compare(float *C1, float *C2, int size) { cout << "comparing" << endl; cout<<C1[0]<<","<<C2[0]<<endl; for (int i = 0; i < size; i++) { if(C2[i]==0){ cout<<"C2 is 0"<<endl; return -1; } if (C1[i] != C2[i]) { cout << "data at " << i << " C1=" << C1[i] << ", C2=" << C2[i] << endl; return -1; } } cout << "fin compare\n"; return 0; }
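One caution about the collapse(5) in the parallel variants of both kernels: the collapsed index set includes a loop that the output index does not depend on (s in bcxy_kcrs_conv, w in origin_conv), so two collapsed iterations that differ only in that variable can land on different threads and both do += into the same Out element, which is a data race. Collapsing only the loops whose combination owns a distinct output element avoids it. A sketch of the race-free form for the first kernel follows; it passes the strides as parameters instead of the uSx/uSy macros so the sketch is self-contained, and the index expressions are algebraically identical to the originals.

#include <omp.h>

void bcxy_kcrs_conv_norace(float *In, float *Ker, float *Out,
                           int Nb, int Nt, int Nx, int Ny,
                           int Ns, int Nw, int Nh,
                           int StrideX, int StrideY)
{
    int inW = StrideX * Nx + Nw - 1;   /* padded input width  */
    int inH = StrideY * Ny + Nh - 1;   /* padded input height */
    /* each (b,t,x,y) combination owns exactly one Out element,
       so collapsing these four loops introduces no shared writes */
#pragma omp parallel for collapse(4)
    for (int b = 0; b < Nb; b++)
        for (int t = 0; t < Nt; t++)
            for (int x = 0; x < Nx; x++)
                for (int y = 0; y < Ny; y++) {
                    float acc = 0.0f;  /* thread-private accumulator */
                    for (int s = 0; s < Ns; s++)
                        for (int w = 0; w < Nw; w++)
                            for (int h = 0; h < Nh; h++)
                                acc += In[((b * Ns + s) * inW
                                           + (StrideX * x + w)) * inH
                                          + (StrideY * y + h)]
                                     * Ker[((t * Ns + s) * Nw + w) * Nh + h];
                    Out[((b * Nt + t) * Nx + x) * Ny + y] += acc;
                }
}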
wand-view.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % W W AAA N N DDDD % % W W A A NN N D D % % W W W AAAAA N N N D D % % WW WW A A N NN D D % % W W A A N N DDDD % % % % V V IIIII EEEEE W W % % V V I E W W % % V V I EEE W W W % % V V I E WW WW % % V IIIII EEEEE W W % % % % % % MagickWand Wand View Methods % % % % Software Design % % Cristy % % March 2003 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* Define declarations. */ #define WandViewId "WandView" /* Typedef declarations. */ struct _WandView { size_t id; char name[MaxTextExtent], *description; RectangleInfo extent; MagickWand *wand; CacheView *view; size_t number_threads; PixelWand ***pixel_wands; ExceptionInfo *exception; MagickBooleanType debug; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e W a n d V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneWandView() makes a copy of the specified wand view. % % The format of the CloneWandView method is: % % WandView *CloneWandView(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. 
% */ WandExport WandView *CloneWandView(const WandView *wand_view) { WandView *clone_view; register ssize_t i; assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); if (wand_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name); clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view)); if (clone_view == (WandView *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", wand_view->name); (void) memset(clone_view,0,sizeof(*clone_view)); clone_view->id=AcquireWandId(); (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g", WandViewId,(double) clone_view->id); clone_view->description=ConstantString(wand_view->description); clone_view->view=CloneCacheView(wand_view->view); clone_view->extent=wand_view->extent; clone_view->number_threads=wand_view->number_threads; clone_view->exception=AcquireExceptionInfo(); InheritException(clone_view->exception,wand_view->exception); for (i=0; i < (ssize_t) wand_view->number_threads; i++) clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **) wand_view->pixel_wands[i],wand_view->extent.width); clone_view->debug=wand_view->debug; if (clone_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name); clone_view->signature=WandSignature; return(clone_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y W a n d V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyWandView() deallocates memory associated with a wand view. % % The format of the DestroyWandView method is: % % WandView *DestroyWandView(WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. % */ static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands, const size_t number_wands,const size_t number_threads) { register ssize_t i; assert(pixel_wands != (PixelWand ***) NULL); for (i=0; i < (ssize_t) number_threads; i++) if (pixel_wands[i] != (PixelWand **) NULL) pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands); pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands); return(pixel_wands); } WandExport WandView *DestroyWandView(WandView *wand_view) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands, wand_view->extent.width,wand_view->number_threads); wand_view->view=DestroyCacheView(wand_view->view); wand_view->exception=DestroyExceptionInfo(wand_view->exception); wand_view->signature=(~WandSignature); RelinquishWandId(wand_view->id); wand_view=(WandView *) RelinquishMagickMemory(wand_view); return(wand_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DuplexTransferWandViewIterator() iterates over three wand views in % parallel and calls your transfer method for each scanline of the view. The % source and duplex pixel extent is not confined to the image canvas-- that is % you can include negative offsets or widths or heights that exceed the image % dimension. 
However, the destination wand view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % The callback signature is: % % MagickBooleanType DuplexTransferImageViewMethod(const WandView *source, % const WandView *duplex,WandView *destination,const ssize_t y, % const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferWandViewIterator method is: % % MagickBooleanType DuplexTransferWandViewIterator(WandView *source, % WandView *duplex,WandView *destination, % DuplexTransferWandViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source wand view. % % o duplex: the duplex wand view. % % o destination: the destination wand view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source, WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer, void *context) { ExceptionInfo *exception; Image *destination_image, *duplex_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (transfer == (DuplexTransferWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; duplex_image=duplex->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (source->extent.height-source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *magick_restrict duplex_indexes, *magick_restrict indexes; register const PixelPacket *magick_restrict duplex_pixels, *magick_restrict pixels; register IndexPacket *magick_restrict destination_indexes; register ssize_t x; register PixelPacket *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y, duplex->extent.width,1,duplex->exception); if (duplex_pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view); for (x=0; x < (ssize_t) 
duplex->extent.width; x++) PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x); if (duplex_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) duplex->extent.width; x++) PixelSetBlackQuantum(duplex->pixel_wands[id][x], GetPixelBlack(duplex_indexes+x)); if (duplex_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) duplex->extent.width; x++) PixelSetIndex(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelBlack(destination_indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes+x)); if (transfer(source,duplex,destination,y,id,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewException() returns the severity, reason, and description of any % error that occurs when utilizing a wand view. % % The format of the GetWandViewException method is: % % char *GetWandViewException(const WandView *wand_view, % ExceptionType *severity) % % A description of each parameter follows: % % o wand_view: the pixel wand_view. % % o severity: the severity of the error is returned here. 
% */ WandExport char *GetWandViewException(const WandView *wand_view, ExceptionType *severity) { char *description; assert(wand_view != (const WandView *) NULL); assert(wand_view->signature == WandSignature); if (wand_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name); assert(severity != (ExceptionType *) NULL); *severity=wand_view->exception->severity; description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent, sizeof(*description)); if (description == (char *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", wand_view->name); *description='\0'; if (wand_view->exception->reason != (char *) NULL) (void) CopyMagickString(description,GetLocaleExceptionMessage( wand_view->exception->severity,wand_view->exception->reason), MaxTextExtent); if (wand_view->exception->description != (char *) NULL) { (void) ConcatenateMagickString(description," (",MaxTextExtent); (void) ConcatenateMagickString(description,GetLocaleExceptionMessage( wand_view->exception->severity,wand_view->exception->description), MaxTextExtent); (void) ConcatenateMagickString(description,")",MaxTextExtent); } return(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewExtent() returns the wand view extent. % % The format of the GetWandViewExtent method is: % % RectangleInfo GetWandViewExtent(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. % */ WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); return(wand_view->extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewIterator() iterates over the wand view in parallel and calls % your get method for each scanline of the view. The pixel extent is % not confined to the image canvas-- that is you can include negative offsets % or widths or heights that exceed the image dimension. Any updates to % the pixels in your callback are ignored. % % The callback signature is: % % MagickBooleanType GetImageViewMethod(const WandView *source, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback get method that must be % executed by a single thread at a time. % % The format of the GetWandViewIterator method is: % % MagickBooleanType GetWandViewIterator(WandView *source, % GetWandViewMethod get,void *context) % % A description of each parameter follows: % % o source: the source wand view. % % o get: the get callback method. % % o context: the user defined context. 
% */ WandExport MagickBooleanType GetWandViewIterator(WandView *source, GetWandViewMethod get,void *context) { Image *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (get == (GetWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (source->extent.height-source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,source_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *indexes; register const PixelPacket *pixels; register ssize_t x; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (get(source,y,id,context) == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewPixels() returns the wand view pixel_wands. % % The format of the GetWandViewPixels method is: % % PixelWand *GetWandViewPixels(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. % */ WandExport PixelWand **GetWandViewPixels(const WandView *wand_view) { const int id = GetOpenMPThreadId(); assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); return(wand_view->pixel_wands[id]); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewWand() returns the magick wand associated with the wand view. % % The format of the GetWandViewWand method is: % % MagickWand *GetWandViewWand(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. 
% */ WandExport MagickWand *GetWandViewWand(const WandView *wand_view) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); return(wand_view->wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s W a n d V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsWandView() returns MagickTrue if the the parameter is verified as a wand % view object. % % The format of the IsWandView method is: % % MagickBooleanType IsWandView(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. % */ WandExport MagickBooleanType IsWandView(const WandView *wand_view) { size_t length; if (wand_view == (const WandView *) NULL) return(MagickFalse); if (wand_view->signature != WandSignature) return(MagickFalse); length=strlen(WandViewId); if (LocaleNCompare(wand_view->name,WandViewId,length) != 0) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w W a n d V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewWandView() returns a wand view required for all other methods in the % Wand View API. % % The format of the NewWandView method is: % % WandView *NewWandView(MagickWand *wand) % % A description of each parameter follows: % % o wand: the wand. % */ static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands, const size_t number_threads) { PixelWand ***pixel_wands; register ssize_t i; pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads, sizeof(*pixel_wands)); if (pixel_wands == (PixelWand ***) NULL) return((PixelWand ***) NULL); (void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands)); for (i=0; i < (ssize_t) number_threads; i++) { pixel_wands[i]=NewPixelWands(number_wands); if (pixel_wands[i] == (PixelWand **) NULL) return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads)); } return(pixel_wands); } WandExport WandView *NewWandView(MagickWand *wand) { WandView *wand_view; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view)); if (wand_view == (WandView *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", GetExceptionMessage(errno)); (void) memset(wand_view,0,sizeof(*wand_view)); wand_view->id=AcquireWandId(); (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g", WandViewId,(double) wand_view->id); wand_view->description=ConstantString("WandView"); wand_view->wand=wand; wand_view->exception=AcquireExceptionInfo(); wand_view->view=AcquireVirtualCacheView(wand_view->wand->images, wand_view->exception); wand_view->extent.width=wand->images->columns; wand_view->extent.height=wand->images->rows; wand_view->number_threads=GetOpenMPMaximumThreads(); wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width, wand_view->number_threads); if (wand_view->pixel_wands == (PixelWand ***) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", GetExceptionMessage(errno)); wand_view->debug=IsEventLogging(); wand_view->signature=WandSignature; return(wand_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w W a n d V i e w E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
NewWandViewExtent() returns a wand view required for all other methods % in the Wand View API. % % The format of the NewWandViewExtent method is: % % WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x, % const ssize_t y,const size_t width,const size_t height) % % A description of each parameter follows: % % o wand: the magick wand. % % o x,y,columns,rows: These values define the perimeter of a extent of % pixel_wands view. % */ WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x, const ssize_t y,const size_t width,const size_t height) { WandView *wand_view; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view)); if (wand_view == (WandView *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", GetExceptionMessage(errno)); (void) memset(wand_view,0,sizeof(*wand_view)); wand_view->id=AcquireWandId(); (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g", WandViewId,(double) wand_view->id); wand_view->description=ConstantString("WandView"); wand_view->exception=AcquireExceptionInfo(); wand_view->view=AcquireVirtualCacheView(wand_view->wand->images, wand_view->exception); wand_view->wand=wand; wand_view->extent.width=width; wand_view->extent.height=height; wand_view->extent.x=x; wand_view->extent.y=y; wand_view->number_threads=GetOpenMPMaximumThreads(); wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width, wand_view->number_threads); if (wand_view->pixel_wands == (PixelWand ***) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", GetExceptionMessage(errno)); wand_view->debug=IsEventLogging(); wand_view->signature=WandSignature; return(wand_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t W a n d V i e w D e s c r i p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetWandViewDescription() associates a description with an image view. % % The format of the SetWandViewDescription method is: % % void SetWandViewDescription(WandView *image_view,const char *description) % % A description of each parameter follows: % % o wand_view: the wand view. % % o description: the wand view description. % */ MagickExport void SetWandViewDescription(WandView *wand_view, const char *description) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); wand_view->description=ConstantString(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetWandViewIterator() iterates over the wand view in parallel and calls % your set method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension. The pixels are initiallly % undefined and any settings you make in the callback method are automagically % synced back to your image. % % The callback signature is: % % MagickBooleanType SetImageViewMethod(ImageView *destination, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback set method that must be % executed by a single thread at a time. 
% % The format of the SetWandViewIterator method is: % % MagickBooleanType SetWandViewIterator(WandView *destination, % SetWandViewMethod set,void *context) % % A description of each parameter follows: % % o destination: the wand view. % % o set: the set callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType SetWandViewIterator(WandView *destination, SetWandViewMethod set,void *context) { ExceptionInfo *exception; Image *destination_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(destination != (WandView *) NULL); assert(destination->signature == WandSignature); if (set == (SetWandViewMethod) NULL) return(MagickFalse); destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (destination->extent.height-destination->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(destination_image,destination_image,height,1) #endif for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict pixels; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x, y,destination->extent.width,1,exception); if (pixels == (PixelPacket *) NULL) { InheritException(destination->exception,GetCacheViewException( destination->view)); status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(destination->view); if (set(destination,y,id,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( destination->view)); status=MagickFalse; } if (destination_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(destination_image,destination->description, progress++,destination->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t W a n d V i e w T h r e a d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetWandViewThreads() sets the number of threads in a thread team. % % The format of the SetWandViewDescription method is: % % void SetWandViewThreads(WandView *image_view, % const size_t number_threads) % % A description of each parameter follows: % % o image_view: the image view. % % o number_threads: the number of threads in a thread team. 
% */ MagickExport void SetWandViewThreads(WandView *image_view, const size_t number_threads) { assert(image_view != (WandView *) NULL); assert(image_view->signature == MagickCoreSignature); image_view->number_threads=number_threads; if (number_threads > (size_t) GetMagickResourceLimit(ThreadResource)) image_view->number_threads=GetOpenMPMaximumThreads(); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f e r W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransferWandViewIterator() iterates over two wand views in parallel and % calls your transfer method for each scanline of the view. The source pixel % extent is not confined to the image canvas-- that is you can include % negative offsets or widths or heights that exceed the image dimension. % However, the destination wand view is confined to the image canvas-- that % is no negative offsets or widths or heights that exceed the image dimension % are permitted. % % The callback signature is: % % MagickBooleanType TransferImageViewMethod(const WandView *source, % WandView *destination,const ssize_t y,const int thread_id, % void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the TransferWandViewIterator method is: % % MagickBooleanType TransferWandViewIterator(WandView *source, % WandView *destination,TransferWandViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source wand view. % % o destination: the destination wand view. % % o transfer: the transfer callback method. % % o context: the user defined context. 
% */ WandExport MagickBooleanType TransferWandViewIterator(WandView *source, WandView *destination,TransferWandViewMethod transfer,void *context) { ExceptionInfo *exception; Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (transfer == (TransferWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (source->extent.height-source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict pixels; register IndexPacket *magick_restrict destination_indexes; register ssize_t x; register PixelPacket *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; 
proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateWandViewIterator() iterates over the wand view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y, % const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. % % The format of the UpdateWandViewIterator method is: % % MagickBooleanType UpdateWandViewIterator(WandView *source, % UpdateWandViewMethod update,void *context) % % A description of each parameter follows: % % o source: the source wand view. % % o update: the update callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType UpdateWandViewIterator(WandView *source, UpdateWandViewMethod update,void *context) { ExceptionInfo *exception; Image *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (update == (UpdateWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; if (SetImageStorageClass(source_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=source->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (source->extent.height-source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,source_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict pixels; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y, source->extent.width,1,exception); if (pixels == (PixelPacket *) NULL) { InheritException(source->exception,GetCacheViewException( source->view)); status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(source->view); for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (update(source,y,id,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) source->extent.width; x++) PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) SetPixelBlack(indexes+x,PixelGetBlackQuantum( source->pixel_wands[id][x])); if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse) { 
InheritException(source->exception,GetCacheViewException(source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); }
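The iterator documentation above repeatedly recommends #pragma omp critical for any shared state a callback touches, since the per-scanline callbacks run concurrently under the iterators' parallel for loops. A minimal sketch of a GetWandViewIterator() callback in that documented pattern: MyContext and mean_red_callback are hypothetical names for illustration, while GetWandViewPixels(), GetWandViewExtent() and PixelGetRed() are the accessors defined in this file and in the PixelWand API. The per-pixel loop stays lock-free; only the two shared updates sit inside the named critical section.

#include "wand/MagickWand.h"

typedef struct { double sum; size_t count; } MyContext;   /* hypothetical */

static MagickBooleanType mean_red_callback(const WandView *source,
  const ssize_t y,const int thread_id,void *context)
{
  MyContext *ctx = (MyContext *) context;
  PixelWand **pixels;
  RectangleInfo extent;
  double row_sum = 0.0;
  ssize_t x;

  (void) y;
  (void) thread_id;
  pixels=GetWandViewPixels(source);     /* this thread's row of pixel wands */
  extent=GetWandViewExtent(source);
  for (x=0; x < (ssize_t) extent.width; x++)
    row_sum+=PixelGetRed(pixels[x]);
  #pragma omp critical (mean_red)
  {
    ctx->sum+=row_sum;                  /* shared accumulator: guard it */
    ctx->count+=(size_t) extent.width;
  }
  return(MagickTrue);
}

Invoked as GetWandViewIterator(view, mean_red_callback, &ctx) with a zero-initialized MyContext; afterwards ctx.sum/ctx.count is the mean red channel over the view.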
/* * Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* * Define declarations. */ #define WandViewId "WandView" /* * Typedef declarations. */ struct _WandView { size_t id; char name[MaxTextExtent], *description; RectangleInfo extent; MagickWand * wand; CacheView * view; size_t number_threads; PixelWand *** pixel_wands; ExceptionInfo * exception; MagickBooleanType debug; size_t signature; }; /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C l o n e W a n d V i e w * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CloneWandView() makes a copy of the specified wand view. % % * The format of the CloneWandView method is: % % WandView * *CloneWandView(const WandView *wand_view) % % A description of each * parameter follows: % % o wand_view: the wand view. % */ WandExport WandView * CloneWandView(const WandView * wand_view) { WandView * clone_view; register ssize_t i; assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); if (wand_view->debug != MagickFalse) (void)LogMagickEvent(WandEvent, GetMagickModule(), "%s", wand_view->name); clone_view = (WandView *) AcquireMagickMemory(sizeof(*clone_view)); if (clone_view == (WandView *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed", wand_view->name); (void)memset(clone_view, 0, sizeof(*clone_view)); clone_view->id = AcquireWandId(); (void)FormatLocaleString(clone_view->name, MaxTextExtent, "%s-%.20g", WandViewId, (double)clone_view->id); clone_view->description = ConstantString(wand_view->description); clone_view->view = CloneCacheView(wand_view->view); clone_view->extent = wand_view->extent; clone_view->number_threads = wand_view->number_threads; clone_view->exception = AcquireExceptionInfo(); InheritException(clone_view->exception, wand_view->exception); for (i = 0; i < (ssize_t) wand_view->number_threads; i++) clone_view->pixel_wands[i] = ClonePixelWands((const PixelWand **) wand_view->pixel_wands[i], wand_view->extent.width); clone_view->debug = wand_view->debug; if (clone_view->debug != MagickFalse) (void)LogMagickEvent(WandEvent, GetMagickModule(), "%s", clone_view->name); clone_view->signature = WandSignature; return (clone_view); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D e s t r o y W a n d V i e w * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyWandView() deallocates memory associated with a wand * view. % % The format of the DestroyWandView method is: % % WandView * *DestroyWandView(WandView *wand_view) % % A description of each parameter * follows: % % o wand_view: the wand view. 
% */ static PixelWand *** DestroyPixelsThreadSet(PixelWand *** pixel_wands, const size_t number_wands, const size_t number_threads) { register ssize_t i; assert(pixel_wands != (PixelWand ***) NULL); for (i = 0; i < (ssize_t) number_threads; i++) if (pixel_wands[i] != (PixelWand **) NULL) pixel_wands[i] = DestroyPixelWands(pixel_wands[i], number_wands); pixel_wands = (PixelWand ***) RelinquishMagickMemory(pixel_wands); return (pixel_wands); } WandExport WandView * DestroyWandView(WandView * wand_view) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); wand_view->pixel_wands = DestroyPixelsThreadSet(wand_view->pixel_wands, wand_view->extent.width, wand_view->number_threads); wand_view->view = DestroyCacheView(wand_view->view); wand_view->exception = DestroyExceptionInfo(wand_view->exception); wand_view->signature = (~WandSignature); RelinquishWandId(wand_view->id); wand_view = (WandView *) RelinquishMagickMemory(wand_view); return (wand_view); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DuplexTransferWandViewIterator() iterates over three wand views * in % parallel and calls your transfer method for each scanline of the * view. The % source and duplex pixel extent is not confined to the image * canvas-- that is % you can include negative offsets or widths or heights * that exceed the image % dimension. However, the destination wand view is * confined to the image % canvas-- that is no negative offsets or widths or * heights that exceed the % image dimension are permitted. % % The * callback signature is: % % MagickBooleanType * DuplexTransferImageViewMethod(const WandView *source, % const * WandView *duplex,WandView *destination,const ssize_t y, % const int * thread_id,void *context) % % Use this pragma if the view is not single * threaded: % % % % to define a section of code in your callback transfer * method that must be % executed by a single thread at a time. % % The * format of the DuplexTransferWandViewIterator method is: % % * MagickBooleanType DuplexTransferWandViewIterator(WandView *source, % * WandView *duplex,WandView *destination, % * DuplexTransferWandViewMethod transfer,void *context) % % A description of * each parameter follows: % % o source: the source wand view. % % o * duplex: the duplex wand view. % % o destination: the destination wand * view. % % o transfer: the transfer callback method. % % o context: * the user defined context. 
% */ WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView * source, WandView * duplex, WandView * destination, DuplexTransferWandViewMethod transfer, void *context) { ExceptionInfo * exception; Image * destination_image, *duplex_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (transfer == (DuplexTransferWandViewMethod) NULL) return (MagickFalse); source_image = source->wand->images; duplex_image = duplex->wand->images; destination_image = destination->wand->images; if (SetImageStorageClass(destination_image, DirectClass) == MagickFalse) return (MagickFalse); status = MagickTrue; progress = 0; exception = destination->exception; for (y = source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket * magick_restrict duplex_indexes, *magick_restrict indexes; register const PixelPacket * magick_restrict duplex_pixels, *magick_restrict pixels; register IndexPacket * magick_restrict destination_indexes; register ssize_t x; register PixelPacket * magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels = GetCacheViewVirtualPixels(source->view, source->extent.x, y, source->extent.width, 1, source->exception); if (pixels == (const PixelPacket *)NULL) { status = MagickFalse; continue; } indexes = GetCacheViewVirtualIndexQueue(source->view); for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x], pixels + x); if (source_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes + x)); if (source_image->storage_class == PseudoClass) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes + x)); duplex_pixels = GetCacheViewVirtualPixels(duplex->view, duplex->extent.x, y, duplex->extent.width, 1, duplex->exception); if (duplex_pixels == (const PixelPacket *)NULL) { status = MagickFalse; continue; } duplex_indexes = GetCacheViewVirtualIndexQueue(duplex->view); for (x = 0; x < (ssize_t) duplex->extent.width; x++) PixelSetQuantumColor(duplex->pixel_wands[id][x], duplex_pixels + x); if (duplex_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) duplex->extent.width; x++) PixelSetBlackQuantum(duplex->pixel_wands[id][x], GetPixelBlack(duplex_indexes + x)); if (duplex_image->storage_class == PseudoClass) for (x = 0; x < (ssize_t) duplex->extent.width; x++) PixelSetIndex(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes + x)); destination_pixels = GetCacheViewAuthenticPixels(destination->view, destination->extent.x, y, destination->extent.width, 1, exception); if (destination_pixels == (PixelPacket *) NULL) { status = MagickFalse; continue; } destination_indexes = GetCacheViewAuthenticIndexQueue(destination->view); for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x], destination_pixels + x); if (destination_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelBlack(destination_indexes + x)); if (destination_image->storage_class == PseudoClass) for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes + 
x)); if (transfer(source, duplex, destination, y, id, context) == MagickFalse) status = MagickFalse; for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels + x); if (destination_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(destination_indexes + x, PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync = SyncCacheViewAuthenticPixels(destination->view, exception); if (sync == MagickFalse) { InheritException(destination->exception, GetCacheViewException( source->view)); status = MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(source_image, source->description, progress++, source->extent.height); if (proceed == MagickFalse) status = MagickFalse; } } return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t W a n d V i e w E x c e p t i o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetWandViewException() returns the severity, reason, and * description of any % error that occurs when utilizing a wand view. % % * The format of the GetWandViewException method is: % % char * *GetWandViewException(const WandView *wand_view, % ExceptionType * *severity) % % A description of each parameter follows: % % o * wand_view: the pixel wand_view. % % o severity: the severity of the * error is returned here. % */ WandExport char * GetWandViewException(const WandView * wand_view, ExceptionType * severity) { char *description; assert(wand_view != (const WandView *)NULL); assert(wand_view->signature == WandSignature); if (wand_view->debug != MagickFalse) (void)LogMagickEvent(WandEvent, GetMagickModule(), "%s", wand_view->name); assert(severity != (ExceptionType *) NULL); *severity = wand_view->exception->severity; description = (char *)AcquireQuantumMemory(2UL * MaxTextExtent, sizeof(*description)); if (description == (char *)NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed", wand_view->name); *description = '\0'; if (wand_view->exception->reason != (char *)NULL) (void)CopyMagickString(description, GetLocaleExceptionMessage( wand_view->exception->severity, wand_view->exception->reason), MaxTextExtent); if (wand_view->exception->description != (char *)NULL) { (void)ConcatenateMagickString(description, " (", MaxTextExtent); (void)ConcatenateMagickString(description, GetLocaleExceptionMessage( wand_view->exception->severity, wand_view->exception->description), MaxTextExtent); (void)ConcatenateMagickString(description, ")", MaxTextExtent); } return (description); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t W a n d V i e w E x t e n t * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetWandViewExtent() returns the wand view extent. % % The * format of the GetWandViewExtent method is: % % RectangleInfo * GetWandViewExtent(const WandView *wand_view) % % A description of each * parameter follows: % % o wand_view: the wand view. 
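%
%  For example (illustrative only):
%
%    RectangleInfo
%      extent = GetWandViewExtent(wand_view);
%
%    (void) printf("view geometry: %.20gx%.20g%+.20g%+.20g\n",
%      (double) extent.width,(double) extent.height,(double) extent.x,
%      (double) extent.y);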
% */ WandExport RectangleInfo GetWandViewExtent(const WandView * wand_view) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); return (wand_view->extent); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t W a n d V i e w I t e r a t o r * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetWandViewIterator() iterates over the wand view in parallel * and calls % your get method for each scanline of the view. The pixel * extent is % not confined to the image canvas-- that is you can include * negative offsets % or widths or heights that exceed the image dimension. * Any updates to % the pixels in your callback are ignored. % % The * callback signature is: % % MagickBooleanType GetImageViewMethod(const * WandView *source, % const ssize_t y,const int thread_id,void * *context) % % Use this pragma if the view is not single threaded: % % % % * to define a section of code in your callback get method that must be % * executed by a single thread at a time. % % The format of the * GetWandViewIterator method is: % % MagickBooleanType * GetWandViewIterator(WandView *source, % GetWandViewMethod get,void * *context) % % A description of each parameter follows: % % o source: * the source wand view. % % o get: the get callback method. % % o * context: the user defined context. % */ WandExport MagickBooleanType GetWandViewIterator(WandView * source, GetWandViewMethod get, void *context) { Image * source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (get == (GetWandViewMethod) NULL) return (MagickFalse); source_image = source->wand->images; status = MagickTrue; progress = 0; for (y = source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket * indexes; register const PixelPacket * pixels; register ssize_t x; if (status == MagickFalse) continue; pixels = GetCacheViewVirtualPixels(source->view, source->extent.x, y, source->extent.width, 1, source->exception); if (pixels == (const PixelPacket *)NULL) { status = MagickFalse; continue; } indexes = GetCacheViewVirtualIndexQueue(source->view); for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x], pixels + x); if (source_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes + x)); if (source_image->storage_class == PseudoClass) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes + x)); if (get(source, y, id, context) == MagickFalse) status = MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(source_image, source->description, progress++, source->extent.height); if (proceed == MagickFalse) status = MagickFalse; } } return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t W a n d V i e w P i x e l s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetWandViewPixels() returns the wand view pixel_wands. 
% % The * format of the GetWandViewPixels method is: % % PixelWand * *GetWandViewPixels(const WandView *wand_view) % % A description of each * parameter follows: % % o wand_view: the wand view. % */ WandExport PixelWand ** GetWandViewPixels(const WandView * wand_view) { const int id = GetOpenMPThreadId(); assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); return (wand_view->pixel_wands[id]); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t W a n d V i e w W a n d * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetWandViewWand() returns the magick wand associated with the * wand view. % % The format of the GetWandViewWand method is: % % * MagickWand *GetWandViewWand(const WandView *wand_view) % % A description * of each parameter follows: % % o wand_view: the wand view. % */ WandExport MagickWand * GetWandViewWand(const WandView * wand_view) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); return (wand_view->wand); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % I s W a n d V i e w * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % IsWandView() returns MagickTrue if the the parameter is verified * as a wand % view object. % % The format of the IsWandView method is: % % * MagickBooleanType IsWandView(const WandView *wand_view) % % A description * of each parameter follows: % % o wand_view: the wand view. % */ WandExport MagickBooleanType IsWandView(const WandView * wand_view) { size_t length; if (wand_view == (const WandView *)NULL) return (MagickFalse); if (wand_view->signature != WandSignature) return (MagickFalse); length = strlen(WandViewId); if (LocaleNCompare(wand_view->name, WandViewId, length) != 0) return (MagickFalse); return (MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % N e w W a n d V i e w * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % NewWandView() returns a wand view required for all other methods * in the % Wand View API. % % The format of the NewWandView method is: % % * WandView *NewWandView(MagickWand *wand) % % A description of each * parameter follows: % % o wand: the wand. 
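%
%  A typical lifecycle, sketched for illustration (error handling elided;
%  "image.png" and MyGetMethod are placeholders):
%
%    MagickWandGenesis();
%    wand=NewMagickWand();
%    (void) MagickReadImage(wand,"image.png");
%    wand_view=NewWandView(wand);
%    (void) GetWandViewIterator(wand_view,MyGetMethod,(void *) NULL);
%    wand_view=DestroyWandView(wand_view);
%    wand=DestroyMagickWand(wand);
%    MagickWandTerminus();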
% */ static PixelWand *** AcquirePixelsThreadSet(const size_t number_wands, const size_t number_threads) { PixelWand *** pixel_wands; register ssize_t i; pixel_wands = (PixelWand ***) AcquireQuantumMemory(number_threads, sizeof(*pixel_wands)); if (pixel_wands == (PixelWand ***) NULL) return ((PixelWand ***) NULL); (void)memset(pixel_wands, 0, number_threads * sizeof(*pixel_wands)); for (i = 0; i < (ssize_t) number_threads; i++) { pixel_wands[i] = NewPixelWands(number_wands); if (pixel_wands[i] == (PixelWand **) NULL) return (DestroyPixelsThreadSet(pixel_wands, number_wands, number_threads)); } return (pixel_wands); } WandExport WandView * NewWandView(MagickWand * wand) { WandView * wand_view; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); wand_view = (WandView *) AcquireMagickMemory(sizeof(*wand_view)); if (wand_view == (WandView *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed", GetExceptionMessage(errno)); (void)memset(wand_view, 0, sizeof(*wand_view)); wand_view->id = AcquireWandId(); (void)FormatLocaleString(wand_view->name, MaxTextExtent, "%s-%.20g", WandViewId, (double)wand_view->id); wand_view->description = ConstantString("WandView"); wand_view->wand = wand; wand_view->exception = AcquireExceptionInfo(); wand_view->view = AcquireVirtualCacheView(wand_view->wand->images, wand_view->exception); wand_view->extent.width = wand->images->columns; wand_view->extent.height = wand->images->rows; wand_view->number_threads = GetOpenMPMaximumThreads(); wand_view->pixel_wands = AcquirePixelsThreadSet(wand_view->extent.width, wand_view->number_threads); if (wand_view->pixel_wands == (PixelWand ***) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed", GetExceptionMessage(errno)); wand_view->debug = IsEventLogging(); wand_view->signature = WandSignature; return (wand_view); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % N e w W a n d V i e w E x t e n t * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % NewWandViewExtent() returns a wand view required for all other * methods % in the Wand View API. % % The format of the NewWandViewExtent * method is: % % WandView *NewWandViewExtent(MagickWand *wand,const * ssize_t x, % const ssize_t y,const size_t width,const size_t * height) % % A description of each parameter follows: % % o wand: the * magick wand. % % o x,y,columns,rows: These values define the perimeter * of a extent of % pixel_wands view. 
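%
%  For example (illustrative only), to view a 100x100 region whose
%  upper-left corner is at (10,10):
%
%    wand_view=NewWandViewExtent(wand,10,10,100,100);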
% */ WandExport WandView * NewWandViewExtent(MagickWand * wand, const ssize_t x, const ssize_t y, const size_t width, const size_t height) { WandView * wand_view; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); wand_view = (WandView *) AcquireMagickMemory(sizeof(*wand_view)); if (wand_view == (WandView *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed", GetExceptionMessage(errno)); (void)memset(wand_view, 0, sizeof(*wand_view)); wand_view->id = AcquireWandId(); (void)FormatLocaleString(wand_view->name, MaxTextExtent, "%s-%.20g", WandViewId, (double)wand_view->id); wand_view->description = ConstantString("WandView"); wand_view->wand = wand; wand_view->exception = AcquireExceptionInfo(); wand_view->view = AcquireVirtualCacheView(wand_view->wand->images, wand_view->exception); wand_view->extent.width = width; wand_view->extent.height = height; wand_view->extent.x = x; wand_view->extent.y = y; wand_view->number_threads = GetOpenMPMaximumThreads(); wand_view->pixel_wands = AcquirePixelsThreadSet(wand_view->extent.width, wand_view->number_threads); if (wand_view->pixel_wands == (PixelWand ***) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed", GetExceptionMessage(errno)); wand_view->debug = IsEventLogging(); wand_view->signature = WandSignature; return (wand_view); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t W a n d V i e w D e s c r i p t i o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetWandViewDescription() associates a description with an image * view. % % The format of the SetWandViewDescription method is: % % * void SetWandViewDescription(WandView *image_view,const char *description) * % % A description of each parameter follows: % % o wand_view: the wand * view. % % o description: the wand view description. % */ MagickExport void SetWandViewDescription(WandView * wand_view, const char *description) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); wand_view->description = ConstantString(description); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t W a n d V i e w I t e r a t o r * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetWandViewIterator() iterates over the wand view in parallel * and calls % your set method for each scanline of the view. The pixel * extent is % confined to the image canvas-- that is no negative offsets or * widths or % heights that exceed the image dimension. The pixels are * initially % undefined and any settings you make in the callback method * are automagically % synced back to your image. % % The callback * signature is: % % MagickBooleanType SetImageViewMethod(ImageView * *destination, % const ssize_t y,const int thread_id,void *context) * % % Use this pragma if the view is not single threaded: % % % % to * define a section of code in your callback set method that must be % * executed by a single thread at a time. % % The format of the * SetWandViewIterator method is: % % MagickBooleanType * SetWandViewIterator(WandView *destination, % SetWandViewMethod * set,void *context) % % A description of each parameter follows: % % o * destination: the wand view. % % o set: the set callback method. % % * o context: the user defined context.
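%
%  A minimal set callback, sketched for illustration (the name FillBlue is
%  hypothetical):
%
%    static MagickBooleanType FillBlue(WandView *destination,const ssize_t y,
%      const int thread_id,void *context)
%    {
%      PixelWand
%        **pixels = GetWandViewPixels(destination);
%
%      RectangleInfo
%        extent = GetWandViewExtent(destination);
%
%      register ssize_t
%        x;
%
%      (void) y; (void) thread_id; (void) context;
%      for (x=0; x < (ssize_t) extent.width; x++)
%        (void) PixelSetColor(pixels[x],"blue");
%      return(MagickTrue);
%    }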
% */ WandExport MagickBooleanType SetWandViewIterator(WandView * destination, SetWandViewMethod set, void *context) { ExceptionInfo * exception; Image * destination_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(destination != (WandView *) NULL); assert(destination->signature == WandSignature); if (set == (SetWandViewMethod) NULL) return (MagickFalse); destination_image = destination->wand->images; if (SetImageStorageClass(destination_image, DirectClass) == MagickFalse) return (MagickFalse); status = MagickTrue; progress = 0; exception = destination->exception; for (y = destination->extent.y; y < (ssize_t) destination->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register IndexPacket * magick_restrict indexes; register ssize_t x; register PixelPacket * magick_restrict pixels; if (status == MagickFalse) continue; pixels = GetCacheViewAuthenticPixels(destination->view, destination->extent.x, y, destination->extent.width, 1, exception); if (pixels == (PixelPacket *) NULL) { InheritException(destination->exception, GetCacheViewException( destination->view)); status = MagickFalse; continue; } indexes = GetCacheViewAuthenticIndexQueue(destination->view); if (set(destination, y, id, context) == MagickFalse) status = MagickFalse; for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], pixels + x); if (destination_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(indexes + x, PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync = SyncCacheViewAuthenticPixels(destination->view, exception); if (sync == MagickFalse) { InheritException(destination->exception, GetCacheViewException( destination->view)); status = MagickFalse; } if (destination_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(destination_image, destination->description, progress++, destination->extent.height); if (proceed == MagickFalse) status = MagickFalse; } } return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t W a n d V i e w T h r e a d s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetWandViewThreads() sets the number of threads in a thread * team. % % The format of the SetWandViewDescription method is: % % * void SetWandViewThreads(WandView *image_view, % const size_t * number_threads) % % A description of each parameter follows: % % o * image_view: the image view. % % o number_threads: the number of threads * in a thread team. % */ MagickExport void SetWandViewThreads(WandView * image_view, const size_t number_threads) { assert(image_view != (WandView *) NULL); assert(image_view->signature == MagickCoreSignature); image_view->number_threads = number_threads; if (number_threads > (size_t) GetMagickResourceLimit(ThreadResource)) image_view->number_threads = GetOpenMPMaximumThreads(); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % T r a n s f e r W a n d V i e w I t e r a t o r * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % TransferWandViewIterator() iterates over two wand views in * parallel and % calls your transfer method for each scanline of the view. 
* The source pixel % extent is not confined to the image canvas-- that is * you can include % negative offsets or widths or heights that exceed the * image dimension. % However, the destination wand view is confined to the * image canvas-- that % is no negative offsets or widths or heights that * exceed the image dimension % are permitted. % % The callback signature * is: % % MagickBooleanType TransferImageViewMethod(const WandView * *source, % WandView *destination,const ssize_t y,const int * thread_id, % void *context) % % Use this pragma if the view is not * single threaded: % % % % to define a section of code in your callback * transfer method that must be % executed by a single thread at a time. % % * The format of the TransferWandViewIterator method is: % % * MagickBooleanType TransferWandViewIterator(WandView *source, % * WandView *destination,TransferWandViewMethod transfer,void *context) % % * A description of each parameter follows: % % o source: the source wand * view. % % o destination: the destination wand view. % % o transfer: * the transfer callback method. % % o context: the user defined context. * % */ WandExport MagickBooleanType TransferWandViewIterator(WandView * source, WandView * destination, TransferWandViewMethod transfer, void *context) { ExceptionInfo * exception; Image * destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (transfer == (TransferWandViewMethod) NULL) return (MagickFalse); source_image = source->wand->images; destination_image = destination->wand->images; if (SetImageStorageClass(destination_image, DirectClass) == MagickFalse) return (MagickFalse); status = MagickTrue; progress = 0; exception = destination->exception; for (y = source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket * magick_restrict indexes; register const PixelPacket * magick_restrict pixels; register IndexPacket * magick_restrict destination_indexes; register ssize_t x; register PixelPacket * magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels = GetCacheViewVirtualPixels(source->view, source->extent.x, y, source->extent.width, 1, source->exception); if (pixels == (const PixelPacket *)NULL) { status = MagickFalse; continue; } indexes = GetCacheViewVirtualIndexQueue(source->view); for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x], pixels + x); if (source_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes + x)); if (source_image->storage_class == PseudoClass) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes + x)); destination_pixels = GetCacheViewAuthenticPixels(destination->view, destination->extent.x, y, destination->extent.width, 1, exception); if (destination_pixels == (PixelPacket *) NULL) { status = MagickFalse; continue; } destination_indexes = GetCacheViewAuthenticIndexQueue(destination->view); for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x], pixels + x); if (destination_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelBlack(indexes + 
x)); if (destination_image->storage_class == PseudoClass) for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(indexes + x)); if (transfer(source, destination, y, id, context) == MagickFalse) status = MagickFalse; for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels + x); if (destination_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(destination_indexes + x, PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync = SyncCacheViewAuthenticPixels(destination->view, exception); if (sync == MagickFalse) { InheritException(destination->exception, GetCacheViewException( source->view)); status = MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(source_image, source->description, progress++, source->extent.height); if (proceed == MagickFalse) status = MagickFalse; } } return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % U p d a t e W a n d V i e w I t e r a t o r * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % UpdateWandViewIterator() iterates over the wand view in parallel * and calls % your update method for each scanline of the view. The pixel * extent is % confined to the image canvas-- that is no negative offsets or * widths or % heights that exceed the image dimension are permitted. * Updates to pixels % in your callback are automagically synced back to the * image. % % The callback signature is: % % MagickBooleanType * UpdateImageViewMethod(WandView *source,const ssize_t y, % const int * thread_id,void *context) % % Use this pragma if the view is not single * threaded: % % % % to define a section of code in your callback update * method that must be % executed by a single thread at a time. % % The * format of the UpdateWandViewIterator method is: % % MagickBooleanType * UpdateWandViewIterator(WandView *source, % UpdateWandViewMethod * update,void *context) % % A description of each parameter follows: % % * o source: the source wand view. % % o update: the update callback * method. % % o context: the user defined context. 
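%
%  An update callback that halves the intensity of each pixel, sketched for
%  illustration (the name DimUpdate is hypothetical):
%
%    static MagickBooleanType DimUpdate(WandView *source,const ssize_t y,
%      const int thread_id,void *context)
%    {
%      PixelWand
%        **pixels = GetWandViewPixels(source);
%
%      RectangleInfo
%        extent = GetWandViewExtent(source);
%
%      register ssize_t
%        x;
%
%      (void) y; (void) thread_id; (void) context;
%      for (x=0; x < (ssize_t) extent.width; x++)
%      {
%        PixelSetRed(pixels[x],PixelGetRed(pixels[x])/2.0);
%        PixelSetGreen(pixels[x],PixelGetGreen(pixels[x])/2.0);
%        PixelSetBlue(pixels[x],PixelGetBlue(pixels[x])/2.0);
%      }
%      return(MagickTrue);
%    }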
% */ WandExport MagickBooleanType UpdateWandViewIterator(WandView * source, UpdateWandViewMethod update, void *context) { ExceptionInfo * exception; Image * source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (update == (UpdateWandViewMethod) NULL) return (MagickFalse); source_image = source->wand->images; if (SetImageStorageClass(source_image, DirectClass) == MagickFalse) return (MagickFalse); status = MagickTrue; progress = 0; exception = source->exception; for (y = source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); register IndexPacket * magick_restrict indexes; register ssize_t x; register PixelPacket * magick_restrict pixels; if (status == MagickFalse) continue; pixels = GetCacheViewAuthenticPixels(source->view, source->extent.x, y, source->extent.width, 1, exception); if (pixels == (PixelPacket *) NULL) { InheritException(source->exception, GetCacheViewException( source->view)); status = MagickFalse; continue; } indexes = GetCacheViewAuthenticIndexQueue(source->view); for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x], pixels + x); if (source_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes + x)); if (update(source, y, id, context) == MagickFalse) status = MagickFalse; for (x = 0; x < (ssize_t) source->extent.width; x++) PixelGetQuantumColor(source->pixel_wands[id][x], pixels + x); if (source_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) source->extent.width; x++) SetPixelBlack(indexes + x, PixelGetBlackQuantum( source->pixel_wands[id][x])); if (SyncCacheViewAuthenticPixels(source->view, exception) == MagickFalse) { InheritException(source->exception, GetCacheViewException(source->view)); status = MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(source_image, source->description, progress++, source->extent.height); if (proceed == MagickFalse) status = MagickFalse; } } return (status); }
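/*
  Illustrative only: a complete, hypothetical program that negates an image
  through the Wand View API above.  The file names "input.png" and
  "output.png" are placeholders, and error handling is reduced to exits for
  brevity.  Because the OpenMP build of UpdateWandViewIterator() may invoke
  the callback from several threads at once, the callback touches only its
  own scanline's pixel wands.
*/
#include <stdlib.h>
#include <wand/MagickWand.h>

static MagickBooleanType NegateUpdate(WandView *source,const ssize_t y,
  const int thread_id,void *context)
{
  PixelWand
    **pixels;

  RectangleInfo
    extent;

  register ssize_t
    x;

  (void) y;
  (void) thread_id;
  (void) context;
  pixels=GetWandViewPixels(source);
  extent=GetWandViewExtent(source);
  for (x=0; x < (ssize_t) extent.width; x++)
  {
    PixelSetRed(pixels[x],1.0-PixelGetRed(pixels[x]));
    PixelSetGreen(pixels[x],1.0-PixelGetGreen(pixels[x]));
    PixelSetBlue(pixels[x],1.0-PixelGetBlue(pixels[x]));
  }
  return(MagickTrue);
}

int main(void)
{
  MagickWand
    *wand;

  WandView
    *wand_view;

  MagickWandGenesis();
  wand=NewMagickWand();
  if (MagickReadImage(wand,"input.png") == MagickFalse)
    exit(1);
  wand_view=NewWandView(wand);
  if (wand_view == (WandView *) NULL)
    exit(1);
  if (UpdateWandViewIterator(wand_view,NegateUpdate,(void *) NULL) == MagickFalse)
    exit(1);
  wand_view=DestroyWandView(wand_view);
  if (MagickWriteImage(wand,"output.png") == MagickFalse)
    exit(1);
  wand=DestroyMagickWand(wand);
  MagickWandTerminus();
  return(0);
}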
/* * Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* * Define declarations. */ #define WandViewId "WandView" /* * Typedef declarations. */ struct _WandView { size_t id; char name[MaxTextExtent], *description; RectangleInfo extent; MagickWand * wand; CacheView * view; size_t number_threads; PixelWand *** pixel_wands; ExceptionInfo * exception; MagickBooleanType debug; size_t signature; }; /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C l o n e W a n d V i e w * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CloneWandView() makes a copy of the specified wand view. % % * The format of the CloneWandView method is: % % WandView * *CloneWandView(const WandView *wand_view) % % A description of each * parameter follows: % % o wand_view: the wand view. % */ WandExport WandView * CloneWandView(const WandView * wand_view) { WandView * clone_view; register ssize_t i; assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); if (wand_view->debug != MagickFalse) (void)LogMagickEvent(WandEvent, GetMagickModule(), "%s", wand_view->name); clone_view = (WandView *) AcquireMagickMemory(sizeof(*clone_view)); if (clone_view == (WandView *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed", wand_view->name); (void)memset(clone_view, 0, sizeof(*clone_view)); clone_view->id = AcquireWandId(); (void)FormatLocaleString(clone_view->name, MaxTextExtent, "%s-%.20g", WandViewId, (double)clone_view->id); clone_view->description = ConstantString(wand_view->description); clone_view->view = CloneCacheView(wand_view->view); clone_view->extent = wand_view->extent; clone_view->number_threads = wand_view->number_threads; clone_view->exception = AcquireExceptionInfo(); InheritException(clone_view->exception, wand_view->exception); /* acquire the per-thread pixel wand table; each row is cloned below */ clone_view->pixel_wands = (PixelWand ***) AcquireQuantumMemory(wand_view->number_threads, sizeof(*clone_view->pixel_wands)); if (clone_view->pixel_wands == (PixelWand ***) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed", wand_view->name); (void)memset(clone_view->pixel_wands, 0, wand_view->number_threads * sizeof(*clone_view->pixel_wands)); for (i = 0; i < (ssize_t) wand_view->number_threads; i++) clone_view->pixel_wands[i] = ClonePixelWands((const PixelWand **) wand_view->pixel_wands[i], wand_view->extent.width); clone_view->debug = wand_view->debug; if (clone_view->debug != MagickFalse) (void)LogMagickEvent(WandEvent, GetMagickModule(), "%s", clone_view->name); clone_view->signature = WandSignature; return (clone_view); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D e s t r o y W a n d V i e w * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyWandView() deallocates memory associated with a wand * view. % % The format of the DestroyWandView method is: % % WandView * *DestroyWandView(WandView *wand_view) % % A description of each parameter * follows: % % o wand_view: the wand view.
% */ static PixelWand *** DestroyPixelsThreadSet(PixelWand *** pixel_wands, const size_t number_wands, const size_t number_threads) { register ssize_t i; assert(pixel_wands != (PixelWand ***) NULL); for (i = 0; i < (ssize_t) number_threads; i++) if (pixel_wands[i] != (PixelWand **) NULL) pixel_wands[i] = DestroyPixelWands(pixel_wands[i], number_wands); pixel_wands = (PixelWand ***) RelinquishMagickMemory(pixel_wands); return (pixel_wands); } WandExport WandView * DestroyWandView(WandView * wand_view) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); wand_view->pixel_wands = DestroyPixelsThreadSet(wand_view->pixel_wands, wand_view->extent.width, wand_view->number_threads); wand_view->view = DestroyCacheView(wand_view->view); wand_view->exception = DestroyExceptionInfo(wand_view->exception); wand_view->signature = (~WandSignature); RelinquishWandId(wand_view->id); wand_view = (WandView *) RelinquishMagickMemory(wand_view); return (wand_view); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DuplexTransferWandViewIterator() iterates over three wand views * in % parallel and calls your transfer method for each scanline of the * view. The % source and duplex pixel extent is not confined to the image * canvas-- that is % you can include negative offsets or widths or heights * that exceed the image % dimension. However, the destination wand view is * confined to the image % canvas-- that is no negative offsets or widths or * heights that exceed the % image dimension are permitted. % % The * callback signature is: % % MagickBooleanType * DuplexTransferImageViewMethod(const WandView *source, % const * WandView *duplex,WandView *destination,const ssize_t y, % const int * thread_id,void *context) % % Use this pragma if the view is not single * threaded: % % #pragma omp critical % % to define a section of code in * your callback transfer method that must be % executed by a single thread * at a time. % % The format of the DuplexTransferWandViewIterator method * is: % % MagickBooleanType DuplexTransferWandViewIterator(WandView * *source, % WandView *duplex,WandView *destination, % * DuplexTransferWandViewMethod transfer,void *context) % % A description of * each parameter follows: % % o source: the source wand view. % % o * duplex: the duplex wand view. % % o destination: the destination wand * view. % % o transfer: the transfer callback method. % % o context: * the user defined context. 
% */ WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView * source, WandView * duplex, WandView * destination, DuplexTransferWandViewMethod transfer, void *context) { ExceptionInfo * exception; Image * destination_image, *duplex_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (transfer == (DuplexTransferWandViewMethod) NULL) return (MagickFalse); source_image = source->wand->images; duplex_image = duplex->wand->images; destination_image = destination->wand->images; if (SetImageStorageClass(destination_image, DirectClass) == MagickFalse) return (MagickFalse); status = MagickTrue; progress = 0; exception = destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height = (size_t) (source->extent.height - source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y = source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket * magick_restrict duplex_indexes, *magick_restrict indexes; register const PixelPacket * magick_restrict duplex_pixels, *magick_restrict pixels; register IndexPacket * magick_restrict destination_indexes; register ssize_t x; register PixelPacket * magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels = GetCacheViewVirtualPixels(source->view, source->extent.x, y, source->extent.width, 1, source->exception); if (pixels == (const PixelPacket *)NULL) { status = MagickFalse; continue; } indexes = GetCacheViewVirtualIndexQueue(source->view); for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x], pixels + x); if (source_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes + x)); if (source_image->storage_class == PseudoClass) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes + x)); duplex_pixels = GetCacheViewVirtualPixels(duplex->view, duplex->extent.x, y, duplex->extent.width, 1, duplex->exception); if (duplex_pixels == (const PixelPacket *)NULL) { status = MagickFalse; continue; } duplex_indexes = GetCacheViewVirtualIndexQueue(duplex->view); for (x = 0; x < (ssize_t) duplex->extent.width; x++) PixelSetQuantumColor(duplex->pixel_wands[id][x], duplex_pixels + x); if (duplex_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) duplex->extent.width; x++) PixelSetBlackQuantum(duplex->pixel_wands[id][x], GetPixelBlack(duplex_indexes + x)); if (duplex_image->storage_class == PseudoClass) for (x = 0; x < (ssize_t) duplex->extent.width; x++) PixelSetIndex(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes + x)); destination_pixels = GetCacheViewAuthenticPixels(destination->view, destination->extent.x, y, destination->extent.width, 1, exception); if (destination_pixels == (PixelPacket *) NULL) { status = MagickFalse; continue; } destination_indexes = GetCacheViewAuthenticIndexQueue(destination->view); for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x], destination_pixels + x); if (destination_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) 
destination->extent.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelBlack(destination_indexes + x)); if (destination_image->storage_class == PseudoClass) for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes + x)); if (transfer(source, duplex, destination, y, id, context) == MagickFalse) status = MagickFalse; for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels + x); if (destination_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(destination_indexes + x, PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync = SyncCacheViewAuthenticPixels(destination->view, exception); if (sync == MagickFalse) { InheritException(destination->exception, GetCacheViewException( source->view)); status = MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(source_image, source->description, progress++, source->extent.height); if (proceed == MagickFalse) status = MagickFalse; } } return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t W a n d V i e w E x c e p t i o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetWandViewException() returns the severity, reason, and * description of any % error that occurs when utilizing a wand view. % % * The format of the GetWandViewException method is: % % char * *GetWandViewException(const WandView *wand_view, % ExceptionType * *severity) % % A description of each parameter follows: % % o * wand_view: the pixel wand_view. % % o severity: the severity of the * error is returned here. % */ WandExport char * GetWandViewException(const WandView * wand_view, ExceptionType * severity) { char *description; assert(wand_view != (const WandView *)NULL); assert(wand_view->signature == WandSignature); if (wand_view->debug != MagickFalse) (void)LogMagickEvent(WandEvent, GetMagickModule(), "%s", wand_view->name); assert(severity != (ExceptionType *) NULL); *severity = wand_view->exception->severity; description = (char *)AcquireQuantumMemory(2UL * MaxTextExtent, sizeof(*description)); if (description == (char *)NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed", wand_view->name); *description = '\0'; if (wand_view->exception->reason != (char *)NULL) (void)CopyMagickString(description, GetLocaleExceptionMessage( wand_view->exception->severity, wand_view->exception->reason), MaxTextExtent); if (wand_view->exception->description != (char *)NULL) { (void)ConcatenateMagickString(description, " (", MaxTextExtent); (void)ConcatenateMagickString(description, GetLocaleExceptionMessage( wand_view->exception->severity, wand_view->exception->description), MaxTextExtent); (void)ConcatenateMagickString(description, ")", MaxTextExtent); } return (description); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t W a n d V i e w E x t e n t * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetWandViewExtent() returns the wand view extent. 
% % The * format of the GetWandViewExtent method is: % % RectangleInfo * GetWandViewExtent(const WandView *wand_view) % % A description of each * parameter follows: % % o wand_view: the wand view. % */ WandExport RectangleInfo GetWandViewExtent(const WandView * wand_view) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); return (wand_view->extent); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t W a n d V i e w I t e r a t o r * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetWandViewIterator() iterates over the wand view in parallel * and calls % your get method for each scanline of the view. The pixel * extent is % not confined to the image canvas-- that is you can include * negative offsets % or widths or heights that exceed the image dimension. * Any updates to % the pixels in your callback are ignored. % % The * callback signature is: % % MagickBooleanType GetImageViewMethod(const * WandView *source, % const ssize_t y,const int thread_id,void * *context) % % Use this pragma if the view is not single threaded: % % * #pragma omp critical % % to define a section of code in your callback get * method that must be % executed by a single thread at a time. % % The * format of the GetWandViewIterator method is: % % MagickBooleanType * GetWandViewIterator(WandView *source, % GetWandViewMethod get,void * *context) % % A description of each parameter follows: % % o source: * the source wand view. % % o get: the get callback method. % % o * context: the user defined context. % */ WandExport MagickBooleanType GetWandViewIterator(WandView * source, GetWandViewMethod get, void *context) { Image * source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (get == (GetWandViewMethod) NULL) return (MagickFalse); source_image = source->wand->images; status = MagickTrue; progress = 0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height = (size_t) (source->extent.height - source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,source_image,height,1) #endif for (y = source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket * indexes; register const PixelPacket * pixels; register ssize_t x; if (status == MagickFalse) continue; pixels = GetCacheViewVirtualPixels(source->view, source->extent.x, y, source->extent.width, 1, source->exception); if (pixels == (const PixelPacket *)NULL) { status = MagickFalse; continue; } indexes = GetCacheViewVirtualIndexQueue(source->view); for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x], pixels + x); if (source_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes + x)); if (source_image->storage_class == PseudoClass) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes + x)); if (get(source, y, id, context) == MagickFalse) status = MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(source_image, source->description, 
progress++, source->extent.height); if (proceed == MagickFalse) status = MagickFalse; } } return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t W a n d V i e w P i x e l s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetWandViewPixels() returns the wand view pixel_wands. % % The * format of the GetWandViewPixels method is: % % PixelWand * *GetWandViewPixels(const WandView *wand_view) % % A description of each * parameter follows: % % o wand_view: the wand view. % */ WandExport PixelWand ** GetWandViewPixels(const WandView * wand_view) { const int id = GetOpenMPThreadId(); assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); return (wand_view->pixel_wands[id]); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t W a n d V i e w W a n d * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetWandViewWand() returns the magick wand associated with the * wand view. % % The format of the GetWandViewWand method is: % % * MagickWand *GetWandViewWand(const WandView *wand_view) % % A description * of each parameter follows: % % o wand_view: the wand view. % */ WandExport MagickWand * GetWandViewWand(const WandView * wand_view) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); return (wand_view->wand); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % I s W a n d V i e w * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % IsWandView() returns MagickTrue if the the parameter is verified * as a wand % view object. % % The format of the IsWandView method is: % % * MagickBooleanType IsWandView(const WandView *wand_view) % % A description * of each parameter follows: % % o wand_view: the wand view. % */ WandExport MagickBooleanType IsWandView(const WandView * wand_view) { size_t length; if (wand_view == (const WandView *)NULL) return (MagickFalse); if (wand_view->signature != WandSignature) return (MagickFalse); length = strlen(WandViewId); if (LocaleNCompare(wand_view->name, WandViewId, length) != 0) return (MagickFalse); return (MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % N e w W a n d V i e w * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % NewWandView() returns a wand view required for all other methods * in the % Wand View API. % % The format of the NewWandView method is: % % * WandView *NewWandView(MagickWand *wand) % % A description of each * parameter follows: % % o wand: the wand. 
% */ static PixelWand *** AcquirePixelsThreadSet(const size_t number_wands, const size_t number_threads) { PixelWand *** pixel_wands; register ssize_t i; pixel_wands = (PixelWand ***) AcquireQuantumMemory(number_threads, sizeof(*pixel_wands)); if (pixel_wands == (PixelWand ***) NULL) return ((PixelWand ***) NULL); (void)memset(pixel_wands, 0, number_threads * sizeof(*pixel_wands)); for (i = 0; i < (ssize_t) number_threads; i++) { pixel_wands[i] = NewPixelWands(number_wands); if (pixel_wands[i] == (PixelWand **) NULL) return (DestroyPixelsThreadSet(pixel_wands, number_wands, number_threads)); } return (pixel_wands); } WandExport WandView * NewWandView(MagickWand * wand) { WandView * wand_view; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); wand_view = (WandView *) AcquireMagickMemory(sizeof(*wand_view)); if (wand_view == (WandView *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed", GetExceptionMessage(errno)); (void)memset(wand_view, 0, sizeof(*wand_view)); wand_view->id = AcquireWandId(); (void)FormatLocaleString(wand_view->name, MaxTextExtent, "%s-%.20g", WandViewId, (double)wand_view->id); wand_view->description = ConstantString("WandView"); wand_view->wand = wand; wand_view->exception = AcquireExceptionInfo(); wand_view->view = AcquireVirtualCacheView(wand_view->wand->images, wand_view->exception); wand_view->extent.width = wand->images->columns; wand_view->extent.height = wand->images->rows; wand_view->number_threads = GetOpenMPMaximumThreads(); wand_view->pixel_wands = AcquirePixelsThreadSet(wand_view->extent.width, wand_view->number_threads); if (wand_view->pixel_wands == (PixelWand ***) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed", GetExceptionMessage(errno)); wand_view->debug = IsEventLogging(); wand_view->signature = WandSignature; return (wand_view); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % N e w W a n d V i e w E x t e n t * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % NewWandViewExtent() returns a wand view required for all other * methods % in the Wand View API. % % The format of the NewWandViewExtent * method is: % % WandView *NewWandViewExtent(MagickWand *wand,const * ssize_t x, % const ssize_t y,const size_t width,const size_t * height) % % A description of each parameter follows: % % o wand: the * magick wand. % % o x,y,columns,rows: These values define the perimeter * of a extent of % pixel_wands view. 
% */ WandExport WandView * NewWandViewExtent(MagickWand * wand, const ssize_t x, const ssize_t y, const size_t width, const size_t height) { WandView * wand_view; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); wand_view = (WandView *) AcquireMagickMemory(sizeof(*wand_view)); if (wand_view == (WandView *) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed", GetExceptionMessage(errno)); (void)memset(wand_view, 0, sizeof(*wand_view)); wand_view->id = AcquireWandId(); (void)FormatLocaleString(wand_view->name, MaxTextExtent, "%s-%.20g", WandViewId, (double)wand_view->id); wand_view->description = ConstantString("WandView"); wand_view->wand = wand; wand_view->exception = AcquireExceptionInfo(); wand_view->view = AcquireVirtualCacheView(wand_view->wand->images, wand_view->exception); wand_view->extent.width = width; wand_view->extent.height = height; wand_view->extent.x = x; wand_view->extent.y = y; wand_view->number_threads = GetOpenMPMaximumThreads(); wand_view->pixel_wands = AcquirePixelsThreadSet(wand_view->extent.width, wand_view->number_threads); if (wand_view->pixel_wands == (PixelWand ***) NULL) ThrowWandFatalException(ResourceLimitFatalError, "MemoryAllocationFailed", GetExceptionMessage(errno)); wand_view->debug = IsEventLogging(); wand_view->signature = WandSignature; return (wand_view); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t W a n d V i e w D e s c r i p t i o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetWandViewDescription() associates a description with an image * view. % % The format of the SetWandViewDescription method is: % % * void SetWandViewDescription(WandView *image_view,const char *description) * % % A description of each parameter follows: % % o wand_view: the wand * view. % % o description: the wand view description. % */ MagickExport void SetWandViewDescription(WandView * wand_view, const char *description) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == WandSignature); wand_view->description = ConstantString(description); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t W a n d V i e w I t e r a t o r * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetWandViewIterator() iterates over the wand view in parallel * and calls % your set method for each scanline of the view. The pixel * extent is % confined to the image canvas-- that is no negative offsets or * widths or % heights that exceed the image dimension. The pixels are * initially % undefined and any settings you make in the callback method * are automagically % synced back to your image. % % The callback * signature is: % % MagickBooleanType SetImageViewMethod(ImageView * *destination, % const ssize_t y,const int thread_id,void *context) * % % Use this pragma if the view is not single threaded: % % #pragma * omp critical % % to define a section of code in your callback set method * that must be % executed by a single thread at a time. % % The format of * the SetWandViewIterator method is: % % MagickBooleanType * SetWandViewIterator(WandView *destination, % SetWandViewMethod * set,void *context) % % A description of each parameter follows: % % o * destination: the wand view. % % o set: the set callback method. % % * o context: the user defined context.
% */ WandExport MagickBooleanType SetWandViewIterator(WandView * destination, SetWandViewMethod set, void *context) { ExceptionInfo * exception; Image * destination_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(destination != (WandView *) NULL); assert(destination->signature == WandSignature); if (set == (SetWandViewMethod) NULL) return (MagickFalse); destination_image = destination->wand->images; if (SetImageStorageClass(destination_image, DirectClass) == MagickFalse) return (MagickFalse); status = MagickTrue; progress = 0; exception = destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height = (size_t) (destination->extent.height - destination->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(destination_image,destination_image,height,1) #endif for (y = destination->extent.y; y < (ssize_t) destination->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register IndexPacket * magick_restrict indexes; register ssize_t x; register PixelPacket * magick_restrict pixels; if (status == MagickFalse) continue; pixels = GetCacheViewAuthenticPixels(destination->view, destination->extent.x, y, destination->extent.width, 1, exception); if (pixels == (PixelPacket *) NULL) { InheritException(destination->exception, GetCacheViewException( destination->view)); status = MagickFalse; continue; } indexes = GetCacheViewAuthenticIndexQueue(destination->view); if (set(destination, y, id, context) == MagickFalse) status = MagickFalse; for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], pixels + x); if (destination_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(indexes + x, PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync = SyncCacheViewAuthenticPixels(destination->view, exception); if (sync == MagickFalse) { InheritException(destination->exception, GetCacheViewException( destination->view)); status = MagickFalse; } if (destination_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(destination_image, destination->description, progress++, destination->extent.height); if (proceed == MagickFalse) status = MagickFalse; } } return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t W a n d V i e w T h r e a d s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetWandViewThreads() sets the number of threads in a thread * team. % % The format of the SetWandViewDescription method is: % % * void SetWandViewThreads(WandView *image_view, % const size_t * number_threads) % % A description of each parameter follows: % % o * image_view: the image view. % % o number_threads: the number of threads * in a thread team. 
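%
%  For example (illustrative only), to restrict the view to two threads
%  (requests beyond the thread resource limit fall back to the OpenMP
%  maximum):
%
%    SetWandViewThreads(wand_view,2);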
% */ MagickExport void SetWandViewThreads(WandView * image_view, const size_t number_threads) { assert(image_view != (WandView *) NULL); assert(image_view->signature == MagickCoreSignature); image_view->number_threads = number_threads; if (number_threads > (size_t) GetMagickResourceLimit(ThreadResource)) image_view->number_threads = GetOpenMPMaximumThreads(); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % T r a n s f e r W a n d V i e w I t e r a t o r * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % TransferWandViewIterator() iterates over two wand views in * parallel and % calls your transfer method for each scanline of the view. * The source pixel % extent is not confined to the image canvas-- that is * you can include % negative offsets or widths or heights that exceed the * image dimension. % However, the destination wand view is confined to the * image canvas-- that % is no negative offsets or widths or heights that * exceed the image dimension % are permitted. % % The callback signature * is: % % MagickBooleanType TransferImageViewMethod(const WandView * *source, % WandView *destination,const ssize_t y,const int * thread_id, % void *context) % % Use this pragma if the view is not * single threaded: % % #pragma omp critical % % to define a section of * code in your callback transfer method that must be % executed by a single * thread at a time. % % The format of the TransferWandViewIterator method * is: % % MagickBooleanType TransferWandViewIterator(WandView *source, * % WandView *destination,TransferWandViewMethod transfer,void * *context) % % A description of each parameter follows: % % o source: * the source wand view. % % o destination: the destination wand view. % % * o transfer: the transfer callback method. % % o context: the user * defined context. 
% */ WandExport MagickBooleanType TransferWandViewIterator(WandView * source, WandView * destination, TransferWandViewMethod transfer, void *context) { ExceptionInfo * exception; Image * destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (transfer == (TransferWandViewMethod) NULL) return (MagickFalse); source_image = source->wand->images; destination_image = destination->wand->images; if (SetImageStorageClass(destination_image, DirectClass) == MagickFalse) return (MagickFalse); status = MagickTrue; progress = 0; exception = destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height = (size_t) (source->extent.height - source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y = source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket * magick_restrict indexes; register const PixelPacket * magick_restrict pixels; register IndexPacket * magick_restrict destination_indexes; register ssize_t x; register PixelPacket * magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels = GetCacheViewVirtualPixels(source->view, source->extent.x, y, source->extent.width, 1, source->exception); if (pixels == (const PixelPacket *)NULL) { status = MagickFalse; continue; } indexes = GetCacheViewVirtualIndexQueue(source->view); for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x], pixels + x); if (source_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes + x)); if (source_image->storage_class == PseudoClass) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes + x)); destination_pixels = GetCacheViewAuthenticPixels(destination->view, destination->extent.x, y, destination->extent.width, 1, exception); if (destination_pixels == (PixelPacket *) NULL) { status = MagickFalse; continue; } destination_indexes = GetCacheViewAuthenticIndexQueue(destination->view); for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x], pixels + x); if (destination_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelBlack(indexes + x)); if (destination_image->storage_class == PseudoClass) for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(indexes + x)); if (transfer(source, destination, y, id, context) == MagickFalse) status = MagickFalse; for (x = 0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels + x); if (destination_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(destination_indexes + x, PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync = SyncCacheViewAuthenticPixels(destination->view, exception); if (sync == MagickFalse) { InheritException(destination->exception, GetCacheViewException( source->view)); status = MagickFalse; } if 
(source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(source_image, source->description, progress++, source->extent.height); if (proceed == MagickFalse) status = MagickFalse; } } return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % U p d a t e W a n d V i e w I t e r a t o r * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % UpdateWandViewIterator() iterates over the wand view in parallel * and calls % your update method for each scanline of the view. The pixel * extent is % confined to the image canvas-- that is no negative offsets or * widths or % heights that exceed the image dimension are permitted. * Updates to pixels % in your callback are automagically synced back to the * image. % % The callback signature is: % % MagickBooleanType * UpdateImageViewMethod(WandView *source,const ssize_t y, % const int * thread_id,void *context) % % Use this pragma if the view is not single * threaded: % % #pragma omp critical % % to define a section of code in * your callback update method that must be % executed by a single thread at * a time. % % The format of the UpdateWandViewIterator method is: % % * MagickBooleanType UpdateWandViewIterator(WandView *source, % * UpdateWandViewMethod update,void *context) % % A description of each * parameter follows: % % o source: the source wand view. % % o update: * the update callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType UpdateWandViewIterator(WandView * source, UpdateWandViewMethod update, void *context) { ExceptionInfo * exception; Image * source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (update == (UpdateWandViewMethod) NULL) return (MagickFalse); source_image = source->wand->images; if (SetImageStorageClass(source_image, DirectClass) == MagickFalse) return (MagickFalse); status = MagickTrue; progress = 0; exception = source->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height = (size_t) (source->extent.height - source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,source_image,height,1) #endif for (y = source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); register IndexPacket * magick_restrict indexes; register ssize_t x; register PixelPacket * magick_restrict pixels; if (status == MagickFalse) continue; pixels = GetCacheViewAuthenticPixels(source->view, source->extent.x, y, source->extent.width, 1, exception); if (pixels == (PixelPacket *) NULL) { InheritException(source->exception, GetCacheViewException( source->view)); status = MagickFalse; continue; } indexes = GetCacheViewAuthenticIndexQueue(source->view); for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x], pixels + x); if (source_image->colorspace == CMYKColorspace) for (x = 0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes + x)); if (update(source, y, id, context) == MagickFalse) status = MagickFalse; for (x = 0; x < (ssize_t) source->extent.width; x++) PixelGetQuantumColor(source->pixel_wands[id][x], pixels + x); if (source_image->colorspace == 
CMYKColorspace) for (x = 0; x < (ssize_t) source->extent.width; x++) SetPixelBlack(indexes + x, PixelGetBlackQuantum( source->pixel_wands[id][x])); if (SyncCacheViewAuthenticPixels(source->view, exception) == MagickFalse) { InheritException(source->exception, GetCacheViewException(source->view)); status = MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(source_image, source->description, progress++, source->extent.height); if (proceed == MagickFalse) status = MagickFalse; } } return (status); }
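The three iterators above share one pattern: each scanline is handed to a user callback on its own OpenMP thread, so anything the callback shares across rows must sit behind the #pragma omp critical that the documentation calls for. Below is a minimal sketch of such a callback for UpdateWandViewIterator(), assuming the GetWandViewPixels/GetWandViewExtent accessors from the same MagickWand API (they are not defined in the excerpt above, so treat them as assumptions):

#include <wand/MagickWand.h>   /* WandView, PixelWand, MagickBooleanType */

/* Sketch only: inverts each scanline and counts finished rows in shared
   state; the shared counter is the part that needs the critical section. */
static ssize_t rows_done = 0;  /* shared across the thread team */

static MagickBooleanType InvertScanline(WandView *view, const ssize_t y,
  const int thread_id, void *context)
{
  PixelWand **pixels = GetWandViewPixels(view);   /* this row's pixel wands */
  RectangleInfo extent = GetWandViewExtent(view);
  ssize_t x;

  (void) y; (void) thread_id; (void) context;
  for (x = 0; x < (ssize_t) extent.width; x++)
    {
      PixelSetRed(pixels[x], 1.0 - PixelGetRed(pixels[x]));
      PixelSetGreen(pixels[x], 1.0 - PixelGetGreen(pixels[x]));
      PixelSetBlue(pixels[x], 1.0 - PixelGetBlue(pixels[x]));
    }
  #pragma omp critical
  rows_done++;          /* shared update, hence the critical section */
  return (MagickTrue);
}

/* usage: status = UpdateWandViewIterator(view, InvertScanline, (void *) NULL); */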
pr66429.c
/* PR middle-end/66429 */ /* { dg-do compile } */ /* { dg-options "-O2 -fopenmp" } */ float b[10][15][10]; __attribute__ ((noreturn)) void noreturn (void) { for (;;); } __attribute__ ((noinline, noclone)) void foo (int n) { int i; #pragma omp parallel for simd schedule(static, 32) collapse(3) for (i = 0; i < 10; i++) for (int j = n; j < 8; j++) for (long k = -10; k < 10; k++) { b[i][j][k] += 16; noreturn (); b[i][j][k] -= 32; } } __attribute__ ((noinline, noclone)) void bar (void) { int i; #pragma omp parallel for simd schedule(static, 32) for (i = 0; i < 10; i++) { b[0][0][i] += 16; noreturn (); b[0][0][i] -= 32; } }
/* PR middle-end/66429 */ /* { dg-do compile } */ /* { dg-options "-O2 -fopenmp" } */ float b[10][15][10]; __attribute__((noreturn)) void noreturn(void) { for (;;); } __attribute__((noinline, noclone)) void foo(int n) { int i; for (i = 0; i < 10; i++) for (int j = n; j < 8; j++) for (long k = -10; k < 10; k++) { b[i][j][k] += 16; noreturn(); b[i][j][k] -= 32; } } __attribute__((noinline, noclone)) void bar(void) { int i; for (i = 0; i < 10; i++) { b[0][0][i] += 16; noreturn(); b[0][0][i] -= 32; } }
/* PR middle-end/66429 */ /* { dg-do compile } */ /* { dg-options "-O2 -fopenmp" } */ float b[10][15][10]; __attribute__((noreturn)) void noreturn(void) { for (;;); } __attribute__((noinline, noclone)) void foo(int n) { int i; #pragma omp parallel for simd schedule(static, 32) collapse(3) for (i = 0; i < 10; i++) for (int j = n; j < 8; j++) for (long k = -10; k < 10; k++) { b[i][j][k] += 16; noreturn(); b[i][j][k] -= 32; } } __attribute__((noinline, noclone)) void bar(void) { int i; #pragma omp parallel for simd schedule(static, 32) for (i = 0; i < 10; i++) { b[0][0][i] += 16; noreturn(); b[0][0][i] -= 32; } }
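PR 66429 exercised collapse(3) combined with simd when the loop body cannot fall through (the noreturn() call). For orientation, here is a standalone illustration of the same construct with mixed int/long control variables, as in the test above; sizes and values are illustrative only:

/* Illustration only: collapse(2) fuses two perfectly nested loops into a
   single iteration space that is then chunked and vectorized. */
#include <stdio.h>

static float b[10][20];

int main(void)
{
#pragma omp parallel for simd schedule(static, 32) collapse(2)
  for (int i = 0; i < 10; i++)
    for (long k = 0; k < 20; k++)
      b[i][k] += 16;
  printf("b[0][0] = %f\n", (double) b[0][0]);
  return 0;
}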
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) 
+ coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
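The kernel above flips between two time planes via (t+1)%2 and t%2: every point of the new plane depends only on the old plane, so the space loops are independent and only the time loop carries a dependence. A minimal 1D sketch of that double-buffering idiom with the space loop parallelized (sizes and coefficients here are illustrative, not the benchmark's):

/* Sketch: two planes indexed by t%2 / (t+1)%2; the space loop is parallel,
   the time loop is not. */
#include <stdio.h>

#define N 1024
#define NT 100
static double A[2][N];

int main(void)
{
  for (int i = 0; i < N; i++)
    A[0][i] = (double) i;
  for (int t = 0; t < NT; t++) {
#pragma omp parallel for schedule(static)
    for (int i = 1; i < N - 1; i++)
      A[(t + 1) % 2][i] = 0.5 * A[t % 2][i]
                        + 0.25 * (A[t % 2][i - 1] + A[t % 2][i + 1]);
  }
  printf("A[%d][%d] = %f\n", NT % 2, N / 2, A[NT % 2][N / 2]);
  return 0;
}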
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 2048; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz - 4; i++) { for (j = 4; j < Ny - 4; j++) { for (k = 4; k < Nx - 4; k++) { A[(t + 1) % 2][i][j][k] = coef[0][i][j][k] * A[(t) % 2][i][j][k] + coef[1][i][j][k] * (A[(t) % 2][i - 1][j][k] + A[(t) % 2][i + 1][j][k]) + coef[2][i][j][k] * (A[(t) % 2][i][j - 1][k] + A[(t) % 2][i][j + 1][k]) + coef[3][i][j][k] * (A[(t) % 2][i][j][k - 1] + A[(t) % 2][i][j][k + 1]) + coef[4][i][j][k] * (A[(t) % 2][i - 2][j][k] + A[(t) % 2][i + 
2][j][k]) + coef[5][i][j][k] * (A[(t) % 2][i][j - 2][k] + A[(t) % 2][i][j + 2][k]) + coef[6][i][j][k] * (A[(t) % 2][i][j][k - 2] + A[(t) % 2][i][j][k + 2]) + coef[7][i][j][k] * (A[(t) % 2][i - 3][j][k] + A[(t) % 2][i + 3][j][k]) + coef[8][i][j][k] * (A[(t) % 2][i][j - 3][k] + A[(t) % 2][i][j + 3][k]) + coef[9][i][j][k] * (A[(t) % 2][i][j][k - 3] + A[(t) % 2][i][j][k + 3]) + coef[10][i][j][k] * (A[(t) % 2][i - 4][j][k] + A[(t) % 2][i + 4][j][k]) + coef[11][i][j][k] * (A[(t) % 2][i][j - 4][k] + A[(t) % 2][i][j + 4][k]) + coef[12][i][j][k] * (A[(t) % 2][i][j][k - 4] + A[(t) % 2][i][j][k + 4]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 2048; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz - 4; i++) { for (j = 4; j < Ny - 4; j++) { for (k = 4; k < Nx - 4; k++) { A[(t + 1) % 2][i][j][k] = coef[0][i][j][k] * A[(t) % 2][i][j][k] + coef[1][i][j][k] * (A[(t) % 2][i - 1][j][k] + A[(t) % 2][i + 1][j][k]) + coef[2][i][j][k] * (A[(t) % 2][i][j - 1][k] + A[(t) % 2][i][j + 1][k]) + coef[3][i][j][k] * (A[(t) % 
2][i][j][k - 1] + A[(t) % 2][i][j][k + 1]) + coef[4][i][j][k] * (A[(t) % 2][i - 2][j][k] + A[(t) % 2][i + 2][j][k]) + coef[5][i][j][k] * (A[(t) % 2][i][j - 2][k] + A[(t) % 2][i][j + 2][k]) + coef[6][i][j][k] * (A[(t) % 2][i][j][k - 2] + A[(t) % 2][i][j][k + 2]) + coef[7][i][j][k] * (A[(t) % 2][i - 3][j][k] + A[(t) % 2][i + 3][j][k]) + coef[8][i][j][k] * (A[(t) % 2][i][j - 3][k] + A[(t) % 2][i][j + 3][k]) + coef[9][i][j][k] * (A[(t) % 2][i][j][k - 3] + A[(t) % 2][i][j][k + 3]) + coef[10][i][j][k] * (A[(t) % 2][i - 4][j][k] + A[(t) % 2][i + 4][j][k]) + coef[11][i][j][k] * (A[(t) % 2][i][j - 4][k] + A[(t) % 2][i][j + 4][k]) + coef[12][i][j][k] * (A[(t) % 2][i][j][k - 4] + A[(t) % 2][i][j][k + 4]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
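The differences between the two formatted variants above are confined to the OpenMP bits: the _OPENMP-guarded omp_get_max_threads() query and the LIKWID instrumentation, where in the OpenMP build every thread must register itself and open/close the marker region, hence the #pragma omp parallel blocks around the marker macros. The skeleton, pulled out of the benchmark (measured_kernel() is a hypothetical stand-in for the timed sweeps; compile with -DLIKWID_PERFMON and run under likwid-perfctr for the markers to be active):

#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

static void measured_kernel(void) { /* timed work goes here */ }

void run_measured(void)
{
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  measured_kernel();
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
}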
GB_binop__rdiv_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int8) // A*D function (colscale): GB (_AxD__rdiv_int8) // D*A function (rowscale): GB (_DxB__rdiv_int8) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int8) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int8) // C=scalar+B GB (_bind1st__rdiv_int8) // C=scalar+B' GB (_bind1st_tran__rdiv_int8) // C=A+scalar GB (_bind2nd__rdiv_int8) // C=A'+scalar GB (_bind2nd_tran__rdiv_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 8) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_SIGNED (y, x, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_INT8 || GxB_NO_RDIV_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_SIGNED (bij, x, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_SIGNED (y, aij, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (aij, x, 8) ; \ } GrB_Info GB (_bind1st_tran__rdiv_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (y, aij, 8) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
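For orientation, a stand-alone analogue of _bind1st__rdiv_int8 above: the scalar is bound as the first operand of rdiv, so each output entry is b[p] / x, computed over a bitmap-filtered array with one statically scheduled chunk per thread. The real kernel's GB_IDIV_SIGNED also guards x == 0 and INT8_MIN / -1; this sketch simply assumes a benign divisor:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the bind1st pattern: c[p] = rdiv(x, b[p]) = b[p] / x.
   Bb == NULL means "all entries present" (a full vector). */
static void bind1st_rdiv_int8(int8_t *Cx, int8_t x, const int8_t *Bx,
                              const int8_t *Bb, int64_t bnz, int nthreads)
{
  int64_t p;
#pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < bnz; p++)
  {
    if (Bb != NULL && !Bb[p])
      continue;                        /* entry absent from the bitmap */
    Cx[p] = (int8_t) (Bx[p] / x);      /* the bound scalar is the divisor */
  }
}

int main(void)
{
  int8_t B[4] = { 8, -6, 4, 2 }, C[4];
  bind1st_rdiv_int8(C, 2, B, NULL, 4, 2);
  printf("%d %d %d %d\n", C[0], C[1], C[2], C[3]);
  return 0;
}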
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int8) // A*D function (colscale): GB (_AxD__rdiv_int8) // D*A function (rowscale): GB (_DxB__rdiv_int8) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int8) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int8) // C=scalar+B GB (_bind1st__rdiv_int8) // C=scalar+B' GB (_bind1st_tran__rdiv_int8) // C=A+scalar GB (_bind2nd__rdiv_int8) // C=A'+scalar GB (_bind2nd_tran__rdiv_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 8) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_SIGNED (y, x, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_INT8 || GxB_NO_RDIV_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_SIGNED (bij, x, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_SIGNED (y, aij, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (aij, x, 8) ; \ } GrB_Info GB (_bind1st_tran__rdiv_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (y, aij, 8) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
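The #undef/#define dance around GB_CAST_OP and the repeated #include "GB_unop_transpose.c" are how one template loop is stamped out once per operator: the template file references the macro, and each kernel redefines it before re-including. A toy version of the idiom, using a body macro in place of a re-included template file (names are illustrative, not GraphBLAS'):

#include <stdio.h>

/* LOOP_BODY plays the role of GB_CAST_OP; APPLY_TEMPLATE stands in for
   re-including the template after redefining it. The macro is expanded
   at each use site, so each function gets its own operation. */
#define APPLY_TEMPLATE(Z, A, n)            \
  for (int i = 0; i < (n); i++)            \
    LOOP_BODY((Z)[i], (A)[i]);

#define LOOP_BODY(z, a) z = (a) * 2
static void twice(int *Z, const int *A, int n) { APPLY_TEMPLATE(Z, A, n) }

#undef LOOP_BODY
#define LOOP_BODY(z, a) z = (a) + 1
static void inc(int *Z, const int *A, int n) { APPLY_TEMPLATE(Z, A, n) }

int main(void)
{
  int a[3] = { 1, 2, 3 }, z[3];
  twice(z, a, 3); printf("%d %d %d\n", z[0], z[1], z[2]);  /* 2 4 6 */
  inc(z, a, 3);   printf("%d %d %d\n", z[0], z[1], z[2]);  /* 2 3 4 */
  return 0;
}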
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int8) // A*D function (colscale): GB (_AxD__rdiv_int8) // D*A function (rowscale): GB (_DxB__rdiv_int8) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int8) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int8) // C=scalar+B GB (_bind1st__rdiv_int8) // C=scalar+B' GB (_bind1st_tran__rdiv_int8) // C=A+scalar GB (_bind2nd__rdiv_int8) // C=A'+scalar GB (_bind2nd_tran__rdiv_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 8) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_SIGNED (y, x, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_INT8 || GxB_NO_RDIV_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_SIGNED (bij, x, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_SIGNED (y, aij, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (aij, x, 8) ; \ } GrB_Info GB (_bind1st_tran__rdiv_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (y, aij, 8) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
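The generated kernels above all reduce to one scalar operation: RDIV is ordinary division with its operands swapped, z = y / x, which GB_BINOP encodes by expanding to GB_IDIV_SIGNED (y, x, 8). A minimal illustrative sketch follows; safe_idiv8 is a hypothetical stand-in, not the SuiteSparse macro, assuming division by zero saturates to an "integer infinity" and INT8_MIN / -1 wraps instead of trapping. The real definition lives in the library's math headers and may differ.

/* Illustrative sketch only -- NOT the SuiteSparse GB_IDIV_SIGNED macro.
 * safe_idiv8 guards the two cases plain C division leaves undefined
 * for int8_t: division by zero and INT8_MIN / -1. */
#include <stdint.h>

static int8_t safe_idiv8(int8_t x, int8_t y) {
  if (y == 0) {
    /* assumed convention: saturate to an "integer infinity" */
    return (x == 0) ? 0 : ((x < 0) ? INT8_MIN : INT8_MAX);
  }
  if (x == INT8_MIN && y == -1) {
    return INT8_MIN; /* -(-128) overflows int8_t; wrap instead of trap */
  }
  return (int8_t)(x / y);
}

/* rdiv(x, y) == div(y, x): the operand flip is the whole operator,
 * exactly what GB_BINOP above does with GB_IDIV_SIGNED (y, x, 8). */
static int8_t rdiv8(int8_t x, int8_t y) { return safe_idiv8(y, x); }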
relu.c
#include <omp.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>

void print(const float *x, const int len) {
  assert(x && len>0);
  for(int idx=0; idx<len; ++idx) {
    printf("%.2f ", (float)x[idx]);
  }
  printf("\n");
  return;
}

float *relu(float *x, const int len, float *res) {
  assert(x && len>0 && res);
  // omp_get_num_threads() reports 1 outside a parallel region;
  // omp_get_max_threads() gives the team size the loop below will use
  int nthreads = omp_get_max_threads();
  printf("Number of threads = %d\n", nthreads);
#pragma omp parallel for
  for(int idx=0; idx<len; ++idx) {
    res[idx] = x[idx]>0 ? x[idx] : 0;
  }
  return res;
}

int main(int argc, char *argv[]) {
  // init
  const size_t n = 10;
  float *x = calloc(n, sizeof(float));
  // memset writes bytes, so memset(x, -1, ...) would fill the floats
  // with NaN bit patterns, not -1.0f; initialize explicitly instead
  for(size_t i=0; i<n; ++i) x[i] = -1.0f;
  print(x, n);
  // relu
  x = relu(x, n, x);
  print(x, n);
  if(x) free(x);
  x = NULL;
  return 0;
}
#include <omp.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>

void print(const float *x, const int len) {
  assert(x && len > 0);
  for (int idx = 0; idx < len; ++idx) {
    printf("%.2f ", (float)x[idx]);
  }
  printf("\n");
  return;
}

float *
relu(float *x, const int len, float *res) {
  assert(x && len > 0 && res);
  //omp_get_num_threads() reports 1 outside a parallel region;
  //omp_get_max_threads() gives the configured team size
  int nthreads = omp_get_max_threads();
  printf("Number of threads = %d\n", nthreads);
  for (int idx = 0; idx < len; ++idx) {
    res[idx] = x[idx] > 0 ? x[idx] : 0;
  }
  return res;
}

int main(int argc, char *argv[]) {
  //init
  const size_t n = 10;
  float *x = calloc(n, sizeof(float));
  //memset writes bytes, so memset(x, -1, ...) would fill the floats
  //with NaN bit patterns, not -1.0f; initialize explicitly instead
  for (size_t i = 0; i < n; ++i) x[i] = -1.0f;
  print(x, n);
  //relu
  x = relu(x, n, x);
  print(x, n);
  if (x) free(x);
  x = NULL;
  return 0;
}
#include <omp.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>

void print(const float *x, const int len) {
  assert(x && len > 0);
  for (int idx = 0; idx < len; ++idx) {
    printf("%.2f ", (float)x[idx]);
  }
  printf("\n");
  return;
}

float *
relu(float *x, const int len, float *res) {
  assert(x && len > 0 && res);
  //omp_get_num_threads() reports 1 outside a parallel region;
  //omp_get_max_threads() gives the team size the loop below will use
  int nthreads = omp_get_max_threads();
  printf("Number of threads = %d\n", nthreads);
#pragma omp parallel for
  for (int idx = 0; idx < len; ++idx) {
    res[idx] = x[idx] > 0 ? x[idx] : 0;
  }
  return res;
}

int main(int argc, char *argv[]) {
  //init
  const size_t n = 10;
  float *x = calloc(n, sizeof(float));
  //memset writes bytes, so memset(x, -1, ...) would fill the floats
  //with NaN bit patterns, not -1.0f; initialize explicitly instead
  for (size_t i = 0; i < n; ++i) x[i] = -1.0f;
  print(x, n);
  //relu
  x = relu(x, n, x);
  print(x, n);
  if (x) free(x);
  x = NULL;
  return 0;
}
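A side note the relu listing above illustrates: omp_get_num_threads() is only meaningful inside a parallel region; outside one it always reports 1. A minimal sketch using nothing beyond the standard OpenMP API, querying the team size from within the region:

#include <omp.h>
#include <stdio.h>

int main(void) {
  printf("outside: %d thread(s)\n", omp_get_num_threads()); /* always 1 */
  #pragma omp parallel
  {
    /* one thread prints the actual team size */
    #pragma omp single
    printf("inside:  %d thread(s)\n", omp_get_num_threads());
  }
  return 0;
}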
critical-3.c
// { dg-do compile }
// { dg-options "-fopenmp -fdump-tree-ompexp" }

void bar(void);

void foo(void)
{
#pragma omp critical (xyzzy)
  bar();
}

// { dg-final { scan-tree-dump-times "\\&\\.gomp_critical_user_xyzzy" 2 "ompexp" } }
// { dg-do compile }
// { dg-options "-fopenmp -fdump-tree-ompexp" }

void bar(void);

void foo(void)
{
  bar();
}

// { dg-final { scan-tree-dump-times "\\&\\.gomp_critical_user_xyzzy" 2 "ompexp" } }
// { dg-do compile }
// { dg-options "-fopenmp -fdump-tree-ompexp" }

void bar(void);

void foo(void)
{
#pragma omp critical (xyzzy)
  bar();
}

// { dg-final { scan-tree-dump-times "\\&\\.gomp_critical_user_xyzzy" 2 "ompexp" } }
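The dg-final scan above counts two references to the lock variable GCC synthesizes for the critical section named xyzzy (one to acquire it, one to release it). For context, a minimal sketch of how named critical sections behave: sections with different names use different locks and may run concurrently, while sections sharing a name (or both left unnamed) serialize against each other. The names lock_a and lock_b are illustrative, not from the test above.

#include <omp.h>

int a = 0, b = 0;

void update(void) {
  #pragma omp parallel
  {
    #pragma omp critical (lock_a)
    a++;   /* serialized with other (lock_a) sections only */
    #pragma omp critical (lock_b)
    b++;   /* independent lock: no contention with lock_a */
  }
}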
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) { for (t4=max(max(max(0,ceild(t1-31,32)),ceild(8*t2-Nz-124,128)),ceild(4*t3-Ny-124,128));t4<=min(min(min(min(floord(4*t3+Nx,128),floord(Nt+Nx-4,128)),floord(4*t1+Nx+5,128)),floord(8*t2+Nx+4,128)),floord(8*t1-8*t2+Nz+Nx+3,128));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),128*t4+126),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) { for (t4=max(max(max(0,ceild(t1-31,32)),ceild(8*t2-Nz-124,128)),ceild(4*t3-Ny-124,128));t4<=min(min(min(min(floord(4*t3+Nx,128),floord(Nt+Nx-4,128)),floord(4*t1+Nx+5,128)),floord(8*t2+Nx+4,128)),floord(8*t1-8*t2+Nz+Nx+3,128));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),128*t4+126),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) { for (t4=max(max(max(0,ceild(t1-31,32)),ceild(8*t2-Nz-124,128)),ceild(4*t3-Ny-124,128));t4<=min(min(min(min(floord(4*t3+Nx,128),floord(Nt+Nx-4,128)),floord(4*t1+Nx+5,128)),floord(8*t2+Nx+4,128)),floord(8*t1-8*t2+Nz+Nx+3,128));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),128*t4+126),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
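For reference, here is the kernel the tiled CLooG nest above computes, stripped of the tiling and bound bookkeeping: an order-1 3D 7-point Jacobi update, double-buffered over t % 2, with 6 additions and 2 multiplications per point, matching the "Addition: 6 && Multiplication: 2" comment in the source. A minimal untiled sketch (same array layout as the file):

/* Untiled reference for the tiled stencil above. */
void stencil_3d7pt(double ****A, int Nt, int Nz, int Ny, int Nx,
                   double alpha, double beta) {
  for (int t = 0; t < Nt - 1; t++)
    for (int i = 1; i < Nz - 1; i++)
      for (int j = 1; j < Ny - 1; j++)
        for (int k = 1; k < Nx - 1; k++)
          A[(t + 1) % 2][i][j][k] =
              alpha * A[t % 2][i][j][k] +
              beta * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k] +
                      A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k] +
                      A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]);
}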
mobilenet_224.c
/* Pretrained MobileNet Convolutional Neural Network in C language and OpenMP API GitHUB Page: https://github.com/jcanore/vgg16 Author: ZFTurbo/jocare Compilation: gcc -O3 MobileNet_CPU_cifar.c -lm -fopenmp -o MobileNet_CPU_cifar Usage: MobileNet_CPU_cifar <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)> Example: MobileNet_CPU_cifar ../../weights/weights.txt" ../../img/image_list.txt results_imagenet_conv.txt 1 */ #include <ctype.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> double get_seconds(struct timeval tStart, struct timeval tEnd) { return ((tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec - tStart.tv_usec) / 1.e6; } #define SIZE 224 #define CONV_SIZE 3 #define CONV_LEVELS 27 //#define _CRT_SECURE_NO_WARNINGS 1 // precompile variables // assure default values if nothing provided #ifndef SPARSE_CONVOLUTIONS #define SPARSE_CONVOLUTIONS 0 // default dense convolutions #endif // SPARSE_CONVOLUTIONS #ifndef FIRST_CONV_SPARSE #define FIRST_CONV_SPARSE 0 // this is almost never 1 #endif // FIRST_CONV_SPARSE #ifndef SPARSE_FULLY_CONNECTED #define SPARSE_FULLY_CONNECTED 0 // this is not implemented yet #endif // SPARSE_FULLY_CONNECTED #ifndef FISHER_PRUNING #define FISHER_PRUNING \ 0 // set for fisher pruning, all previous variables changed to dense #endif // FISHER_PRUNING #ifndef NUMBER_OF_THREADS #define NUMBER_OF_THREADS 1 // number of threads to run on //#define NUMBER_OF_THREADS omp_get_num_procs() - 1 #endif // NUMBER_OF_THREADS static double pw_conv_time = 0.0; static double dense_time = 0.0; /****************************************************************************************************************************/ int im_sizes[27] = {224, 224, 16, 16, 16, 16, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2}; int strides[26] = {1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1}; int mem_block_shape[3] = { 1024, 224, 224}; // allocate the absolute maximum amount of space we will need float ***block1; float ***block2; float *****wc; // weights convolution float ***wd; // weights dense float **bd; // biases dense float **batchnorm_weights; float **batchnorm_biases; float **batchnorm_means; // running mean and variance from training used to // estimate population statistics float **batchnorm_vars; int mem_block_dense_shape = { 1024 * 2 * 2}; // size of output from last convolutional layer float *mem_block1_dense; float *mem_block2_dense; #if SPARSE_CONVOLUTIONS // sparse conv csr_t ****wc_sparse; #endif // SPARSE_CONVOLUTIONS #if FISHER_PRUNING #define SPARSE_CONVOLUTIONS 0 // force dense convolutions /* // ORIGINAL FISHER EXPERIMENTS int cshape[27][4] = { { 224, 3, CONV_SIZE, CONV_SIZE }, { 224, 1, CONV_SIZE, CONV_SIZE }, { 43, 224, 1, 1 }, { 43, 1, CONV_SIZE, CONV_SIZE }, { 85, 43, 1, 1 }, { 85, 1, CONV_SIZE, CONV_SIZE }, { 70, 85, 1, 1 }, { 70, 1, CONV_SIZE, CONV_SIZE }, { 150, 70, 1, 1 }, { 150, 1, CONV_SIZE, CONV_SIZE }, { 69, 150, 1, 1 }, { 69, 1, CONV_SIZE, CONV_SIZE }, { 188, 69, 1, 1 }, { 188, 1, CONV_SIZE, CONV_SIZE }, { 72, 188, 1, 1 }, { 72, 1, CONV_SIZE, CONV_SIZE }, { 122, 72, 1, 1 }, { 122, 1, CONV_SIZE, CONV_SIZE }, { 106, 122, 1, 1 }, { 106, 1, CONV_SIZE, CONV_SIZE }, { 96, 106, 1, 1 }, { 96, 1, CONV_SIZE, CONV_SIZE }, { 81, 96, 1, 1 }, { 81, 1, CONV_SIZE, CONV_SIZE }, { 75, 81, 1, 1 }, { 75, 1, CONV_SIZE, CONV_SIZE }, { 100, 75, 1, 1 } }; int dshape[1][2]= { { 100, 10} }; */ // FIXED 
90% ACCURACY EXPERIMENTS int cshape[27][4] = {{224, 3, CONV_SIZE, CONV_SIZE}, {224, 1, CONV_SIZE, CONV_SIZE}, {43, 224, 1, 1}, {43, 1, CONV_SIZE, CONV_SIZE}, {85, 43, 1, 1}, {85, 1, CONV_SIZE, CONV_SIZE}, {70, 85, 1, 1}, {70, 1, CONV_SIZE, CONV_SIZE}, {150, 70, 1, 1}, {150, 1, CONV_SIZE, CONV_SIZE}, {69, 150, 1, 1}, {69, 1, CONV_SIZE, CONV_SIZE}, {188, 69, 1, 1}, {188, 1, CONV_SIZE, CONV_SIZE}, {72, 188, 1, 1}, {72, 1, CONV_SIZE, CONV_SIZE}, {122, 72, 1, 1}, {122, 1, CONV_SIZE, CONV_SIZE}, {106, 122, 1, 1}, {106, 1, CONV_SIZE, CONV_SIZE}, {96, 106, 1, 1}, {96, 1, CONV_SIZE, CONV_SIZE}, {81, 96, 1, 1}, {81, 1, CONV_SIZE, CONV_SIZE}, {75, 81, 1, 1}, {75, 1, CONV_SIZE, CONV_SIZE}, {100, 75, 1, 1} }; int dshape[1][2] = {{100, 10}}; #else // PLAIN int cshape[27][4] = {{224, 3, CONV_SIZE, CONV_SIZE}, {224, 1, CONV_SIZE, CONV_SIZE}, {64, 224, 1, 1}, {64, 1, CONV_SIZE, CONV_SIZE}, {128, 64, 1, 1}, {128, 1, CONV_SIZE, CONV_SIZE}, {128, 128, 1, 1}, {128, 1, CONV_SIZE, CONV_SIZE}, {256, 128, 1, 1}, {256, 1, CONV_SIZE, CONV_SIZE}, {256, 256, 1, 1}, {256, 1, CONV_SIZE, CONV_SIZE}, {512, 256, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {1024, 512, 1, 1}, {1024, 1, CONV_SIZE, CONV_SIZE}, {1024, 1024, 1, 1}}; int dshape[1][2] = {{1024, 10}}; #endif // FISHER_PRUNING /****************************************************************************************************************************/ void reset_mem_block(float ***mem) { int i, j, k; for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { for (k = 0; k < mem_block_shape[2]; k++) { mem[i][j][k] = 0.0; } } } } /****************************************************************************************************************************/ void reset_mem_block_dense(float *mem) { int i; for (i = 0; i < mem_block_dense_shape; i++) { mem[i] = 0.0; } } /****************************************************************************************************************************/ void init_memory() { int i, j, k, l; int max_channels = 1024; int max_im_size = 224; block1 = malloc(max_channels * sizeof(float **)); block2 = malloc(max_channels * sizeof(float **)); // allocate block memory for (i = 0; i < max_channels; i++) { block1[i] = malloc(max_im_size * sizeof(float *)); block2[i] = malloc(max_im_size * sizeof(float *)); for (j = 0; j < max_im_size; j++) { block1[i][j] = malloc(max_im_size * sizeof(float)); block2[i][j] = malloc(max_im_size * sizeof(float)); } } #if SPARSE_CONVOLUTIONS wc_sparse = (csr_t ****)malloc(CONV_LEVELS * sizeof(csr_t ***)); for (l = 0; l < CONV_LEVELS; l++) { wc_sparse[l] = (csr_t ***)malloc(cshape[l][0] * sizeof(csr_t **)); for (i = 0; i < cshape[l][0]; i++) { wc_sparse[l][i] = (csr_t **)malloc(cshape[l][1] * sizeof(csr_t *)); } } // wc memory allocated below will be freed in read_weights if // SPARSE_CONVOLUTIONS #endif // SPARSE_CONVOLUTIONS wc = malloc(CONV_LEVELS * sizeof(float ****)); // allocate kernel memory for (l = 0; l < CONV_LEVELS; l++) { wc[l] = malloc(cshape[l][0] * sizeof(float ***)); for (i = 0; i < cshape[l][0]; i++) { wc[l][i] = malloc(cshape[l][1] * sizeof(float **)); for (j = 0; j < cshape[l][1]; j++) { wc[l][i][j] = malloc(cshape[l][2] * sizeof(float *)); for (k = 0; k < cshape[l][2]; k++) { wc[l][i][j][k] = malloc(cshape[l][3] * 
sizeof(float)); } } } } // allocate batchnorm memory batchnorm_weights = malloc(27 * sizeof(float *)); batchnorm_biases = malloc(27 * sizeof(float *)); batchnorm_means = malloc(27 * sizeof(float *)); batchnorm_vars = malloc(27 * sizeof(float *)); for (l = 0; l < CONV_LEVELS; l++) { batchnorm_weights[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_biases[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_means[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_vars[l] = malloc(cshape[l][0] * sizeof(float)); } wd = malloc(1 * sizeof(float **)); bd = malloc(1 * sizeof(float *)); for (l = 0; l < 1; l++) { wd[l] = malloc(dshape[l][0] * sizeof(float *)); for (i = 0; i < dshape[l][0]; i++) { wd[l][i] = malloc(dshape[l][1] * sizeof(float)); } bd[l] = malloc(dshape[l][1] * sizeof(float)); } // allocate dense memory mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float)); mem_block2_dense = calloc(mem_block_dense_shape, sizeof(float)); } /****************************************************************************************************************************/ void free_memory() { int i, j, k, l; // Free convolution weights for (l = 0; l < CONV_LEVELS; l++) { #if SPARSE_CONVOLUTIONS for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { free(wc_sparse[l][i][j]); } free(wc_sparse[l][i]); } free(wc_sparse[l]); #else for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); #endif } // free(wc); // free(bc); #if SPARSE_CONVOLUTIONS free(wc_sparse); #else free(wc); #endif // SPARSE_CONVOLUTIONS // Free dense weights for (l = 0; l < 1; l++) { for (i = 0; i < dshape[l][0]; i++) { free(wd[l][i]); } free(wd[l]); free(bd[l]); } free(wd); free(bd); // Free memblocks for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { free(block1[i][j]); free(block2[i][j]); } free(block1[i]); free(block2[i]); } free(block1); free(block2); free(mem_block1_dense); free(mem_block2_dense); } /****************************************************************************************************************************/ void read_weights(char *in_file, int lvls) { float dval; int i, j, k, l, m, z; FILE *iin; int total_lvls_read = 0; // printf("\nin_file es: %s\n\n", in_file); iin = fopen(in_file, "r"); if (iin == NULL) { printf("Weights file %s absent\n", in_file); exit(1); } // Reading convolution weights (store them flipped from begining) // no biases for (l = 0; l < CONV_LEVELS; l++) { printf("Read conv block %d weights\n", l); for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { for (m = 0; m < cshape[l][3]; m++) { fscanf(iin, "%f", &dval); wc[l][i][j][k][m] = dval; } } } } total_lvls_read += 1; } for (z = 0; z < CONV_LEVELS; z++) { // batchnorm weights and biases printf("Read batchnorm block %d weights\n", z); for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); batchnorm_weights[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_biases[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_means[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_vars[z][i] = dval; } } if (total_lvls_read >= lvls && lvls != -1) return; // Reading dense weights 
int num_dense_layers = 1; for (z = 0; z < num_dense_layers; z++) { printf("Read dense block %d weights\n", z); for (i = 0; i < dshape[z][0]; i++) { for (j = 0; j < dshape[z][1]; j++) { fscanf(iin, "%f", &dval); // printf("weight: %i : %f \n", i, dval); wd[z][i][j] = dval; } } for (i = 0; i < dshape[z][1]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); bd[z][i] = dval; } } fclose(iin); /////////////**************** SPARSE ************///////////////////////////// #if SPARSE_CONVOLUTIONS // convert to sparse format for (l = 0; l < CONV_LEVELS; l++) for (i = 0; i < cshape[l][0]; i++) for (j = 0; j < cshape[l][1]; j++) { // printf("going for %d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, // cshape[l][1]); csr_t *a = dense2csr2(cshape[l][2], cshape[l][3], wc[l][i][j]); // print_csr(a); wc_sparse[l][i][j] = a; // printf("done..%d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, // cshape[l][1]); } // Free convolution weights #if FIRST_CONV_SPARSE == 0 l = 0; // allocate new memory for first conv and copy from wc float *****wc_first_conv = (float *****)malloc(1 * sizeof(float ****)); wc_first_conv[l] = (float ****)malloc(cshape[l][0] * sizeof(float ***)); int k1, k2; for (i = 0; i < cshape[l][0]; i++) { wc_first_conv[l][i] = (float ***)malloc(cshape[l][1] * sizeof(float **)); for (j = 0; j < cshape[l][1]; j++) { wc_first_conv[l][i][j] = (float **)malloc(cshape[l][2] * sizeof(float *)); for (k1 = 0; k1 < cshape[l][2]; k1++) { wc_first_conv[l][i][j][k1] = (float *)malloc(cshape[l][3] * sizeof(float)); for (k2 = 0; k2 < cshape[l][3]; k2++) wc_first_conv[l][i][j][k1][k2] = wc[l][i][j][k1][k2]; } } } #endif // FIRST_CONV_SPARSE == 0 // free up all dense conv layer representation for (l = 0; l < CONV_LEVELS; l++) { for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); } free(wc); #if FIRST_CONV_SPARSE == 0 // replace old wc pointer with the data for only first conv layer created // above wc = wc_first_conv; #endif // FIRST_CONV_SPARSE == 0 #endif // SPARSE_CONVOLUTIONS } /****************************************************************************************************************************/ void read_image(char *in_file) { int i, j, l; FILE *iin; float dval; iin = fopen(in_file, "r"); if (iin == NULL) { printf("Image file %s absent\n", in_file); exit(1); } /* Reading image */ for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { for (l = 0; l < 3; l++) { fscanf(iin, "%f", &dval); block1[l][i][j] = dval; } } } } /****************************************************************************************************************************/ void convolution_3_x_3(float **matrix, float **kernel, float **out, int size, int stride) { int i, j; float sum; float zeropad[size + 2][size + 2]; memset(zeropad, 0, ((size + 2) * (size + 2) * sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } for (i = 0; i < size; i = i + stride) { for (j = 0; j < size; j = j + stride) { sum = zeropad[i][j] * kernel[0][0] + zeropad[i][j + 1] * kernel[0][1] + zeropad[i][j + 2] * kernel[0][2] + zeropad[i + 1][j] * kernel[1][0] + zeropad[i + 1][j + 1] * kernel[1][1] + zeropad[i + 1][j + 2] * kernel[1][2] + zeropad[i + 2][j] * kernel[2][0] + zeropad[i + 2][j + 1] * kernel[2][1] + zeropad[i + 2][j + 2] * kernel[2][2]; out[i][j] += sum; } } } 
/****************************************************************************************************************************/ /****************************************************************************************************************************/ void pointwise_convolution(float ****point_kernel, float ***block2, float ***block1, int input_channels, int output_channels, int image_size) { struct timeval start, end; gettimeofday(&start, NULL); int i, j, k, l; float sum; for (i = 0; i < output_channels; i++) { for (j = 0; j < image_size; j++) { for (k = 0; k < image_size; k++) { sum = 0.; for (l = 0; l < input_channels; l++) { sum += block2[l][j][k] * point_kernel[i][l][0] [0]; // 0 because they are always 1x1 filters } block1[i][j][k] = sum; } } } gettimeofday(&end, NULL); pw_conv_time += get_seconds(start, end); } /****************************************************************************************************************************/ void batchnorm_and_relu(float ***in, float ***out, float *weights, float *bias, float *mean, float *var, int num_channels, int image_size) { int channel, i, j; // ((x - mean) * invstd) * w + b #pragma omp parallel for private(channel, i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (channel = 0; channel < num_channels; channel++) { float invstd = 1. / sqrt(var[channel] + 0.000001); for (i = 0; i < image_size; i++) { for (j = 0; j < image_size; j++) { out[channel][i][j] = (weights[channel] * invstd) * in[channel][i][j] + (bias[channel] - ((weights[channel] * mean[channel]) * invstd)); // out[channel][i][j] = ((in[channel][i][j] - mean[channel]) * invstd) * // weights[channel] + bias[channel]; if (out[channel][i][j] < 0.f) out[channel][i][j] = 0.f; } } } } /****************************************************************************************************************************/ void depthwise_convolution(float ***block1, float ***block2, float ****depth_kernel, float ****point_kernel, int level) { int i, j; int input_channels = cshape[level][0]; int output_channels = cshape[level + 1][0]; // printf("level %i: %i ==> %i\n", level, input_channels, output_channels); #pragma omp parallel for private(i) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < input_channels; i++) { #if SPARSE_CONVOLUTIONS convolution_3_x_3_sparse(block1[i], wc_sparse[level][i][0], block2[i], im_sizes[level], strides[level]); #else convolution_3_x_3(block1[i], depth_kernel[i][0], block2[i], im_sizes[level], strides[level]); #endif } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], input_channels, im_sizes[level + 1]); reset_mem_block(block2); level++; // now do linear combination of the elements in output and write them back // into the first memory block #if SPARSE_CONVOLUTIONS #pragma omp parallel for private(i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < output_channels; i++) { for (j = 0; j < input_channels; j++) { pointwise_convolution_sparse(block2[j], wc_sparse[level][i][j], block1[j], im_sizes[level]); } } #else pointwise_convolution(point_kernel, block1, block2, input_channels, output_channels, im_sizes[level]); #endif batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], output_channels, im_sizes[level + 1]); reset_mem_block(block2); } 
/****************************************************************************************************************************/ void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) { int i; for (i = 0; i < size; i++) { out[i] += bs[i]; // printf("%f\n", out[i]); if (relu == 1) { if (out[i] < 0) out[i] = 0.f; } } } /****************************************************************************************************************************/ void flatten(float ***in, float *out, int sh0, int sh1, int sh2) { int i, j, k, total = 0; for (i = 0; i < sh0; i++) { for (j = 0; j < sh1; j++) { for (k = 0; k < sh2; k++) { out[total] = in[i][j][k]; total += 1; } } } } /****************************************************************************************************************************/ void dense(float *in, float **weights, float *out, int sh_in, int sh_out) { struct timeval start, end; gettimeofday(&start, NULL); int i, j; for (i = 0; i < sh_out; i++) { float sum = 0.0; for (j = 0; j < sh_in; j++) { sum += in[j] * weights[j][i]; } out[i] = sum; } gettimeofday(&end, NULL); dense_time += get_seconds(start, end); } /****************************************************************************************************************************/ void write_out_block(int layer, float ***block) { int layer_name = layer; // * 2 - 1; char filename[16]; sprintf(filename, "outputs/output%d", layer_name); FILE *f = fopen(filename, "w"); if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int i = 0; i < 224; i++) { for (int j = 0; j < mem_block_shape[1]; j++) { for (int k = 0; k < mem_block_shape[2]; k++) { fprintf(f, "%f \n", block[i][j][k]); } } } fclose(f); } /****************************************************************************************************************************/ void write_out_layer(int layer) { int layer_name = layer; // * 2 - 1; char filename[7]; sprintf(filename, "layer%d", layer_name); FILE *f = fopen(filename, "w"); int depth = 1; if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int o = 0; o < cshape[layer][0]; o++) { for (int i = 0; i < cshape[layer][1]; i++) { for (int k_h = 0; k_h < cshape[layer][2]; k_h++) { for (int k_w = 0; k_w < cshape[layer][3]; k_w++) { fprintf(f, "%f ", wc[layer][o][i][k_h][k_w]); } } fprintf(f, "\n"); } } fclose(f); layer_name = layer + 1; char filename2[7]; sprintf(filename2, "layer%d", layer_name); // get batchnorms FILE *f2 = fopen(filename2, "w"); if (f2 == NULL) { printf("Error opening file!\n"); exit(1); } for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_weights[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_biases[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_means[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_vars[layer][i]); } fclose(f); } /****************************************************************************************************************************/ void output_predictions(FILE *out, int only_convolution, int size, int cur_size) { int i; int c = 0; if (only_convolution == 1) { // for (i = 0; i < 512*7*7; i++) { for (i = 0; i < size * cur_size * cur_size; i++) { fprintf(out, "%g\n", mem_block1_dense[i]); } } else { double maximum = -1; // dshape[0][1] ==> 10 for (i = 0; i < dshape[0][1]; i++) { fprintf(out, "%g\n", mem_block2_dense[i]); if 
(mem_block1_dense[i] > maximum) { maximum = mem_block2_dense[i]; c = i + 1; } } fprintf(out, "\n"); printf("This image depicts class: %d\n", c); } } /****************************************************************************************************************************/ void get_mobilenet_predict() { int level = 0; int i, j; // normal convolution #pragma omp parallel for private(i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { #if FIRST_CONV_SPARSE convolution_3_x_3_sparse(block1[j], wc_sparse[level][i][j], block2[i], im_sizes[level], 1); #else convolution_3_x_3(block1[j], wc[level][i][j], block2[i], im_sizes[level], 1); #endif } } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], 224, 224); reset_mem_block(block2); // depthwise convolutions for (level = 1; level < (CONV_LEVELS - 1); level = level + 2) { depthwise_convolution(block1, block2, wc[level], wc[level + 1], (level)); } // flatten flatten(block1, mem_block1_dense, cshape[level][0], im_sizes[level], im_sizes[level]); // dense level = 0; dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]); add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 0); reset_mem_block_dense(mem_block1_dense); return; } /****************************************************************************************************************************/ char *trimwhitespace(char *str) { char *end; // Trim leading space while (isspace((unsigned char)*str)) str++; if (*str == 0) // All spaces? return str; // Trim trailing space end = str + strlen(str) - 1; while (end > str && isspace((unsigned char)*end)) end--; // Write new null terminator *(end + 1) = 0; return str; } /****************************************************************************************************************************/ int main(int argc, char *argv[]) { FILE *file_list, *results; char buf[1024]; struct timeval tStart, tEnd; double deltaTime; char *weights_file; char *image_list_file; char *output_file; int lvls = -1; int only_convolution = 0; //----------------------------------------------------------------------- printf("Using %d threads\n", NUMBER_OF_THREADS); if (argc != 4 && argc != 5) { printf( "Usage: <program.exe> <weights file> <images list file> <output file> " "<only_convolution [optional]>\n"); return 0; } weights_file = argv[1]; // printf("%s\n", weights_file); image_list_file = argv[2]; output_file = argv[3]; if (argc == 5) { lvls = 20; only_convolution = 1; } //----------------------------------------------------------------------- init_memory(); file_list = fopen(image_list_file, "r"); if (file_list == NULL) { printf("Check file list location: %s\n", image_list_file); return 1; } results = fopen(output_file, "w"); if (results == NULL) { printf("Couldn't open file for writing: %s\n", output_file); return 1; } gettimeofday(&tStart, NULL); read_weights(weights_file, lvls); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Reading weights: %.3lf sec\n", deltaTime); while (!feof(file_list)) { pw_conv_time = 0.0; dense_time = 0.0; fgets(buf, 1024, file_list); if (strlen(buf) == 0) { break; } // printf("%d\n", strlen(buf)); read_image(trimwhitespace(buf)); gettimeofday(&tStart, NULL); get_mobilenet_predict(); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Infer image %s: %.3lf sec\n", buf, 
deltaTime); printf("pw_conv time: %.3lf sec\n", pw_conv_time); printf("dense time: %.3lf sec\n", dense_time); output_predictions(results, only_convolution, 1024, 1); } // free_memory(); fclose(file_list); return 0; }
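A note on batchnorm_and_relu in the listing above: it applies inference-time batch normalization in folded form. Since (x - mean) / sqrt(var + eps) * w + b == scale * x + shift with per-channel constants, the inner loop is a single multiply-add plus the ReLU clamp. A sketch of the fold follows; the file recomputes invstd per channel on every call, so precomputing scale/shift once per layer as below is an optional refactor, not something the file currently does, and eps is a parameter here where the file hard-codes 0.000001.

#include <math.h>

/* Fold batchnorm parameters into one scale/shift pair per channel. */
void fold_bn(const float *w, const float *b, const float *mean,
             const float *var, float eps, int channels,
             float *scale, float *shift) {
  for (int c = 0; c < channels; c++) {
    float invstd = 1.0f / sqrtf(var[c] + eps);
    scale[c] = w[c] * invstd;                 /* multiplies each input */
    shift[c] = b[c] - w[c] * mean[c] * invstd; /* added afterwards */
  }
}

/* per element, per channel c: y = fmaxf(scale[c] * x + shift[c], 0.0f) */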
/* Pretrained MobileNet Convolutional Neural Network in C language and OpenMP API GitHUB Page: https://github.com/jcanore/vgg16 Author: ZFTurbo/jocare Compilation: gcc -O3 MobileNet_CPU_cifar.c -lm -fopenmp -o MobileNet_CPU_cifar Usage: MobileNet_CPU_cifar <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)> Example: MobileNet_CPU_cifar ../../weights/weights.txt" ../../img/image_list.txt results_imagenet_conv.txt 1 */ #include <ctype.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> double get_seconds(struct timeval tStart, struct timeval tEnd) { return ((tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec - tStart.tv_usec) / 1.e6; } #define SIZE 224 #define CONV_SIZE 3 #define CONV_LEVELS 27 //#define _CRT_SECURE_NO_WARNINGS 1 // precompile variables // assure default values if nothing provided #ifndef SPARSE_CONVOLUTIONS #define SPARSE_CONVOLUTIONS 0 // default dense convolutions #endif // SPARSE_CONVOLUTIONS #ifndef FIRST_CONV_SPARSE #define FIRST_CONV_SPARSE 0 // this is almost never 1 #endif // FIRST_CONV_SPARSE #ifndef SPARSE_FULLY_CONNECTED #define SPARSE_FULLY_CONNECTED 0 // this is not implemented yet #endif // SPARSE_FULLY_CONNECTED #ifndef FISHER_PRUNING #define FISHER_PRUNING \ 0 // set for fisher pruning, all previous variables changed to dense #endif // FISHER_PRUNING #ifndef NUMBER_OF_THREADS #define NUMBER_OF_THREADS 1 // number of threads to run on //#define NUMBER_OF_THREADS omp_get_num_procs() - 1 #endif // NUMBER_OF_THREADS static double pw_conv_time = 0.0; static double dense_time = 0.0; /****************************************************************************************************************************/ int im_sizes[27] = {224, 224, 16, 16, 16, 16, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2}; int strides[26] = {1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1}; int mem_block_shape[3] = { 1024, 224, 224}; // allocate the absolute maximum amount of space we will need float ***block1; float ***block2; float *****wc; // weights convolution float ***wd; // weights dense float **bd; // biases dense float **batchnorm_weights; float **batchnorm_biases; float **batchnorm_means; // running mean and variance from training used to // estimate population statistics float **batchnorm_vars; int mem_block_dense_shape = { 1024 * 2 * 2}; // size of output from last convolutional layer float *mem_block1_dense; float *mem_block2_dense; #if SPARSE_CONVOLUTIONS // sparse conv csr_t ****wc_sparse; #endif // SPARSE_CONVOLUTIONS #if FISHER_PRUNING #define SPARSE_CONVOLUTIONS 0 // force dense convolutions /* // ORIGINAL FISHER EXPERIMENTS int cshape[27][4] = { { 224, 3, CONV_SIZE, CONV_SIZE }, { 224, 1, CONV_SIZE, CONV_SIZE }, { 43, 224, 1, 1 }, { 43, 1, CONV_SIZE, CONV_SIZE }, { 85, 43, 1, 1 }, { 85, 1, CONV_SIZE, CONV_SIZE }, { 70, 85, 1, 1 }, { 70, 1, CONV_SIZE, CONV_SIZE }, { 150, 70, 1, 1 }, { 150, 1, CONV_SIZE, CONV_SIZE }, { 69, 150, 1, 1 }, { 69, 1, CONV_SIZE, CONV_SIZE }, { 188, 69, 1, 1 }, { 188, 1, CONV_SIZE, CONV_SIZE }, { 72, 188, 1, 1 }, { 72, 1, CONV_SIZE, CONV_SIZE }, { 122, 72, 1, 1 }, { 122, 1, CONV_SIZE, CONV_SIZE }, { 106, 122, 1, 1 }, { 106, 1, CONV_SIZE, CONV_SIZE }, { 96, 106, 1, 1 }, { 96, 1, CONV_SIZE, CONV_SIZE }, { 81, 96, 1, 1 }, { 81, 1, CONV_SIZE, CONV_SIZE }, { 75, 81, 1, 1 }, { 75, 1, CONV_SIZE, CONV_SIZE }, { 100, 75, 1, 1 } }; int dshape[1][2]= { { 100, 10} }; */ // FIXED 
90% ACCURACY EXPERIMENTS int cshape[27][4] = {{224, 3, CONV_SIZE, CONV_SIZE}, {224, 1, CONV_SIZE, CONV_SIZE}, {43, 224, 1, 1}, {43, 1, CONV_SIZE, CONV_SIZE}, {85, 43, 1, 1}, {85, 1, CONV_SIZE, CONV_SIZE}, {70, 85, 1, 1}, {70, 1, CONV_SIZE, CONV_SIZE}, {150, 70, 1, 1}, {150, 1, CONV_SIZE, CONV_SIZE}, {69, 150, 1, 1}, {69, 1, CONV_SIZE, CONV_SIZE}, {188, 69, 1, 1}, {188, 1, CONV_SIZE, CONV_SIZE}, {72, 188, 1, 1}, {72, 1, CONV_SIZE, CONV_SIZE}, {122, 72, 1, 1}, {122, 1, CONV_SIZE, CONV_SIZE}, {106, 122, 1, 1}, {106, 1, CONV_SIZE, CONV_SIZE}, {96, 106, 1, 1}, {96, 1, CONV_SIZE, CONV_SIZE}, {81, 96, 1, 1}, {81, 1, CONV_SIZE, CONV_SIZE}, {75, 81, 1, 1}, {75, 1, CONV_SIZE, CONV_SIZE}, {100, 75, 1, 1} }; int dshape[1][2] = {{100, 10}}; #else // PLAIN int cshape[27][4] = {{224, 3, CONV_SIZE, CONV_SIZE}, {224, 1, CONV_SIZE, CONV_SIZE}, {64, 224, 1, 1}, {64, 1, CONV_SIZE, CONV_SIZE}, {128, 64, 1, 1}, {128, 1, CONV_SIZE, CONV_SIZE}, {128, 128, 1, 1}, {128, 1, CONV_SIZE, CONV_SIZE}, {256, 128, 1, 1}, {256, 1, CONV_SIZE, CONV_SIZE}, {256, 256, 1, 1}, {256, 1, CONV_SIZE, CONV_SIZE}, {512, 256, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {1024, 512, 1, 1}, {1024, 1, CONV_SIZE, CONV_SIZE}, {1024, 1024, 1, 1}}; int dshape[1][2] = {{1024, 10}}; #endif // FISHER_PRUNING /****************************************************************************************************************************/ void reset_mem_block(float ***mem) { int i, j, k; for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { for (k = 0; k < mem_block_shape[2]; k++) { mem[i][j][k] = 0.0; } } } } /****************************************************************************************************************************/ void reset_mem_block_dense(float *mem) { int i; for (i = 0; i < mem_block_dense_shape; i++) { mem[i] = 0.0; } } /****************************************************************************************************************************/ void init_memory() { int i, j, k, l; int max_channels = 1024; int max_im_size = 224; block1 = malloc(max_channels * sizeof(float **)); block2 = malloc(max_channels * sizeof(float **)); // allocate block memory for (i = 0; i < max_channels; i++) { block1[i] = malloc(max_im_size * sizeof(float *)); block2[i] = malloc(max_im_size * sizeof(float *)); for (j = 0; j < max_im_size; j++) { block1[i][j] = malloc(max_im_size * sizeof(float)); block2[i][j] = malloc(max_im_size * sizeof(float)); } } #if SPARSE_CONVOLUTIONS wc_sparse = (csr_t ****)malloc(CONV_LEVELS * sizeof(csr_t ***)); for (l = 0; l < CONV_LEVELS; l++) { wc_sparse[l] = (csr_t ***)malloc(cshape[l][0] * sizeof(csr_t **)); for (i = 0; i < cshape[l][0]; i++) { wc_sparse[l][i] = (csr_t **)malloc(cshape[l][1] * sizeof(csr_t *)); } } // wc memory allocated below will be freed in read_weights if // SPARSE_CONVOLUTIONS #endif // SPARSE_CONVOLUTIONS wc = malloc(CONV_LEVELS * sizeof(float ****)); // allocate kernel memory for (l = 0; l < CONV_LEVELS; l++) { wc[l] = malloc(cshape[l][0] * sizeof(float ***)); for (i = 0; i < cshape[l][0]; i++) { wc[l][i] = malloc(cshape[l][1] * sizeof(float **)); for (j = 0; j < cshape[l][1]; j++) { wc[l][i][j] = malloc(cshape[l][2] * sizeof(float *)); for (k = 0; k < cshape[l][2]; k++) { wc[l][i][j][k] = malloc(cshape[l][3] * 
sizeof(float)); } } } } // allocate batchnorm memory batchnorm_weights = malloc(27 * sizeof(float *)); batchnorm_biases = malloc(27 * sizeof(float *)); batchnorm_means = malloc(27 * sizeof(float *)); batchnorm_vars = malloc(27 * sizeof(float *)); for (l = 0; l < CONV_LEVELS; l++) { batchnorm_weights[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_biases[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_means[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_vars[l] = malloc(cshape[l][0] * sizeof(float)); } wd = malloc(1 * sizeof(float **)); bd = malloc(1 * sizeof(float *)); for (l = 0; l < 1; l++) { wd[l] = malloc(dshape[l][0] * sizeof(float *)); for (i = 0; i < dshape[l][0]; i++) { wd[l][i] = malloc(dshape[l][1] * sizeof(float)); } bd[l] = malloc(dshape[l][1] * sizeof(float)); } // allocate dense memory mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float)); mem_block2_dense = calloc(mem_block_dense_shape, sizeof(float)); } /****************************************************************************************************************************/ void free_memory() { int i, j, k, l; // Free convolution weights for (l = 0; l < CONV_LEVELS; l++) { #if SPARSE_CONVOLUTIONS for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { free(wc_sparse[l][i][j]); } free(wc_sparse[l][i]); } free(wc_sparse[l]); #else for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); #endif } // free(wc); // free(bc); #if SPARSE_CONVOLUTIONS free(wc_sparse); #else free(wc); #endif // SPARSE_CONVOLUTIONS // Free dense weights for (l = 0; l < 1; l++) { for (i = 0; i < dshape[l][0]; i++) { free(wd[l][i]); } free(wd[l]); free(bd[l]); } free(wd); free(bd); // Free memblocks for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { free(block1[i][j]); free(block2[i][j]); } free(block1[i]); free(block2[i]); } free(block1); free(block2); free(mem_block1_dense); free(mem_block2_dense); } /****************************************************************************************************************************/ void read_weights(char *in_file, int lvls) { float dval; int i, j, k, l, m, z; FILE *iin; int total_lvls_read = 0; // printf("\nin_file es: %s\n\n", in_file); iin = fopen(in_file, "r"); if (iin == NULL) { printf("Weights file %s absent\n", in_file); exit(1); } // Reading convolution weights (store them flipped from begining) // no biases for (l = 0; l < CONV_LEVELS; l++) { printf("Read conv block %d weights\n", l); for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { for (m = 0; m < cshape[l][3]; m++) { fscanf(iin, "%f", &dval); wc[l][i][j][k][m] = dval; } } } } total_lvls_read += 1; } for (z = 0; z < CONV_LEVELS; z++) { // batchnorm weights and biases printf("Read batchnorm block %d weights\n", z); for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); batchnorm_weights[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_biases[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_means[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_vars[z][i] = dval; } } if (total_lvls_read >= lvls && lvls != -1) return; // Reading dense weights 
int num_dense_layers = 1; for (z = 0; z < num_dense_layers; z++) { printf("Read dense block %d weights\n", z); for (i = 0; i < dshape[z][0]; i++) { for (j = 0; j < dshape[z][1]; j++) { fscanf(iin, "%f", &dval); // printf("weight: %i : %f \n", i, dval); wd[z][i][j] = dval; } } for (i = 0; i < dshape[z][1]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); bd[z][i] = dval; } } fclose(iin); /////////////**************** SPARSE ************///////////////////////////// #if SPARSE_CONVOLUTIONS // convert to sparse format for (l = 0; l < CONV_LEVELS; l++) for (i = 0; i < cshape[l][0]; i++) for (j = 0; j < cshape[l][1]; j++) { // printf("going for %d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, // cshape[l][1]); csr_t *a = dense2csr2(cshape[l][2], cshape[l][3], wc[l][i][j]); // print_csr(a); wc_sparse[l][i][j] = a; // printf("done..%d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, // cshape[l][1]); } // Free convolution weights #if FIRST_CONV_SPARSE == 0 l = 0; // allocate new memory for first conv and copy from wc float *****wc_first_conv = (float *****)malloc(1 * sizeof(float ****)); wc_first_conv[l] = (float ****)malloc(cshape[l][0] * sizeof(float ***)); int k1, k2; for (i = 0; i < cshape[l][0]; i++) { wc_first_conv[l][i] = (float ***)malloc(cshape[l][1] * sizeof(float **)); for (j = 0; j < cshape[l][1]; j++) { wc_first_conv[l][i][j] = (float **)malloc(cshape[l][2] * sizeof(float *)); for (k1 = 0; k1 < cshape[l][2]; k1++) { wc_first_conv[l][i][j][k1] = (float *)malloc(cshape[l][3] * sizeof(float)); for (k2 = 0; k2 < cshape[l][3]; k2++) wc_first_conv[l][i][j][k1][k2] = wc[l][i][j][k1][k2]; } } } #endif // FIRST_CONV_SPARSE == 0 // free up all dense conv layer representation for (l = 0; l < CONV_LEVELS; l++) { for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); } free(wc); #if FIRST_CONV_SPARSE == 0 // replace old wc pointer with the data for only first conv layer created // above wc = wc_first_conv; #endif // FIRST_CONV_SPARSE == 0 #endif // SPARSE_CONVOLUTIONS } /****************************************************************************************************************************/ void read_image(char *in_file) { int i, j, l; FILE *iin; float dval; iin = fopen(in_file, "r"); if (iin == NULL) { printf("Image file %s absent\n", in_file); exit(1); } /* Reading image */ for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { for (l = 0; l < 3; l++) { fscanf(iin, "%f", &dval); block1[l][i][j] = dval; } } } } /****************************************************************************************************************************/ void convolution_3_x_3(float **matrix, float **kernel, float **out, int size, int stride) { int i, j; float sum; float zeropad[size + 2][size + 2]; memset(zeropad, 0, ((size + 2) * (size + 2) * sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } for (i = 0; i < size; i = i + stride) { for (j = 0; j < size; j = j + stride) { sum = zeropad[i][j] * kernel[0][0] + zeropad[i][j + 1] * kernel[0][1] + zeropad[i][j + 2] * kernel[0][2] + zeropad[i + 1][j] * kernel[1][0] + zeropad[i + 1][j + 1] * kernel[1][1] + zeropad[i + 1][j + 2] * kernel[1][2] + zeropad[i + 2][j] * kernel[2][0] + zeropad[i + 2][j + 1] * kernel[2][1] + zeropad[i + 2][j + 2] * kernel[2][2]; out[i][j] += sum; } } } 
/****************************************************************************************************************************/ /****************************************************************************************************************************/ void pointwise_convolution(float ****point_kernel, float ***block2, float ***block1, int input_channels, int output_channels, int image_size) { struct timeval start, end; gettimeofday(&start, NULL); int i, j, k, l; float sum; for (i = 0; i < output_channels; i++) { for (j = 0; j < image_size; j++) { for (k = 0; k < image_size; k++) { sum = 0.; for (l = 0; l < input_channels; l++) { sum += block2[l][j][k] * point_kernel[i][l][0] [0]; // 0 because they are always 1x1 filters } block1[i][j][k] = sum; } } } gettimeofday(&end, NULL); pw_conv_time += get_seconds(start, end); } /****************************************************************************************************************************/ void batchnorm_and_relu(float ***in, float ***out, float *weights, float *bias, float *mean, float *var, int num_channels, int image_size) { int channel, i, j; // ((x - mean) * invstd) * w + b for (channel = 0; channel < num_channels; channel++) { float invstd = 1. / sqrt(var[channel] + 0.000001); for (i = 0; i < image_size; i++) { for (j = 0; j < image_size; j++) { out[channel][i][j] = (weights[channel] * invstd) * in[channel][i][j] + (bias[channel] - ((weights[channel] * mean[channel]) * invstd)); // out[channel][i][j] = ((in[channel][i][j] - mean[channel]) * invstd) * // weights[channel] + bias[channel]; if (out[channel][i][j] < 0.f) out[channel][i][j] = 0.f; } } } } /****************************************************************************************************************************/ void depthwise_convolution(float ***block1, float ***block2, float ****depth_kernel, float ****point_kernel, int level) { int i, j; int input_channels = cshape[level][0]; int output_channels = cshape[level + 1][0]; // printf("level %i: %i ==> %i\n", level, input_channels, output_channels); for (i = 0; i < input_channels; i++) { #if SPARSE_CONVOLUTIONS convolution_3_x_3_sparse(block1[i], wc_sparse[level][i][0], block2[i], im_sizes[level], strides[level]); #else convolution_3_x_3(block1[i], depth_kernel[i][0], block2[i], im_sizes[level], strides[level]); #endif } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], input_channels, im_sizes[level + 1]); reset_mem_block(block2); level++; // now do linear combination of the elements in output and write them back // into the first memory block #if SPARSE_CONVOLUTIONS for (i = 0; i < output_channels; i++) { for (j = 0; j < input_channels; j++) { pointwise_convolution_sparse(block2[j], wc_sparse[level][i][j], block1[j], im_sizes[level]); } } #else pointwise_convolution(point_kernel, block1, block2, input_channels, output_channels, im_sizes[level]); #endif batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], output_channels, im_sizes[level + 1]); reset_mem_block(block2); } /****************************************************************************************************************************/ void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) { int i; for (i = 0; i < size; i++) { out[i] += bs[i]; // printf("%f\n", out[i]); if (relu == 1) { if
(out[i] < 0) out[i] = 0.f; } } } /****************************************************************************************************************************/ void flatten(float ***in, float *out, int sh0, int sh1, int sh2) { int i, j, k, total = 0; for (i = 0; i < sh0; i++) { for (j = 0; j < sh1; j++) { for (k = 0; k < sh2; k++) { out[total] = in[i][j][k]; total += 1; } } } } /****************************************************************************************************************************/ void dense(float *in, float **weights, float *out, int sh_in, int sh_out) { struct timeval start, end; gettimeofday(&start, NULL); int i, j; for (i = 0; i < sh_out; i++) { float sum = 0.0; for (j = 0; j < sh_in; j++) { sum += in[j] * weights[j][i]; } out[i] = sum; } gettimeofday(&end, NULL); dense_time += get_seconds(start, end); } /****************************************************************************************************************************/ void write_out_block(int layer, float ***block) { int layer_name = layer; // * 2 - 1; char filename[32]; sprintf(filename, "outputs/output%d", layer_name); FILE *f = fopen(filename, "w"); if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int i = 0; i < 224; i++) { for (int j = 0; j < mem_block_shape[1]; j++) { for (int k = 0; k < mem_block_shape[2]; k++) { fprintf(f, "%f \n", block[i][j][k]); } } } fclose(f); } /****************************************************************************************************************************/ void write_out_layer(int layer) { int layer_name = layer; // * 2 - 1; char filename[16]; sprintf(filename, "layer%d", layer_name); FILE *f = fopen(filename, "w"); int depth = 1; if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int o = 0; o < cshape[layer][0]; o++) { for (int i = 0; i < cshape[layer][1]; i++) { for (int k_h = 0; k_h < cshape[layer][2]; k_h++) { for (int k_w = 0; k_w < cshape[layer][3]; k_w++) { fprintf(f, "%f ", wc[layer][o][i][k_h][k_w]); } } fprintf(f, "\n"); } } fclose(f); layer_name = layer + 1; char filename2[16]; sprintf(filename2, "layer%d", layer_name); // get batchnorms FILE *f2 = fopen(filename2, "w"); if (f2 == NULL) { printf("Error opening file!\n"); exit(1); } for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_weights[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_biases[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_means[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_vars[layer][i]); } fclose(f2); } /****************************************************************************************************************************/ void output_predictions(FILE *out, int only_convolution, int size, int cur_size) { int i; int c = 0; if (only_convolution == 1) { // for (i = 0; i < 512*7*7; i++) { for (i = 0; i < size * cur_size * cur_size; i++) { fprintf(out, "%g\n", mem_block1_dense[i]); } } else { double maximum = -1; // dshape[0][1] ==> 10 for (i = 0; i < dshape[0][1]; i++) { fprintf(out, "%g\n", mem_block2_dense[i]); if (mem_block2_dense[i] > maximum) { maximum = mem_block2_dense[i]; c = i + 1; } } fprintf(out, "\n"); printf("This image depicts class: %d\n", c); } } /****************************************************************************************************************************/ void get_mobilenet_predict() { int level = 0;
int i, j; // normal convolution for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { #if FIRST_CONV_SPARSE convolution_3_x_3_sparse(block1[j], wc_sparse[level][i][j], block2[i], im_sizes[level], 1); #else convolution_3_x_3(block1[j], wc[level][i][j], block2[i], im_sizes[level], 1); #endif } } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], 224, 224); reset_mem_block(block2); // depthwise convolutions for (level = 1; level < (CONV_LEVELS - 1); level = level + 2) { depthwise_convolution(block1, block2, wc[level], wc[level + 1], (level)); } // flatten flatten(block1, mem_block1_dense, cshape[level][0], im_sizes[level], im_sizes[level]); // dense level = 0; dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]); add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 0); reset_mem_block_dense(mem_block1_dense); return; } /****************************************************************************************************************************/ char *trimwhitespace(char *str) { char *end; // Trim leading space while (isspace((unsigned char)*str)) str++; if (*str == 0) // All spaces? return str; // Trim trailing space end = str + strlen(str) - 1; while (end > str && isspace((unsigned char)*end)) end--; // Write new null terminator *(end + 1) = 0; return str; } /****************************************************************************************************************************/ int main(int argc, char *argv[]) { FILE *file_list, *results; char buf[1024]; struct timeval tStart, tEnd; double deltaTime; char *weights_file; char *image_list_file; char *output_file; int lvls = -1; int only_convolution = 0; //----------------------------------------------------------------------- printf("Using %d threads\n", NUMBER_OF_THREADS); if (argc != 4 && argc != 5) { printf( "Usage: <program.exe> <weights file> <images list file> <output file> " "<only_convolution [optional]>\n"); return 0; } weights_file = argv[1]; // printf("%s\n", weights_file); image_list_file = argv[2]; output_file = argv[3]; if (argc == 5) { lvls = 20; only_convolution = 1; } //----------------------------------------------------------------------- init_memory(); file_list = fopen(image_list_file, "r"); if (file_list == NULL) { printf("Check file list location: %s\n", image_list_file); return 1; } results = fopen(output_file, "w"); if (results == NULL) { printf("Couldn't open file for writing: %s\n", output_file); return 1; } gettimeofday(&tStart, NULL); read_weights(weights_file, lvls); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Reading weights: %.3lf sec\n", deltaTime); while (!feof(file_list)) { pw_conv_time = 0.0; dense_time = 0.0; if (fgets(buf, 1024, file_list) == NULL) { break; } if (strlen(buf) == 0) { break; } // printf("%d\n", strlen(buf)); read_image(trimwhitespace(buf)); gettimeofday(&tStart, NULL); get_mobilenet_predict(); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Infer image %s: %.3lf sec\n", buf, deltaTime); printf("pw_conv time: %.3lf sec\n", pw_conv_time); printf("dense time: %.3lf sec\n", dense_time); output_predictions(results, only_convolution, 1024, 1); } // free_memory(); fclose(file_list); fclose(results); return 0; }
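The batchnorm_and_relu kernel above implements the folded form noted in its comment, ((x - mean) * invstd) * w + b, but recomputes the per-channel scale and shift for every pixel. Both factors depend only on the channel, so they can be hoisted into two constants, leaving a single multiply-add per element. A minimal standalone sketch of that refinement (the function and parameter names are illustrative, not taken from the file):

#include <math.h>

/* Hypothetical helper, not part of MobileNet_CPU_cifar.c: batchnorm + ReLU
 * with the per-channel scale/shift hoisted out of the pixel loop.
 * y = alpha * x + beta, where alpha = w / sqrt(var + eps)
 * and beta = b - alpha * mean. */
static void batchnorm_relu_folded(const float *x, float *y, int n, float w,
                                  float b, float mean, float var) {
  const float eps = 1e-6f;
  const float alpha = w / sqrtf(var + eps);
  const float beta = b - alpha * mean;
  for (int i = 0; i < n; i++) {
    float v = alpha * x[i] + beta;
    y[i] = (v < 0.f) ? 0.f : v; /* fused ReLU, as in the file above */
  }
}

With alpha and beta precomputed, the inner loop is a plain fused multiply-add and trivially vectorizable, which matters because this normalization runs after every convolution stage.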
/* Pretrained MobileNet Convolutional Neural Network in C language and OpenMP API GitHUB Page: https://github.com/jcanore/vgg16 Author: ZFTurbo/jocare Compilation: gcc -O3 MobileNet_CPU_cifar.c -lm -fopenmp -o MobileNet_CPU_cifar Usage: MobileNet_CPU_cifar <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)> Example: MobileNet_CPU_cifar ../../weights/weights.txt" ../../img/image_list.txt results_imagenet_conv.txt 1 */ #include <ctype.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> double get_seconds(struct timeval tStart, struct timeval tEnd) { return ((tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec - tStart.tv_usec) / 1.e6; } #define SIZE 224 #define CONV_SIZE 3 #define CONV_LEVELS 27 //#define _CRT_SECURE_NO_WARNINGS 1 // precompile variables // assure default values if nothing provided #ifndef SPARSE_CONVOLUTIONS #define SPARSE_CONVOLUTIONS 0 // default dense convolutions #endif // SPARSE_CONVOLUTIONS #ifndef FIRST_CONV_SPARSE #define FIRST_CONV_SPARSE 0 // this is almost never 1 #endif // FIRST_CONV_SPARSE #ifndef SPARSE_FULLY_CONNECTED #define SPARSE_FULLY_CONNECTED 0 // this is not implemented yet #endif // SPARSE_FULLY_CONNECTED #ifndef FISHER_PRUNING #define FISHER_PRUNING \ 0 // set for fisher pruning, all previous variables changed to dense #endif // FISHER_PRUNING #ifndef NUMBER_OF_THREADS #define NUMBER_OF_THREADS 1 // number of threads to run on //#define NUMBER_OF_THREADS omp_get_num_procs() - 1 #endif // NUMBER_OF_THREADS static double pw_conv_time = 0.0; static double dense_time = 0.0; /****************************************************************************************************************************/ int im_sizes[27] = {224, 224, 16, 16, 16, 16, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2}; int strides[26] = {1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1}; int mem_block_shape[3] = { 1024, 224, 224}; // allocate the absolute maximum amount of space we will need float ***block1; float ***block2; float *****wc; // weights convolution float ***wd; // weights dense float **bd; // biases dense float **batchnorm_weights; float **batchnorm_biases; float **batchnorm_means; // running mean and variance from training used to // estimate population statistics float **batchnorm_vars; int mem_block_dense_shape = { 1024 * 2 * 2}; // size of output from last convolutional layer float *mem_block1_dense; float *mem_block2_dense; #if SPARSE_CONVOLUTIONS // sparse conv csr_t ****wc_sparse; #endif // SPARSE_CONVOLUTIONS #if FISHER_PRUNING #define SPARSE_CONVOLUTIONS 0 // force dense convolutions /* // ORIGINAL FISHER EXPERIMENTS int cshape[27][4] = { { 224, 3, CONV_SIZE, CONV_SIZE }, { 224, 1, CONV_SIZE, CONV_SIZE }, { 43, 224, 1, 1 }, { 43, 1, CONV_SIZE, CONV_SIZE }, { 85, 43, 1, 1 }, { 85, 1, CONV_SIZE, CONV_SIZE }, { 70, 85, 1, 1 }, { 70, 1, CONV_SIZE, CONV_SIZE }, { 150, 70, 1, 1 }, { 150, 1, CONV_SIZE, CONV_SIZE }, { 69, 150, 1, 1 }, { 69, 1, CONV_SIZE, CONV_SIZE }, { 188, 69, 1, 1 }, { 188, 1, CONV_SIZE, CONV_SIZE }, { 72, 188, 1, 1 }, { 72, 1, CONV_SIZE, CONV_SIZE }, { 122, 72, 1, 1 }, { 122, 1, CONV_SIZE, CONV_SIZE }, { 106, 122, 1, 1 }, { 106, 1, CONV_SIZE, CONV_SIZE }, { 96, 106, 1, 1 }, { 96, 1, CONV_SIZE, CONV_SIZE }, { 81, 96, 1, 1 }, { 81, 1, CONV_SIZE, CONV_SIZE }, { 75, 81, 1, 1 }, { 75, 1, CONV_SIZE, CONV_SIZE }, { 100, 75, 1, 1 } }; int dshape[1][2]= { { 100, 10} }; */ // FIXED 
90% ACCURACY EXPERIMENTS int cshape[27][4] = {{224, 3, CONV_SIZE, CONV_SIZE}, {224, 1, CONV_SIZE, CONV_SIZE}, {43, 224, 1, 1}, {43, 1, CONV_SIZE, CONV_SIZE}, {85, 43, 1, 1}, {85, 1, CONV_SIZE, CONV_SIZE}, {70, 85, 1, 1}, {70, 1, CONV_SIZE, CONV_SIZE}, {150, 70, 1, 1}, {150, 1, CONV_SIZE, CONV_SIZE}, {69, 150, 1, 1}, {69, 1, CONV_SIZE, CONV_SIZE}, {188, 69, 1, 1}, {188, 1, CONV_SIZE, CONV_SIZE}, {72, 188, 1, 1}, {72, 1, CONV_SIZE, CONV_SIZE}, {122, 72, 1, 1}, {122, 1, CONV_SIZE, CONV_SIZE}, {106, 122, 1, 1}, {106, 1, CONV_SIZE, CONV_SIZE}, {96, 106, 1, 1}, {96, 1, CONV_SIZE, CONV_SIZE}, {81, 96, 1, 1}, {81, 1, CONV_SIZE, CONV_SIZE}, {75, 81, 1, 1}, {75, 1, CONV_SIZE, CONV_SIZE}, {100, 75, 1, 1} }; int dshape[1][2] = {{100, 10}}; #else // PLAIN int cshape[27][4] = {{224, 3, CONV_SIZE, CONV_SIZE}, {224, 1, CONV_SIZE, CONV_SIZE}, {64, 224, 1, 1}, {64, 1, CONV_SIZE, CONV_SIZE}, {128, 64, 1, 1}, {128, 1, CONV_SIZE, CONV_SIZE}, {128, 128, 1, 1}, {128, 1, CONV_SIZE, CONV_SIZE}, {256, 128, 1, 1}, {256, 1, CONV_SIZE, CONV_SIZE}, {256, 256, 1, 1}, {256, 1, CONV_SIZE, CONV_SIZE}, {512, 256, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {1024, 512, 1, 1}, {1024, 1, CONV_SIZE, CONV_SIZE}, {1024, 1024, 1, 1}}; int dshape[1][2] = {{1024, 10}}; #endif // FISHER_PRUNING /****************************************************************************************************************************/ void reset_mem_block(float ***mem) { int i, j, k; for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { for (k = 0; k < mem_block_shape[2]; k++) { mem[i][j][k] = 0.0; } } } } /****************************************************************************************************************************/ void reset_mem_block_dense(float *mem) { int i; for (i = 0; i < mem_block_dense_shape; i++) { mem[i] = 0.0; } } /****************************************************************************************************************************/ void init_memory() { int i, j, k, l; int max_channels = 1024; int max_im_size = 224; block1 = malloc(max_channels * sizeof(float **)); block2 = malloc(max_channels * sizeof(float **)); // allocate block memory for (i = 0; i < max_channels; i++) { block1[i] = malloc(max_im_size * sizeof(float *)); block2[i] = malloc(max_im_size * sizeof(float *)); for (j = 0; j < max_im_size; j++) { block1[i][j] = malloc(max_im_size * sizeof(float)); block2[i][j] = malloc(max_im_size * sizeof(float)); } } #if SPARSE_CONVOLUTIONS wc_sparse = (csr_t ****)malloc(CONV_LEVELS * sizeof(csr_t ***)); for (l = 0; l < CONV_LEVELS; l++) { wc_sparse[l] = (csr_t ***)malloc(cshape[l][0] * sizeof(csr_t **)); for (i = 0; i < cshape[l][0]; i++) { wc_sparse[l][i] = (csr_t **)malloc(cshape[l][1] * sizeof(csr_t *)); } } // wc memory allocated below will be freed in read_weights if // SPARSE_CONVOLUTIONS #endif // SPARSE_CONVOLUTIONS wc = malloc(CONV_LEVELS * sizeof(float ****)); // allocate kernel memory for (l = 0; l < CONV_LEVELS; l++) { wc[l] = malloc(cshape[l][0] * sizeof(float ***)); for (i = 0; i < cshape[l][0]; i++) { wc[l][i] = malloc(cshape[l][1] * sizeof(float **)); for (j = 0; j < cshape[l][1]; j++) { wc[l][i][j] = malloc(cshape[l][2] * sizeof(float *)); for (k = 0; k < cshape[l][2]; k++) { wc[l][i][j][k] = malloc(cshape[l][3] * 
sizeof(float)); } } } } // allocate batchnorm memory batchnorm_weights = malloc(27 * sizeof(float *)); batchnorm_biases = malloc(27 * sizeof(float *)); batchnorm_means = malloc(27 * sizeof(float *)); batchnorm_vars = malloc(27 * sizeof(float *)); for (l = 0; l < CONV_LEVELS; l++) { batchnorm_weights[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_biases[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_means[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_vars[l] = malloc(cshape[l][0] * sizeof(float)); } wd = malloc(1 * sizeof(float **)); bd = malloc(1 * sizeof(float *)); for (l = 0; l < 1; l++) { wd[l] = malloc(dshape[l][0] * sizeof(float *)); for (i = 0; i < dshape[l][0]; i++) { wd[l][i] = malloc(dshape[l][1] * sizeof(float)); } bd[l] = malloc(dshape[l][1] * sizeof(float)); } // allocate dense memory mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float)); mem_block2_dense = calloc(mem_block_dense_shape, sizeof(float)); } /****************************************************************************************************************************/ void free_memory() { int i, j, k, l; // Free convolution weights for (l = 0; l < CONV_LEVELS; l++) { #if SPARSE_CONVOLUTIONS for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { free(wc_sparse[l][i][j]); } free(wc_sparse[l][i]); } free(wc_sparse[l]); #else for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); #endif } // free(wc); // free(bc); #if SPARSE_CONVOLUTIONS free(wc_sparse); #else free(wc); #endif // SPARSE_CONVOLUTIONS // Free dense weights for (l = 0; l < 1; l++) { for (i = 0; i < dshape[l][0]; i++) { free(wd[l][i]); } free(wd[l]); free(bd[l]); } free(wd); free(bd); // Free memblocks for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { free(block1[i][j]); free(block2[i][j]); } free(block1[i]); free(block2[i]); } free(block1); free(block2); free(mem_block1_dense); free(mem_block2_dense); } /****************************************************************************************************************************/ void read_weights(char *in_file, int lvls) { float dval; int i, j, k, l, m, z; FILE *iin; int total_lvls_read = 0; // printf("\nin_file es: %s\n\n", in_file); iin = fopen(in_file, "r"); if (iin == NULL) { printf("Weights file %s absent\n", in_file); exit(1); } // Reading convolution weights (store them flipped from begining) // no biases for (l = 0; l < CONV_LEVELS; l++) { printf("Read conv block %d weights\n", l); for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { for (m = 0; m < cshape[l][3]; m++) { fscanf(iin, "%f", &dval); wc[l][i][j][k][m] = dval; } } } } total_lvls_read += 1; } for (z = 0; z < CONV_LEVELS; z++) { // batchnorm weights and biases printf("Read batchnorm block %d weights\n", z); for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); batchnorm_weights[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_biases[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_means[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_vars[z][i] = dval; } } if (total_lvls_read >= lvls && lvls != -1) return; // Reading dense weights 
int num_dense_layers = 1; for (z = 0; z < num_dense_layers; z++) { printf("Read dense block %d weights\n", z); for (i = 0; i < dshape[z][0]; i++) { for (j = 0; j < dshape[z][1]; j++) { fscanf(iin, "%f", &dval); // printf("weight: %i : %f \n", i, dval); wd[z][i][j] = dval; } } for (i = 0; i < dshape[z][1]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); bd[z][i] = dval; } } fclose(iin); /////////////**************** SPARSE ************///////////////////////////// #if SPARSE_CONVOLUTIONS // convert to sparse format for (l = 0; l < CONV_LEVELS; l++) for (i = 0; i < cshape[l][0]; i++) for (j = 0; j < cshape[l][1]; j++) { // printf("going for %d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, // cshape[l][1]); csr_t *a = dense2csr2(cshape[l][2], cshape[l][3], wc[l][i][j]); // print_csr(a); wc_sparse[l][i][j] = a; // printf("done..%d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, // cshape[l][1]); } // Free convolution weights #if FIRST_CONV_SPARSE == 0 l = 0; // allocate new memory for first conv and copy from wc float *****wc_first_conv = (float *****)malloc(1 * sizeof(float ****)); wc_first_conv[l] = (float ****)malloc(cshape[l][0] * sizeof(float ***)); int k1, k2; for (i = 0; i < cshape[l][0]; i++) { wc_first_conv[l][i] = (float ***)malloc(cshape[l][1] * sizeof(float **)); for (j = 0; j < cshape[l][1]; j++) { wc_first_conv[l][i][j] = (float **)malloc(cshape[l][2] * sizeof(float *)); for (k1 = 0; k1 < cshape[l][2]; k1++) { wc_first_conv[l][i][j][k1] = (float *)malloc(cshape[l][3] * sizeof(float)); for (k2 = 0; k2 < cshape[l][3]; k2++) wc_first_conv[l][i][j][k1][k2] = wc[l][i][j][k1][k2]; } } } #endif // FIRST_CONV_SPARSE == 0 // free up all dense conv layer representation for (l = 0; l < CONV_LEVELS; l++) { for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); } free(wc); #if FIRST_CONV_SPARSE == 0 // replace old wc pointer with the data for only first conv layer created // above wc = wc_first_conv; #endif // FIRST_CONV_SPARSE == 0 #endif // SPARSE_CONVOLUTIONS } /****************************************************************************************************************************/ void read_image(char *in_file) { int i, j, l; FILE *iin; float dval; iin = fopen(in_file, "r"); if (iin == NULL) { printf("Image file %s absent\n", in_file); exit(1); } /* Reading image */ for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { for (l = 0; l < 3; l++) { fscanf(iin, "%f", &dval); block1[l][i][j] = dval; } } } } /****************************************************************************************************************************/ void convolution_3_x_3(float **matrix, float **kernel, float **out, int size, int stride) { int i, j; float sum; float zeropad[size + 2][size + 2]; memset(zeropad, 0, ((size + 2) * (size + 2) * sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } for (i = 0; i < size; i = i + stride) { for (j = 0; j < size; j = j + stride) { sum = zeropad[i][j] * kernel[0][0] + zeropad[i][j + 1] * kernel[0][1] + zeropad[i][j + 2] * kernel[0][2] + zeropad[i + 1][j] * kernel[1][0] + zeropad[i + 1][j + 1] * kernel[1][1] + zeropad[i + 1][j + 2] * kernel[1][2] + zeropad[i + 2][j] * kernel[2][0] + zeropad[i + 2][j + 1] * kernel[2][1] + zeropad[i + 2][j + 2] * kernel[2][2]; out[i][j] += sum; } } } 
/****************************************************************************************************************************/ /****************************************************************************************************************************/ void pointwise_convolution(float ****point_kernel, float ***block2, float ***block1, int input_channels, int output_channels, int image_size) { struct timeval start, end; gettimeofday(&start, NULL); int i, j, k, l; float sum; for (i = 0; i < output_channels; i++) { for (j = 0; j < image_size; j++) { for (k = 0; k < image_size; k++) { sum = 0.; for (l = 0; l < input_channels; l++) { sum += block2[l][j][k] * point_kernel[i][l][0] [0]; // 0 because they are always 1x1 filters } block1[i][j][k] = sum; } } } gettimeofday(&end, NULL); pw_conv_time += get_seconds(start, end); } /****************************************************************************************************************************/ void batchnorm_and_relu(float ***in, float ***out, float *weights, float *bias, float *mean, float *var, int num_channels, int image_size) { int channel, i, j; // ((x - mean) * invstd) * w + b #pragma omp parallel for private(channel, i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (channel = 0; channel < num_channels; channel++) { float invstd = 1. / sqrt(var[channel] + 0.000001); for (i = 0; i < image_size; i++) { for (j = 0; j < image_size; j++) { out[channel][i][j] = (weights[channel] * invstd) * in[channel][i][j] + (bias[channel] - ((weights[channel] * mean[channel]) * invstd)); // out[channel][i][j] = ((in[channel][i][j] - mean[channel]) * invstd) * // weights[channel] + bias[channel]; if (out[channel][i][j] < 0.f) out[channel][i][j] = 0.f; } } } } /****************************************************************************************************************************/ void depthwise_convolution(float ***block1, float ***block2, float ****depth_kernel, float ****point_kernel, int level) { int i, j; int input_channels = cshape[level][0]; int output_channels = cshape[level + 1][0]; // printf("level %i: %i ==> %i\n", level, input_channels, output_channels); #pragma omp parallel for private(i) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < input_channels; i++) { #if SPARSE_CONVOLUTIONS convolution_3_x_3_sparse(block1[i], wc_sparse[level][i][0], block2[i], im_sizes[level], strides[level]); #else convolution_3_x_3(block1[i], depth_kernel[i][0], block2[i], im_sizes[level], strides[level]); #endif } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], input_channels, im_sizes[level + 1]); reset_mem_block(block2); level++; // now do linear combination of the elements in output and write them back // into the first memory block #if SPARSE_CONVOLUTIONS #pragma omp parallel for private(i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < output_channels; i++) { for (j = 0; j < input_channels; j++) { pointwise_convolution_sparse(block2[j], wc_sparse[level][i][j], block1[j], im_sizes[level]); } } #else pointwise_convolution(point_kernel, block1, block2, input_channels, output_channels, im_sizes[level]); #endif batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], output_channels, im_sizes[level + 1]); reset_mem_block(block2); } 
/****************************************************************************************************************************/ void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) { int i; for (i = 0; i < size; i++) { out[i] += bs[i]; // printf("%f\n", out[i]); if (relu == 1) { if (out[i] < 0) out[i] = 0.f; } } } /****************************************************************************************************************************/ void flatten(float ***in, float *out, int sh0, int sh1, int sh2) { int i, j, k, total = 0; for (i = 0; i < sh0; i++) { for (j = 0; j < sh1; j++) { for (k = 0; k < sh2; k++) { out[total] = in[i][j][k]; total += 1; } } } } /****************************************************************************************************************************/ void dense(float *in, float **weights, float *out, int sh_in, int sh_out) { struct timeval start, end; gettimeofday(&start, NULL); int i, j; for (i = 0; i < sh_out; i++) { float sum = 0.0; for (j = 0; j < sh_in; j++) { sum += in[j] * weights[j][i]; } out[i] = sum; } gettimeofday(&end, NULL); dense_time += get_seconds(start, end); } /****************************************************************************************************************************/ void write_out_block(int layer, float ***block) { int layer_name = layer; // * 2 - 1; char filename[32]; sprintf(filename, "outputs/output%d", layer_name); FILE *f = fopen(filename, "w"); if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int i = 0; i < 224; i++) { for (int j = 0; j < mem_block_shape[1]; j++) { for (int k = 0; k < mem_block_shape[2]; k++) { fprintf(f, "%f \n", block[i][j][k]); } } } fclose(f); } /****************************************************************************************************************************/ void write_out_layer(int layer) { int layer_name = layer; // * 2 - 1; char filename[16]; sprintf(filename, "layer%d", layer_name); FILE *f = fopen(filename, "w"); int depth = 1; if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int o = 0; o < cshape[layer][0]; o++) { for (int i = 0; i < cshape[layer][1]; i++) { for (int k_h = 0; k_h < cshape[layer][2]; k_h++) { for (int k_w = 0; k_w < cshape[layer][3]; k_w++) { fprintf(f, "%f ", wc[layer][o][i][k_h][k_w]); } } fprintf(f, "\n"); } } fclose(f); layer_name = layer + 1; char filename2[16]; sprintf(filename2, "layer%d", layer_name); // get batchnorms FILE *f2 = fopen(filename2, "w"); if (f2 == NULL) { printf("Error opening file!\n"); exit(1); } for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_weights[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_biases[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_means[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_vars[layer][i]); } fclose(f2); } /****************************************************************************************************************************/ void output_predictions(FILE *out, int only_convolution, int size, int cur_size) { int i; int c = 0; if (only_convolution == 1) { // for (i = 0; i < 512*7*7; i++) { for (i = 0; i < size * cur_size * cur_size; i++) { fprintf(out, "%g\n", mem_block1_dense[i]); } } else { double maximum = -1; // dshape[0][1] ==> 10 for (i = 0; i < dshape[0][1]; i++) { fprintf(out, "%g\n", mem_block2_dense[i]); if
(mem_block2_dense[i] > maximum) { maximum = mem_block2_dense[i]; c = i + 1; } } fprintf(out, "\n"); printf("This image depicts class: %d\n", c); } } /****************************************************************************************************************************/ void get_mobilenet_predict() { int level = 0; int i, j; // normal convolution #pragma omp parallel for private(i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { #if FIRST_CONV_SPARSE convolution_3_x_3_sparse(block1[j], wc_sparse[level][i][j], block2[i], im_sizes[level], 1); #else convolution_3_x_3(block1[j], wc[level][i][j], block2[i], im_sizes[level], 1); #endif } } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], 224, 224); reset_mem_block(block2); // depthwise convolutions for (level = 1; level < (CONV_LEVELS - 1); level = level + 2) { depthwise_convolution(block1, block2, wc[level], wc[level + 1], (level)); } // flatten flatten(block1, mem_block1_dense, cshape[level][0], im_sizes[level], im_sizes[level]); // dense level = 0; dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]); add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 0); reset_mem_block_dense(mem_block1_dense); return; } /****************************************************************************************************************************/ char *trimwhitespace(char *str) { char *end; // Trim leading space while (isspace((unsigned char)*str)) str++; if (*str == 0) // All spaces? return str; // Trim trailing space end = str + strlen(str) - 1; while (end > str && isspace((unsigned char)*end)) end--; // Write new null terminator *(end + 1) = 0; return str; } /****************************************************************************************************************************/ int main(int argc, char *argv[]) { FILE *file_list, *results; char buf[1024]; struct timeval tStart, tEnd; double deltaTime; char *weights_file; char *image_list_file; char *output_file; int lvls = -1; int only_convolution = 0; //----------------------------------------------------------------------- printf("Using %d threads\n", NUMBER_OF_THREADS); if (argc != 4 && argc != 5) { printf( "Usage: <program.exe> <weights file> <images list file> <output file> " "<only_convolution [optional]>\n"); return 0; } weights_file = argv[1]; // printf("%s\n", weights_file); image_list_file = argv[2]; output_file = argv[3]; if (argc == 5) { lvls = 20; only_convolution = 1; } //----------------------------------------------------------------------- init_memory(); file_list = fopen(image_list_file, "r"); if (file_list == NULL) { printf("Check file list location: %s\n", image_list_file); return 1; } results = fopen(output_file, "w"); if (results == NULL) { printf("Couldn't open file for writing: %s\n", output_file); return 1; } gettimeofday(&tStart, NULL); read_weights(weights_file, lvls); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Reading weights: %.3lf sec\n", deltaTime); while (!feof(file_list)) { pw_conv_time = 0.0; dense_time = 0.0; if (fgets(buf, 1024, file_list) == NULL) { break; } if (strlen(buf) == 0) { break; } // printf("%d\n", strlen(buf)); read_image(trimwhitespace(buf)); gettimeofday(&tStart, NULL); get_mobilenet_predict(); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Infer image %s: %.3lf sec\n", buf,
deltaTime); printf("pw_conv time: %.3lf sec\n", pw_conv_time); printf("dense time: %.3lf sec\n", dense_time); output_predictions(results, only_convolution, 1024, 1); } // free_memory(); fclose(file_list); fclose(results); return 0; }
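One observation on the OpenMP build above: it parallelizes the first convolution, the depthwise 3x3 stage, and the batch normalization, but pointwise_convolution, the stage it times separately through pw_conv_time, remains serial. Each output channel i writes only its own slice of the output block, so the outer loop has no cross-iteration dependencies and admits the same pragma used elsewhere in the file. A hedged sketch, reusing the file's NUMBER_OF_THREADS macro (this parallel version is a suggestion, not code from the repository):

/* Sketch only: parallel pointwise (1x1) convolution in the style of the
 * pragmas above. NUMBER_OF_THREADS is the macro defined earlier in the
 * file. Each iteration of i writes a disjoint slice out[i], so no
 * synchronization is needed. */
void pointwise_convolution_parallel(float ****point_kernel, float ***in,
                                    float ***out, int input_channels,
                                    int output_channels, int image_size) {
  int i, j, k, l;
#pragma omp parallel for private(i, j, k, l) schedule(dynamic, 1) \
    num_threads(NUMBER_OF_THREADS)
  for (i = 0; i < output_channels; i++) {
    for (j = 0; j < image_size; j++) {
      for (k = 0; k < image_size; k++) {
        float sum = 0.f;
        for (l = 0; l < input_channels; l++) {
          sum += in[l][j][k] * point_kernel[i][l][0][0]; /* 1x1 filters */
        }
        out[i][j][k] = sum;
      }
    }
  }
}

schedule(dynamic, 1) mirrors the file's other loops; since all output channels cost the same here, schedule(static) would work equally well and avoids scheduling overhead.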
triad.c
/******************************************************************************* * Copyright 2021 UChicago Argonne, LLC. * (c.f. AUTHORS, LICENSE) * * This file is part of the NRM Benchmarks project. * For more info, see https://github.com/anlsys/nrm-benchmarks * * SPDX-License-Identifier: BSD-3-Clause ******************************************************************************/ #include "config.h" #include "nrm-benchmarks.h" #include <nrm.h> static double *a, *b, *c; static struct nrm_context *context; static struct nrm_scope *region_scope, **thread_scope; int main(int argc, char **argv) { /* configuration parameters: * - array size in number of double elements * - number of times to run through the benchmark */ size_t array_size; long int times; double scalar = 3.0; /* needed for performance measurement */ int64_t sumtime = 0, mintime = INT64_MAX, maxtime = 0; nrmb_time_t start, end; size_t memory_size; int num_threads; /* retrieve the size of the allocation and the number of time * to loop through the kernel. */ assert(argc == 3); array_size = strtoull(argv[1], NULL, 0); assert(!errno); times = strtol(argv[2], NULL, 0); assert(!errno); /* ensure that OpenMP is giving us the right number of threads */ #pragma omp parallel #pragma omp master num_threads = omp_get_num_threads(); int err = 0; #pragma omp parallel #pragma omp atomic err++; assert(num_threads == err); err = 0; /* allocate the arrays and initialize them. Note that we expect the * first-touch policy of Linux to result in the arrays being properly * balanced between threads/numa-nodes */ memory_size = array_size * sizeof(double); a = malloc(memory_size); b = malloc(memory_size); c = malloc(memory_size); #pragma omp parallel for for(size_t i = 0; i < array_size; i++) { a[i] = 1.0; b[i] = 2.0; c[i] = 0.0; } /* NRM Context init */ context = nrm_ctxt_create(); nrm_init(context, argv[0], 0, 0); /* one run of the benchmark for free, warms up the memory */ #pragma omp parallel for for(size_t i = 0; i < array_size; i++) c[i] = a[i] + scalar*b[i]; /* this version of the benchmarks reports one progress each time it goes * through the entire array. 
*/ /* Create scopes */ region_scope = nrm_scope_create(); thread_scope = malloc(num_threads*sizeof(nrm_scope_t*)); for (int i = 0; i < num_threads; i++) { thread_scope[i] = nrm_scope_create(); } /* Get master process scope */ nrm_scope_threadshared(region_scope); nrm_send_progress(context, 1, region_scope); for(long int iter = 0; iter < times; iter++) { int64_t time; nrmb_gettime(&start); /* the actual benchmark */ #pragma omp parallel for for(size_t i = 0; i < array_size; i++) { c[i] = a[i] + scalar*b[i]; /* Get scopes */ nrm_scope_threadshared(region_scope); nrm_scope_threadprivate(thread_scope[omp_get_thread_num()]); nrm_send_progress(context, 1, thread_scope[omp_get_thread_num()]); } nrmb_gettime(&end); nrm_send_progress(context, 1, region_scope); time = nrmb_timediff(&start, &end); sumtime += time; mintime = NRMB_MIN(time, mintime); maxtime = NRMB_MAX(time, maxtime); } nrm_fini(context); nrm_ctxt_delete(context); /* Delete scopes */ nrm_scope_delete(region_scope); for (int i = 0; i < num_threads; i++) { nrm_scope_delete(thread_scope[i]); } /* compute stats */ /* report the configuration and timings */ fprintf(stdout, "NRM Benchmarks: %s\n", argv[0]); fprintf(stdout, "Version: %s\n", PACKAGE_VERSION); fprintf(stdout, "Description: one progress per iteration, Triad benchmark\n"); fprintf(stdout, "Array size: %zu (elements).\n", array_size); fprintf(stdout, "Memory per array: %.1f MiB.\n", (double) memory_size /1024.0/1024.0); fprintf(stdout, "Kernel was executed: %ld times.\n", times); fprintf(stdout, "Number of threads: %d\n", num_threads); fprintf(stdout, "Time (s): avg: %11.6f min: %11.6f max: %11.6f\n", 1.0E-09 * sumtime/times, 1.0E-09 * mintime, 1.0E-09 * maxtime); fprintf(stdout, "Perf (MiB/s): avg: %12.6f best: %12.6f\n", (3.0E-06 * memory_size)/ (1.0E-09 * sumtime/times), (3.0E-06 * memory_size)/ (1.0E-09 * mintime)); /* validate the benchmark: minimum about of bits should be different. */ err = 0; for(size_t i = 0; i < array_size && err == 0; i++) err = err || !nrmb_check_double(7.0, c[i], 2); if(err) fprintf(stdout, "VALIDATION FAILED!!!!\n"); else fprintf(stdout, "VALIDATION PASSED!!!!\n"); return err; }
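The performance line in triad.c follows from the kernel touching three arrays per element: a and b are read and c is written, hence the factor 3 in (3.0E-06 * memory_size) / seconds, with memory_size the per-array byte count. Note that the label prints MiB/s while the 1.0E-06 scale actually yields decimal MB/s; with 1 MiB = 2^20 bytes the two differ by roughly 5%. A small self-contained check of the arithmetic (all values illustrative):

#include <stdio.h>

int main(void) {
    size_t array_size = 10 * 1000 * 1000;             /* elements per array */
    size_t memory_size = array_size * sizeof(double); /* 80 MB per array */
    double seconds = 0.05;                            /* hypothetical kernel time */

    /* triad moves three arrays per pass: a[] and b[] read, c[] written */
    double mb_per_s = (3.0e-06 * memory_size) / seconds;          /* decimal MB/s */
    double mib_per_s = (3.0 * memory_size) / 1048576.0 / seconds; /* binary MiB/s */
    printf("%.1f MB/s vs %.1f MiB/s\n", mb_per_s, mib_per_s);
    return 0;
}

For the numbers above this prints 4800.0 MB/s versus 4577.6 MiB/s, the ~5% gap between the two unit conventions.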
#include "config.h" #include "nrm-benchmarks.h" #include <nrm.h> static double *a, *b, *c; static struct nrm_context *context; static struct nrm_scope *region_scope, **thread_scope; int main(int argc, char **argv) { /* * configuration parameters: - array size in number of double elements - * number of times to run through the benchmark */ size_t array_size; long int times; double scalar = 3.0; /* needed for performance measurement */ int64_t sumtime = 0, mintime = INT64_MAX, maxtime = 0; nrmb_time_t start, end; size_t memory_size; int num_threads; /* * retrieve the size of the allocation and the number of time to loop * through the kernel. */ assert(argc == 3); array_size = strtoull(argv[1], NULL, 0); assert(!errno); times = strtol(argv[2], NULL, 0); assert(!errno); /* ensure that OpenMP is giving us the right number of threads */ num_threads = omp_get_num_threads(); int err = 0; err++; assert(num_threads == err); err = 0; /* * allocate the arrays and initialize them. Note that we expect the * first-touch policy of Linux to result in the arrays being properly * balanced between threads/numa-nodes */ memory_size = array_size * sizeof(double); a = malloc(memory_size); b = malloc(memory_size); c = malloc(memory_size); for (size_t i = 0; i < array_size; i++) { a[i] = 1.0; b[i] = 2.0; c[i] = 0.0; } /* NRM Context init */ context = nrm_ctxt_create(); nrm_init(context, argv[0], 0, 0); /* one run of the benchmark for free, warms up the memory */ for (size_t i = 0; i < array_size; i++) c[i] = a[i] + scalar * b[i]; /* * this version of the benchmarks reports one progress each time it goes * through the entire array. */ /* Create scopes */ region_scope = nrm_scope_create(); thread_scope = malloc(num_threads * sizeof(nrm_scope_t *)); for (int i = 0; i < num_threads; i++) { thread_scope[i] = nrm_scope_create(); } /* Get master process scope */ nrm_scope_threadshared(region_scope); nrm_send_progress(context, 1, region_scope); for (long int iter = 0; iter < times; iter++) { int64_t time; nrmb_gettime(&start); /* the actual benchmark */ for (size_t i = 0; i < array_size; i++) { c[i] = a[i] + scalar * b[i]; /* Get scopes */ nrm_scope_threadshared(region_scope); nrm_scope_threadprivate(thread_scope[omp_get_thread_num()]); nrm_send_progress(context, 1, thread_scope[omp_get_thread_num()]); } nrmb_gettime(&end); nrm_send_progress(context, 1, region_scope); time = nrmb_timediff(&start, &end); sumtime += time; mintime = NRMB_MIN(time, mintime); maxtime = NRMB_MAX(time, maxtime); } nrm_fini(context); nrm_ctxt_delete(context); /* Delete scopes */ nrm_scope_delete(region_scope); for (int i = 0; i < num_threads; i++) { nrm_scope_delete(thread_scope[i]); } /* compute stats */ /* report the configuration and timings */ fprintf(stdout, "NRM Benchmarks: %s\n", argv[0]); fprintf(stdout, "Version: %s\n", PACKAGE_VERSION); fprintf(stdout, "Description: one progress per iteration, Triad benchmark\n"); fprintf(stdout, "Array size: %zu (elements).\n", array_size); fprintf(stdout, "Memory per array: %.1f MiB.\n", (double)memory_size / 1024.0 / 1024.0); fprintf(stdout, "Kernel was executed: %ld times.\n", times); fprintf(stdout, "Number of threads: %d\n", num_threads); fprintf(stdout, "Time (s): avg: %11.6f min: %11.6f max: %11.6f\n", 1.0E-09 * sumtime / times, 1.0E-09 * mintime, 1.0E-09 * maxtime); fprintf(stdout, "Perf (MiB/s): avg: %12.6f best: %12.6f\n", (3.0E-06 * memory_size) / (1.0E-09 * sumtime / times), (3.0E-06 * memory_size) / (1.0E-09 * mintime)); /* validate the benchmark: minimum about of bits should be different. 
*/ err = 0; for (size_t i = 0; i < array_size && err == 0; i++) err = err || !nrmb_check_double(7.0, c[i], 2); if (err) fprintf(stdout, "VALIDATION FAILED!!!!\n"); else fprintf(stdout, "VALIDATION PASSED!!!!\n"); return err; }
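The harness times each pass with nrmb_gettime and nrmb_timediff and keeps a running sum, minimum, and maximum; the 1.0E-09 factors applied when printing imply the difference comes back in nanoseconds. Those helpers belong to nrm-benchmarks and their implementation is not shown here; a generic clock_gettime-based equivalent, written purely as an assumption about their behavior, could look like:

#include <stdint.h>
#include <time.h>

/* Assumed stand-ins for nrmb_gettime/nrmb_timediff: monotonic timestamps
 * and their difference in nanoseconds. The real definitions live in
 * nrm-benchmarks and may differ. */
typedef struct timespec bench_time_t;

static void bench_gettime(bench_time_t *t) {
    clock_gettime(CLOCK_MONOTONIC, t);
}

static int64_t bench_timediff(const bench_time_t *start,
                              const bench_time_t *end) {
    return (int64_t)(end->tv_sec - start->tv_sec) * 1000000000LL +
           (end->tv_nsec - start->tv_nsec);
}

A monotonic clock matters here: CLOCK_MONOTONIC is immune to wall-clock adjustments, so min/max statistics cannot be corrupted by NTP steps during a run.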
#include "config.h" #include "nrm-benchmarks.h" #include <nrm.h> static double *a, *b, *c; static struct nrm_context *context; static struct nrm_scope *region_scope, **thread_scope; int main(int argc, char **argv) { /* * configuration parameters: - array size in number of double elements - * number of times to run through the benchmark */ size_t array_size; long int times; double scalar = 3.0; /* needed for performance measurement */ int64_t sumtime = 0, mintime = INT64_MAX, maxtime = 0; nrmb_time_t start, end; size_t memory_size; int num_threads; /* * retrieve the size of the allocation and the number of time to loop * through the kernel. */ assert(argc == 3); array_size = strtoull(argv[1], NULL, 0); assert(!errno); times = strtol(argv[2], NULL, 0); assert(!errno); /* ensure that OpenMP is giving us the right number of threads */ #pragma omp parallel #pragma omp master num_threads = omp_get_num_threads(); int err = 0; #pragma omp parallel #pragma omp atomic err++; assert(num_threads == err); err = 0; /* * allocate the arrays and initialize them. Note that we expect the * first-touch policy of Linux to result in the arrays being properly * balanced between threads/numa-nodes */ memory_size = array_size * sizeof(double); a = malloc(memory_size); b = malloc(memory_size); c = malloc(memory_size); #pragma omp parallel for for (size_t i = 0; i < array_size; i++) { a[i] = 1.0; b[i] = 2.0; c[i] = 0.0; } /* NRM Context init */ context = nrm_ctxt_create(); nrm_init(context, argv[0], 0, 0); /* one run of the benchmark for free, warms up the memory */ #pragma omp parallel for for (size_t i = 0; i < array_size; i++) c[i] = a[i] + scalar * b[i]; /* * this version of the benchmarks reports one progress each time it goes * through the entire array. */ /* Create scopes */ region_scope = nrm_scope_create(); thread_scope = malloc(num_threads * sizeof(nrm_scope_t *)); for (int i = 0; i < num_threads; i++) { thread_scope[i] = nrm_scope_create(); } /* Get master process scope */ nrm_scope_threadshared(region_scope); nrm_send_progress(context, 1, region_scope); for (long int iter = 0; iter < times; iter++) { int64_t time; nrmb_gettime(&start); /* the actual benchmark */ #pragma omp parallel for for (size_t i = 0; i < array_size; i++) { c[i] = a[i] + scalar * b[i]; /* Get scopes */ nrm_scope_threadshared(region_scope); nrm_scope_threadprivate(thread_scope[omp_get_thread_num()]); nrm_send_progress(context, 1, thread_scope[omp_get_thread_num()]); } nrmb_gettime(&end); nrm_send_progress(context, 1, region_scope); time = nrmb_timediff(&start, &end); sumtime += time; mintime = NRMB_MIN(time, mintime); maxtime = NRMB_MAX(time, maxtime); } nrm_fini(context); nrm_ctxt_delete(context); /* Delete scopes */ nrm_scope_delete(region_scope); for (int i = 0; i < num_threads; i++) { nrm_scope_delete(thread_scope[i]); } /* compute stats */ /* report the configuration and timings */ fprintf(stdout, "NRM Benchmarks: %s\n", argv[0]); fprintf(stdout, "Version: %s\n", PACKAGE_VERSION); fprintf(stdout, "Description: one progress per iteration, Triad benchmark\n"); fprintf(stdout, "Array size: %zu (elements).\n", array_size); fprintf(stdout, "Memory per array: %.1f MiB.\n", (double)memory_size / 1024.0 / 1024.0); fprintf(stdout, "Kernel was executed: %ld times.\n", times); fprintf(stdout, "Number of threads: %d\n", num_threads); fprintf(stdout, "Time (s): avg: %11.6f min: %11.6f max: %11.6f\n", 1.0E-09 * sumtime / times, 1.0E-09 * mintime, 1.0E-09 * maxtime); fprintf(stdout, "Perf (MiB/s): avg: %12.6f best: %12.6f\n", (3.0E-06 * 
memory_size) / (1.0E-09 * sumtime / times), (3.0E-06 * memory_size) / (1.0E-09 * mintime)); /* validate the benchmark: a minimum amount of bits should be different. */ err = 0; for (size_t i = 0; i < array_size && err == 0; i++) err = err || !nrmb_check_double(7.0, c[i], 2); if (err) fprintf(stdout, "VALIDATION FAILED!!!!\n"); else fprintf(stdout, "VALIDATION PASSED!!!!\n"); return err; }
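The pass/fail check above rests entirely on nrmb_check_double(7.0, c[i], 2): after every run of the triad kernel, each element should equal 1.0 + 3.0 * 2.0 = 7.0 up to a couple of low-order bits. The helper itself is defined elsewhere in nrm-benchmarks; the sketch below is a hypothetical stand-in (an assumption, not the real implementation) that reads the comment as a ULP-distance comparison.

#include <stdint.h>
#include <string.h>

/* Hypothetical bit-tolerance comparison (assumed semantics, not the actual
 * nrmb_check_double): returns nonzero when the two doubles are within 2^bits
 * representable values of each other. Adequate for same-sign, finite inputs
 * such as the 7.0 results validated above. */
static int check_double_bits(double expected, double actual, int bits)
{
	int64_t e, a;
	memcpy(&e, &expected, sizeof(e)); /* reinterpret the bit patterns */
	memcpy(&a, &actual, sizeof(a));
	int64_t diff = (e > a) ? e - a : a - e; /* ULP distance */
	return diff <= ((int64_t)1 << bits);
}

With bits = 2 this tolerates at most a 4-ULP discrepancy, mirroring the nrmb_check_double(7.0, c[i], 2) call above.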
GB_binop__ge_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_08__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_02__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_04__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int16) // A*D function (colscale): GB (_AxD__ge_int16) // D*A function (rowscale): GB (_DxB__ge_int16) // C+=B function (dense accum): GB (_Cdense_accumB__ge_int16) // C+=b function (dense accum): GB (_Cdense_accumb__ge_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int16) // C=scalar+B GB (_bind1st__ge_int16) // C=scalar+B' GB (_bind1st_tran__ge_int16) // C=A+scalar GB (_bind2nd__ge_int16) // C=A'+scalar GB (_bind2nd_tran__ge_int16) // C type: bool // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_INT16 || GxB_NO_GE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ge_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ge_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ge_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
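None of the GB(...) kernels in this file are meant to be called directly; they are dispatched internally when the matching built-in operator reaches them through the public GraphBLAS API. As a rough sketch of that route (assuming SuiteSparse:GraphBLAS is installed and linked with -lgraphblas), an eWiseMult with the predefined GrB_GE_INT16 operator lands in the _AemultB_* kernels above:

#include <GraphBLAS.h>
#include <stdio.h>

int main (void)
{
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix A, B, C ;
    GrB_Matrix_new (&A, GrB_INT16, 2, 2) ;
    GrB_Matrix_new (&B, GrB_INT16, 2, 2) ;
    GrB_Matrix_new (&C, GrB_BOOL, 2, 2) ;
    GrB_Matrix_setElement_INT16 (A, 3, 0, 0) ;
    GrB_Matrix_setElement_INT16 (B, 2, 0, 0) ;
    // C = (A .>= B) on the intersection of the patterns; with the
    // built-in op this can reach GB (_AemultB_08__ge_int16) and friends
    GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL, GrB_GE_INT16, A, B, NULL) ;
    bool cij = false ;
    GrB_Matrix_extractElement_BOOL (&cij, C, 0, 0) ;
    printf ("C(0,0) = %d\n", (int) cij) ;      // prints 1, since 3 >= 2
    GrB_Matrix_free (&A) ; GrB_Matrix_free (&B) ; GrB_Matrix_free (&C) ;
    GrB_finalize () ;
    return 0 ;
}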
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_08__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_02__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_04__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int16) // A*D function (colscale): GB (_AxD__ge_int16) // D*A function (rowscale): GB (_DxB__ge_int16) // C+=B function (dense accum): GB (_Cdense_accumB__ge_int16) // C+=b function (dense accum): GB (_Cdense_accumb__ge_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int16) // C=scalar+B GB (_bind1st__ge_int16) // C=scalar+B' GB (_bind1st_tran__ge_int16) // C=A+scalar GB (_bind2nd__ge_int16) // C=A'+scalar GB (_bind2nd_tran__ge_int16) // C type: bool // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_INT16 || GxB_NO_GE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ge_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ge_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ge_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
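This copy is the serial rendering of the same generated file: relative to the first, the #pragma omp parallel for lines are absent from the bind1st/bind2nd loops, and the copy that follows restores them. The pattern itself is plain OpenMP; here is a self-contained sketch of the same statically scheduled element-wise comparison on toy arrays (not GraphBLAS internals):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main (void)
{
    enum { N = 8 } ;
    int16_t x = 5 ;
    int16_t Bx [N] = {1, 9, 5, 3, 7, 5, 2, 8} ;
    bool Cx [N] ;
    int64_t p ;
    // every iteration is independent, so a static schedule simply splits
    // the index range evenly across the thread team, as in the kernels here
    #pragma omp parallel for num_threads(4) schedule(static)
    for (p = 0 ; p < N ; p++)
    {
        Cx [p] = (x >= Bx [p]) ;
    }
    for (p = 0 ; p < N ; p++) printf ("%d", (int) Cx [p]) ;   // prints 10110110
    printf ("\n") ;
    return 0 ;
}

Build with cc -fopenmp; without that flag the pragma is ignored and the loop runs serially, which is precisely the difference between this copy and the next.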
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_08__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_02__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_04__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int16) // A*D function (colscale): GB (_AxD__ge_int16) // D*A function (rowscale): GB (_DxB__ge_int16) // C+=B function (dense accum): GB (_Cdense_accumB__ge_int16) // C+=b function (dense accum): GB (_Cdense_accumb__ge_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int16) // C=scalar+B GB (_bind1st__ge_int16) // C=scalar+B' GB (_bind1st_tran__ge_int16) // C=A+scalar GB (_bind2nd__ge_int16) // C=A'+scalar GB (_bind2nd_tran__ge_int16) // C type: bool // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_INT16 || GxB_NO_GE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ge_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ge_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ge_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
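The _bind2nd kernel above likewise has a public entry point: GrB_apply with a binary operator and a scalar bound to the second argument. A minimal sketch (assuming a GraphBLAS C API v1.3+ implementation such as SuiteSparse), computing C = (A >= 2) elementwise:

#include <GraphBLAS.h>
#include <stdio.h>

int main (void)
{
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix A, C ;
    GrB_Matrix_new (&A, GrB_INT16, 1, 3) ;
    GrB_Matrix_new (&C, GrB_BOOL, 1, 3) ;
    GrB_Matrix_setElement_INT16 (A, 1, 0, 0) ;
    GrB_Matrix_setElement_INT16 (A, 2, 0, 1) ;
    GrB_Matrix_setElement_INT16 (A, 3, 0, 2) ;
    // bind the scalar 2 as y in z = (aij >= y); with the built-in op
    // this is the route into GB (_bind2nd__ge_int16)
    GrB_Matrix_apply_BinaryOp2nd_INT16 (C, NULL, NULL, GrB_GE_INT16, A, 2, NULL) ;
    for (GrB_Index j = 0 ; j < 3 ; j++)
    {
        bool v = false ;
        GrB_Matrix_extractElement_BOOL (&v, C, 0, j) ;
        printf ("%d", (int) v) ;               // prints 011
    }
    printf ("\n") ;
    GrB_Matrix_free (&A) ; GrB_Matrix_free (&C) ;
    GrB_finalize () ;
    return 0 ;
}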
sc35_fixedRepulse.c
/*MODIFICATIONS: when wl=3, rotation of the 0th particle is allowed to move. We are trying to remove a bug where a particle occasionally gets stuck when wl=1; it is caused by the motion of the center of mass. The likely cause: to counter the accumulation of numerical errors, the center of mass of the system is recomputed from time to time, and that recomputation uses periodic boundary conditions, which can spoil the whole thing, because a particle that flew into the neighboring box shifts the center of mass when mapped back into the primary box (-z/2 to z/2). We are testing this - the center-of-mass recomputation has been removed out of concern about numerical errors. "F I N I S H E D" is printed to stdout once the wl conditions are met. */ /*TODO in future: - Non-equilibrium candidate moves - check scaling of particles of different sizes - should scale with contact area! - cell list - divide the simulation box into cells where particles interact with each other and outside of which the interaction is definitely 0 - saves time; better scaling with system size; possibly long spherocylinders could be in several cells to keep good scaling - better cluster algorithm - put in Wang-Landau - cluster list works for spherocylinders only now */ /*------------------------------------------------------------------------------ Version 3.5 - linear bond at spherocylinders, where the second spherocylinder is harmonically attached to a point at a distance of bondlength from the first spherocylinder, and it follows the direction of the spherocylinder - bonded particles belong to the same cluster - print energy at statistical reports - have particles of different lengths - interaction scaling back to v1+v2 (no addition of 1.0) - more physical */ /*------------------------------------------------------------------------------ Version 3.4 - New handling of the option file - reaction coordinate radius around the z axis for pore calculations - reaction coordinate as number of particles in contact (defined by distance of CMs) - 2D Wang-Landau method - New Wang-Landau coordinate - pore radius in a vesicle around the origin of the xy plane - New models TPSC, TCPSC, TCHPSC, TCHCPSC - models with two patches; note that the switch function on the sides of a patch is linear in the cosine of the angle, not in the angle itself; as a result, two patches with overlapping sides do not compensate easily to a flat profile - FIX chirality was doubled (angle twice as large) - Added possibility of excluded interactions [EXCLUDE] in the topology file - MPI replica exchange with different temperatures and pressures (paraltemp paralpress); the input configuration is #{number of process}config.init; if it does not exist, config.init is used; each replica runs with a different random seed = seed+mpirank - config.init can look like a movie snapshot - MPI exchange with Wang-Landau - added angular interaction between neighboring spherocylinders (in a chain); angle1 is the angle between sc directions and angle2 is the angle between the patches */ /*------------------------------------------------------------------------------- Version 3.3 - external potential can be added as part of the topology - it can be a hard or an attractive wall */ /** * Changes made by Noah S. Bieler and Robert Vacha: * * New version 3.2 * * - The length now has to be specified in the topology file, but lengths are not * allowed to differ from each other. The option file shall no longer contain * a length option. * - The particles can now switch their type based on the chemical potential * delta_mu (= energy difference from state 2 to state 1). * - For that a new option was introduced: Average attempts per sweep to switch * a type. * - A lot of variables are now combined in either topo, sim or conf. 
The rule * should be: * > topo: Everything that belongs to the topology and that should not change * during the game. * > sim: Options and stuff that have to do with the simulation. (Maybe the * current target and such should be saved in there as well.) * > conf: What changes every step concerning the particles and the box, or * in other words: what has been read from conf.init * - added a cluster determining routine => sim->clusterlist + sim->clusters * - added macros for TRUE and FALSE * - Added option for the random seed * - Basic neighbour list implemented * - New types: chiral CPSC (CHCPSC) and chiral PSC (CHPSC) and their interactions */ /*-------------------------------------------------------------------------------- sc31.c Patchy Spherocylinder Version 3.1 Wang-Landau method of free energy calculations It is set in the options file as: 0 = none, 1 = z-distance of 1st particle from system CM, 2 = hole in xy plane of SCA = membrane hole It reads a file wl.dat and writes wl-new at the end. There is a value of alpha on the first line and then there are three columns: 1- order parameter, 2- weights, 3- histogram Interaction of spherocylinders is scaled based on the volume of the attractive patch; the unit of one is that two spheres of diameter sigma = 1.0 attract each other by 1.0. Using this in interactions among lipids and spherocylinders should be consistent. The start-up configuration "config.init" file has a box size on the first line now. (I tested performance: compilation with optimization -O2 speeds things up 10%; the rest has negligible effect, including usage of static arrays instead of dynamic; most of the time is consumed by the paire function. 6,519,638,177 :simulate 6,492,411,300 :energyone 5,705,685,593 :paire 542,561,887 :bondenergy 489,463,361 :eattractive11 450,443,970 :image 115,126,519 :erepulsive */ /* -------------------------------------------------------------------------------- sc3.c Patchy Spherocylinder Version 3.0 Beads were added to the particle list. bead(10) - repulsive bead(11) - isotropically attractive - It is necessary to also provide a topology file (top.init) - Particles are placed in chains according to the topology order, including connections - Particle arrays are now allocated dynamically on the heap - displacement and rotation are optimized for highest RMSD performance - NPT ensemble with isotropic and anisotropic couplings; in pressure moves all particles are rescaled with their center (chains are not rescaled with CM) 0 - anisotropic coupling, 1 - isotropic coupling, 2 - isotropic in xy, z=const bead types and their interactions repulsive(10) purely repulsive sphere with WCA potential on closest distance parameters: Patch repulsion sigma - defined as where repulsion reaches zero isotropic(11) - isotropic cos^2 potential acting isotropically, dependent only on the closest distance between objects. Parameters: distance of attractivity (should be at least sigma*2^(1/6)) defines how far the attraction is constant -e. After this distance follows a switch length over which the attraction goes to zero as cos^2. Rest as repulsive model. sc2.c Patchy Spherocylinder Version 2.0 It is possible to make chains of spherocylinders that are connected through hemispherical caps by a harmonic bond. There are two parameters, eq distance and strength of the harmonic spring; note that units are in 1 kT/e, so the MC strength of the bond changes with the temperature parameter. Patchy Spherocylinder Version 1.0 Includes different types of possible interactions: repulsive(0) - purely repulsive spherocylinder with WCA potential on closest distance. 
parameters: Patch repulsion sigma - defined as where repulsion reaches zero. isotropic(1) - isotropic cos^2 potential acting isotropically, dependent only on the closest distance between spherocylinders. Parameters: distance of patch, Interaction distance of patch (should be at least sigma*2^(1/6)) defines how far the attraction is constant -e. After this distance follows a switch length over which the attraction goes to zero as cos^2. Rest as repulsive model. patchy(2) - Attractive potential limited to an angular wedge on the spherocylinder. The patch goes all the way through, making the hemispherical caps on the ends attractive as well. Parameters: the angular part has a parameter defining its size, "Angular size of patch (degrees)", and the width of the switch function, "Angular switch off of patch (degrees)", over which the attraction reaches zero - it is a linear function. Rest as isotropic model. cylindrical(3) - Attractive potential limited to an angular wedge on the cylindrical part of the spherocylinders. The hemispherical caps on the ends are repulsive. Rest as patchy model.
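For reference, the radial attraction profile shared by the isotropic and patchy models above can be written compactly. The sketch below is illustrative only, not the actual paire routine; pdis and pswitch name the patch interaction distance and switch length, matching struct ia_param further down.

        static double attract_profile(double dist, double epsilon,
                                      double pdis, double pswitch)
        {
            const double pi = 3.141592653589793;
            if (dist < pdis)
                return -epsilon;
            if (dist < pdis + pswitch) {
                double c = cos(pi * (dist - pdis) / (2.0 * pswitch));
                return -epsilon * c * c;
            }
            return 0.0;
        }

The value is -epsilon at dist = pdis and reaches exactly zero at pdis + pswitch, which is the cos^2 switch described for these models.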
Note that particles are internally numbered from 0; there is a preallocated particle array of size MAXN because in the future there can be a grand canonical ensemble and the number of particles may vary. Follows the MC of hard-wall spherocylinders, version 7 by Mark Miller - description below sc.c Version 1 Performs basic constant volume MC simulation of hard spherocylinders with rigid cuboidal boundary conditions. Run parameters are read in from the file "options". The template for this file appears at the end of the code. The values must be inserted before the colons. The initial configuration is read from the file "config.init". The first line contains the size of the box. The format for the file is nine columns: three for the positions, three for the direction vector, and three for the direction of the patch. The direction vectors are normalised after being read in. The configuration is checked for particle overlaps. The unit of length is taken as the spherocylinder diameter. Hence the ratio L/D is equal to the length of the cylinder. Order parameters for nematic and smectic order are evaluated. The nematic order parameter is related to the coefficient of the quadratic term in the Legendre expansion of the orientational distribution function. Any smectic order is assumed to be directed along the z axis, and is detected by the coefficients of the Fourier expansion of the position distribution function. MM 12.vii.01 .................................................................................. Version 2 The aspect ratio of the box may now fluctuate, keeping the volume constant. Two new parameters are required in the options file to specify the average number of attempted shape changes per sweep, and the initial maximum trial change in a box dimension. Shape changes are made by picking one of the three box lengths at random, making a random change, evenly distributed between plus and minus a finite interval, choosing a second direction and doing the same, then determining the new length in the remaining direction from the condition of constant volume. The step-size equilibration period is now split into three parts: displacement, rotation, and shape change. The most important change to the code is that the particle coordinates are now stored as fractions of the box dimensions. However, input and output configurations are still communicated in units of the cylinder diameter, D=1. Note that the displacement maximum step size is now specified as a fraction of the box length, not as an absolute distance. MM 18.vii.01 .................................................................................. Version 3 Constant pressure MC. The volume may fluctuate. Volume changes are attempted by altering just one box length at a time, chosen at random. The running average of the density is calculated and reported. MM 24.vii.01 .................................................................................. Version 7 The composite translation-plus-rotation moves have been split into separate move types, each of which is attempted with equal probability. This enables acceptance ratios to be accumulated separately for these degrees of freedom, so that maximum step sizes can be adjusted more sensibly. A few other things have been tidied up, such as defining structures for the book-keeping of statistics and acceptance ratios. MM 9.v.02 --------------------------------------------------------------------------------*/ #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #ifdef MACOS # include "getline.h" #endif #ifdef MPI # include <mpi.h> #endif /* Macros for DEBUG messages */ #ifdef DEBUGGING_INIT #define DEBUG_INIT(...) fprintf(stderr, "DB in INIT: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); fflush(stderr); #else #define DEBUG_INIT(...) #endif #ifdef DEBUGGING_SIM #define DEBUG_SIM(...) fprintf(stderr, "DB in SIM: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); fflush(stderr); #else #define DEBUG_SIM(...) #endif #ifdef DEBUGGING #define DEBUG(...) fprintf(stderr, "DB: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); fflush(stderr); #else #define DEBUG(...) #endif /* End of DEBUG macros */ /* With pairlist ? */ #define WITH_PAIRLIST /* Boolean Macros */ #define BOOL int #define TRUE 1 #define FALSE 0 /* End of Boolean Macros */ #define MAXF 20 /* Maximum number of Fourier terms */ #define MAXN 14000 /* Maximum number of particles */ #define MAXCHL 10 /* Maximum length of chain */ #define ZEROTOL 1.0e-12 /* Dot products below ZEROTOL are deemed zero */ #define ZEROTOL2 1.0e-8 /* numbers below ZEROTOL are deemed zero */ #define PI 3.141592653589793238462643383279 /* pi */ #define PIH 1.57079632679489661923132169163975 /* pi half*/ /*Particle types*/ #define SC 10 /*spherocylinder*/ #define SCN SC+0 /*spherocylinder non-attractive*/ #define SCA SC+1 /*spherocylinder isotropically attractive*/ #define PSC SC+2 /*spherocylinder with patchy attraction*/ #define CPSC SC+3 /*spherocylinder with cylindrical patchy attraction*/ #define CHPSC SC+4 /* chiral psc */ #define CHCPSC SC+5 /* chiral cpsc */ #define TPSC SC+6 /*spherocylinder with two patches*/ #define TCPSC SC+7 /*spherocylinder with two cylindrical patches*/ #define TCHPSC SC+8 /* chiral 2psc */ #define TCHCPSC SC+9 /* chiral 2cpsc */ #define SP 30 /*sphere - should be above all spherocylinders*/ #define SPN SP+0 /* sphere non-attractive*/ #define SPA SP+1 /* sphere isotropically attractive*/ #define MAXT 30 /* Maximum number of types we have*/ #define MAXMT 100 /* Maximum number of molecular types */ /*Reading topology*/ #define SMSTR 64 /* Small string length */ #define STRLEN 400 /* maximum length of line*/ #define CONTINUE '\\' /* symbol for line continuation*/ #define COMMENTSIGN '#' /* symbol for comment*/ #define OPENKEY '[' /* starting sign for keyword*/ #define CLOSEKEY ']' /* ending sign for keyword*/ #define SEPARATOR ':' /* sign for separator*/ #define OPENMOL '{' /* starting sign for molecules*/ #define CLOSEMOL '}' 
/* ending sign for molecules*/ #define BOXSEP 'x' /* extraction of box*/ /* Wang-Landau method */ #define WL_GERR 0.0001 /* Max roughness in histogram */ #define WL_ALPHATOL 0.0000001 /* Convergence criteria for detailed balance */ #define WL_MINHIST 1000 /* Minimum histogram sampling for considering roughness */ #define WL_ZERO 0.000000000000 /* Zero for histogram with current weights*/ #define WL_CONTACTS 36.0 /* Square distance under which particles are in contact */ /* Math */ #define DOT(a,b) ((a).x * (b).x + (a).y * (b).y + (a).z * (b).z) /* Dot product */ #define AVER(a,b) ((a+b)*0.5) /* Arithmetic average*/ #define ROUND(a) (a > 0.0) ? floor(a + 0.5) : ceil(a - 0.5); /* Round double*/ #define PMONE(a) (1 - 2 * a) /* Takes 1 or 0, returns +-1 */ /* Acceptance ratio */ #define RATIO(a) ( ((a).acc+(a).rej) > 0 ? 1.0*(a).acc/((a).acc+(a).rej) : 0.0 ) #define INBOX(a,b) ( a > 0 ? modf(a,&b) : modf(a,&b)+1 ) /*................................................................ Structure definitions */ struct vector { /* Define a 3D vector structure */ double x; double y; double z; }; struct quat { /* Define a quaternion structure */ double w; double x; double y; double z; }; struct particles { /* Define a particle */ struct vector pos; /* Position vector */ struct vector dir; /* Unit direction vector of axis */ struct vector patchdir[2]; /* Vector defining orientation of patch */ struct vector patchsides[4]; /* Vector defining sides of patch */ struct vector chdir[2]; /* Direction for chirality - keep in memory to increase speed */ long chaint; /* Chain type*/ long chainn; /* Chain number*/ int type; /* Type of the particle */ int switchtype; /* With which kind of particle do you want to switch?*/ double delta_mu; /* Chemical potential for the switch */ int switched; /* 0: in initial state; 1: in the switched state */ }; struct ia_param{ /* Contains properties and parameters of particle types */ char name[SMSTR]; /* The name of the particle type */ char other_name[SMSTR]; /* The name of the particle type */ int geotype[2]; /* The geometrical type: spherocylinder (0-repulsive, 1-isotropic, 2-patchy, 3-cylindrical) or sphere (0-repulsive, 1-isotropic) */ double sigma; /* Repulsion wca*/ double epsilon; /* Repulsion strength*/ double pdis; /* Interaction distance of patch */ double pswitch; /* Switch of distance of patch */ double pangl[4]; /* angular size of patch as was specified in input */ double panglsw[4]; /* angular size of patch switch as was specified in input */ double pcangl[4]; /* cosine of half size angle - rotation from patch direction to side */ double pcanglsw[4]; /* cosine of half size angle plus switch - rotation from patch direction to side */ double rcut; /* Cutoff for attraction */ double rcutwca; /* Cutoff for repulsion*/ double pcoshalfi[4]; /* Cosine of half angle going to side of interaction */ double psinhalfi[4]; /* Sine of half angle going to side of interaction - useful for quaternion rotation */ double csecpatchrot[2]; /* Cosine of rotation of second patches in 2psc models*/ double ssecpatchrot[2]; /* Sine of rotation of second patches in 2psc models*/ double volume; /* Volume of particle for geometrical center calculations*/ double pvolscale; /* Scale of patch volume size*/ double len[2]; /* Length of the PSC */ double half_len[2]; /* Half length of the PSC */ double chiral_cos[2]; /* Contains the cosine for the chiral rotation of the patch */ double chiral_sin[2]; /* Contains the sine for the chiral rotation of the patch */ }; struct interacts { /* Parameters passed to 
functions of interaction calculation */ double dist; /* closest distance */ struct vector distvec; /* vector of closest distance */ struct particles * part1; /* particle 1 */ struct particles * part2; /* particle 2 */ struct vector box; /* box size */ struct ia_param * param; /* interaction parameters */ struct vector r_cm; /* vector connecting centers of mass */ double distcm; /* distance between centers of mass */ double dotrcm; /* square size of r_cm*/ double contt; /* closest point on spherocylinder to sphere */ };
struct chainparams { /* Parameters for inner interaction in chains*/ double bond1eq; /* Equilibrium distance of harmonic bond between nearest neighbours*/ double bond1c; /* Spring constant for harmonic bond between nearest neighbours*/ double bond2eq; /* Equilibrium distance of harmonic bond between second nearest neighbours*/ double bond2c; /* Spring constant for harmonic bond between second nearest neighbours*/ double bonddeq; /* Equilibrium distance of directional harmonic bond between the nearest neighbours*/ double bonddc; /* Spring constant for directional harmonic bond between the nearest neighbours*/ double angle1eq; /* Equilibrium angle between two spherocylinders - nearest neighbours*/ double angle1c; /* Spring constant for angle between two spherocylinders - nearest neighbours*/ double angle2eq; /* Equilibrium angle between two spherocylinder patches - nearest neighbours*/ double angle2c; /* Spring constant for angle between two spherocylinder patches - nearest neighbours*/ };
struct molecule { /* This structure is for I/O only */ char * name; /* The name of the molecule */ long * type; /* The type of the particle */ long * switchtype; /* The switchtype of the particle */ double * delta_mu; /* The chemical potential for the switch */ };
struct disp { /* Define step size and acceptance ratio statistics */ double mx; /* Maximum value of displacement, cos(angle), etc.
*/ double angle; /* Maximum angle, since cos(angle) is saved in .mx */ long acc; /* Number of accepted steps */ long rej; /* Number of rejected steps */ double oldrmsd; /* Averaged mx value in previous equilibration round */ double oldmx; /* Change in mx in the last equilibration step */ };
struct stat { /* Define statistics counters */ double sum; double sum2; long samples; double mean; double rms; };
struct meshs { /* Mesh for hole order parameter */ int dim[2]; /* Mesh dimensions */ int *data; /* Mesh data */ int *tmp; /* temporary list for hole search */ };
struct wls { /* Wang-Landau method (wl) */ double *weights; /* Array of weights for wl method */ long *hist; /* Array of histogram for wl method */ long length[2]; /* Length of above arrays */ double dorder[2]; /* Increments of order parameter */ double minorder[2]; /* Minimum order parameter */ double alpha; /* Current modifier of weights */ long currorder[2]; /* Value of current order parameter*/ long neworder[2]; /* wl order parameter in new step */ long max; /* wl maximum of histogram */ long min; /* wl minimum of histogram */ double wmin; /* weights minimum */ int wlmdim; /* Dimensionality of Wang-Landau */ int wlmtype; /* Atom type for the Wang-Landau method (wl) */ double wl_meshsize; /* Size of mesh bin for hole order parameter*/ struct meshs mesh; /* Mesh for hole order */ struct meshs origmesh; /* Mesh store for rejected moves */ long * radiushole; /* Array for hole radius around origin */ long * radiusholeold; /* Array for hole radius around origin - big move */ long radiusholemax; /* Size of array for hole radius*/ long partincontact; /* Number of particles in contact */ long partincontactold; /* Number of particles in contact - old for move*/ };
struct pairs{ /* The structure holding the particle numbers of the pairs and the number of pairs */ long num_pairs; /* The number of pairs */ long * pairs; /* The particle numbers of the pairs */ };
struct pairlist{ /* I think this is overly complicated: just sim->pairs[npart] should be enough */ struct pairs * list; /* contains the pairlist of all particles */ };
struct cluster{ /* contains all the particles of one cluster */ long npart; long * particles; };
struct exters{ BOOL exist; /* existence of external potential*/ double thickness; /* external wall thickness*/ double epsilon; /* depth of attraction*/ double attraction; /* distance of attraction*/ double sqmaxcut; /* distance beyond which nothing can interact*/ struct ia_param interactions[MAXT]; /* Interaction parameters with particle types generated from above params*/ };
struct topo{ /* It would be nice if this struct contained all the topo stuff in the end*/ long * switchlist; /* List containing the numbers of all the particles with switchtypes */ long n_switch_part; /* number of particles with switchtype */ double sqmaxcut; /* square of distance over which even spherocylinders cannot interact (distance between CM) */ double maxcut; /* distance over which even spherocylinders cannot interact (distance between CM) */ long conlist[MAXN][4]; /* Connectivity list; we have connections to tail and head and second neighbours so far*/ long chainlist[MAXN][MAXCHL]; /* List of chains*/ long chainnum; /* Number of chains */ struct chainparams chainparam[MAXMT]; /* parameters for chains */ struct ia_param ia_params[MAXT][MAXT]; /* parametrization of particles for all interactions*/ long npart; /* Number of particles */ struct exters exter; /* external potential - wall */ };
struct sim{ /* Should contain mostly all the simulation options and
variables that can change in every step. */ double press; /* Pressure */ double paralpress; /* Parallel pressure for replica exchange*/ double dpress; /* Pressure change for replica exchange*/ double shave; /* Average number of volume changes to attempt per sweep */ double shprob; /* Probability of attempting a volume change */ double chainprob; /* Average number of chain move attempts per sweep */ double switchprob; /* Average number of type switch attempts per sweep */ int pairlist_update; /* Number of sweeps between pairlist updates */ double temper; /* Temperature*/ double paraltemper; /* Temperature for parallel tempering */ double dtemp; /* Temperature step */ int ptype; /* Type of pressure coupling*/ long adjust; /* Number of sweeps between step size adjustments */ long movie; /* Number of sweeps between movie frames */ long nequil; /* Number of equilibration sweeps */ long nsweeps; /* Number of production sweeps */ long paramfrq; /* Number of sweeps between order parameter samples */ long report; /* Number of sweeps between statistics reports */
// long terms; /* Number of Fourier terms as smectic order parameters */
long nrepchange; /* Number of sweeps between replica exchanges */ int wlm[2]; /* Wang-Landau method (wl) */ struct disp edge; /* Maximum box length change and statistics */ struct disp rot[MAXT]; /* Maximum rotation and statistics */ struct disp trans[MAXT]; /* Maximum translation and statistics*/ struct disp chainm[MAXMT]; /* Maximum translation for chain and statistics*/ struct disp chainr[MAXMT]; /* Maximum rotation for chain and statistics */ struct disp mpiexch; /* MPI statistics*/ struct pairs * pairlist; /* The pairlist */ long write_cluster; /* Number of sweeps per writing out cluster info */ long * clusterlist; /* clusterlist[i] = cluster index of particle i */ struct cluster * clusters; /* information about the individual clusters */ double *clustersenergy; /* list of energies of clusters*/ long num_cluster; /* number of individual clusters */ long * clusterstat; /* Statistics about cluster sizes */ long max_clust; /* maximal cluster size */ struct wls wl; /* Wang-Landau data */ int mpirank; /* MPI rank of the given process*/ int mpinprocs; /* MPI number of processes */ };
typedef enum { /* Holds the type of a variable in struct option */ Int, Int2, Long, Double } Type;
typedef struct { /* for reading in the options */ char *id; /* The name of the value in the option file*/ Type type; /* The type (int, double or long) */ BOOL set; /* Whether the variable has been set */ void *var; /* The variable */ } Option;
struct conf{ /* Configuration of the system*/ struct particles * particle; /* All particles*/ struct vector box; /* Box size*/ double sysvolume; /* Something like total mass*/ struct vector syscm; /* System center of mass*/ };
struct filenames { /* input files */ char configurationinfile[30]; char topologyfile[30]; char optionsfile[30]; char wlinfile[30]; /* output files */ char configurationoutfile[30]; char moviefile[30]; char wloutfile[30]; char statfile[30]; char clusterfile[30]; char clusterstatfile[30]; char energyfile[30]; };
struct mpiexchangedata{ /* extra type for MPI communication*/ struct vector box; /* box of configuration */ double energy; /* energy of configuration */ double volume; /* volume of configuration */ int accepted; /* whether the exchange was accepted */ struct vector syscm; /* system CM of configuration */ long radiusholemax; /* size of array for WL*/ long wl_order[2]; /* Wang-Landau order parameter*/ };
#ifdef MPI
MPI_Datatype MPI_vector,
MPI_Particle, MPI_exchange; #endif const struct stat nullstat = {0.0, 0.0, 0, 0.0, 0.0}; long seed = 6; /* Seed for random number generator */ /*..............................................................................*/ int main(int argc, char **argv) { DEBUG("start"); FILE *outfile,*mov; /* Handle for writing configuration */ double (* intfce[MAXT][MAXT])(struct interacts *); /*array of interaction functions*/ struct topo topo; /* will maybe contain all the topo stuff in future */ struct sim sim; /* Should contain the simulation options. */ struct conf conf; /* Should contain fast changing particle and box(?) information */ struct filenames files; int memoryalloc(struct conf * conf); int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim); void read_options(struct sim* sim, char filename[30]); void init_top(struct topo *, struct conf * conf, struct sim * sim, char filename[30]); void init_config(struct topo * topo, struct conf * conf, struct sim * sim, char filename[30]); void init_intfce(double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo); void draw(FILE *, struct conf * conf, struct topo * topo); void printeqstat(struct disp *, double, int); void simulate(long nsweeps, long adjust, long paramfrq, long report, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct sim * sim, struct conf * conf, struct filenames *files); void init_pairlist(struct topo * topo, struct sim * sim); void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf); void print_pairlist(FILE * stream, struct sim * sim, struct topo * topo); int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *) ); int print_clusterlist(FILE * stream, BOOL decor, struct topo * topo, struct sim * sim, struct conf * conf); int print_clusters(FILE * stream, BOOL decor, struct sim * sim); int print_clusterstat(FILE * stream, BOOL decor, struct sim * sim); int sort_clusterlist(struct topo * topo, struct sim * sim); printf ("\nPatchy Spherocylinders version 3.5 "); sprintf(files.configurationinfile, "config.init"); sprintf(files.configurationoutfile, "config.last"); sprintf(files.optionsfile, "options"); sprintf(files.topologyfile, "top.init"); sprintf(files.moviefile, "movie"); sprintf(files.wlinfile, "wl.dat"); sprintf(files.wloutfile, "wl-new.dat"); sprintf(files.statfile, "stat.dat"); sprintf(files.clusterfile, "cluster.dat"); sprintf(files.clusterstatfile, "cluster_stat.dat"); sprintf(files.energyfile, "energy.dat"); #ifdef MPI FILE *infile; printf(" MPI version"); MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD, &(sim.mpinprocs) ); MPI_Comm_rank(MPI_COMM_WORLD, &(sim.mpirank) ); sprintf(files.configurationoutfile, "%dconfig.last", sim.mpirank); sprintf(files.moviefile, "%dmovie", sim.mpirank); sprintf(files.wloutfile, "%dwl-new.dat", sim.mpirank); sprintf(files.clusterfile, "%dcluster.dat", sim.mpirank); sprintf(files.clusterstatfile, "%dcluster_stat.dat", sim.mpirank); sprintf(files.energyfile, "%denergy.dat", sim.mpirank); sprintf(files.statfile, "%dstat.dat", sim.mpirank); /*test if there is a specific input configuration for mpi run*/ sprintf(files.configurationinfile, "%dconfig.init", sim.mpirank); infile = fopen(files.configurationinfile, "r"); if (infile != NULL) fclose (infile); else sprintf(files.configurationinfile, "config.init"); /*test if there is a specific input wang-landau for mpi run*/ sprintf(files.wlinfile, "%dwl.dat", sim.mpirank); infile = 
fopen(files.wlinfile, "r"); if (infile != NULL) fclose (infile); else sprintf(files.wlinfile, "wl.dat");
#endif
printf ("\n-------------------------------------\n"); printf ("Reading options...\n"); read_options(&sim,files.optionsfile); init_top(&topo, &conf, &sim,files.topologyfile);
if (topo.chainnum == 0) { /* no chains: make the probability of moving them 0 */ if (sim.chainprob > 0) printf ("No chains... chain move probability set to 0.\n"); sim.chainprob = 0; }
printf ("\nReading configuration...\n"); init_config(&topo, &conf, &sim, files.configurationinfile);
printf ("Equilibration of maximum step sizes: %ld sweeps\n", sim.nequil/2); fflush (stdout);
if ( sim.wlm[0] > 0 ) { outfile = fopen(files.wlinfile, "r"); if (outfile == NULL) { printf ("ERROR: Cannot open file for Wang-Landau method (%s).\n",files.wlinfile); memorydealloc(&conf, &topo, &sim); exit(1); } fclose (outfile); }
/* Empty movie file (use files.moviefile so the per-rank name set up for MPI runs is honoured) */ mov = fopen(files.moviefile, "w"); fclose (mov);
printf ("\nInitializing energy functions...\n"); init_intfce(intfce, &topo);
if (sim.pairlist_update) { init_pairlist(&topo, &sim); }
if (sim.nequil) { printf("\nStart equilibration...\n"); simulate(sim.nequil/2, sim.adjust, 0, 0, intfce, &topo, &sim, &conf,&files); simulate(sim.nequil/2, 0, 0, 0, intfce, &topo, &sim, &conf,&files); printf (" Equilibrated maximum displacement / acceptance ratio: \n"); printeqstat(sim.trans,2.0,MAXT); printf (" Equilibrated maximum rotation / acceptance ratio: \n"); printeqstat(sim.rot,1.0,MAXT); printf (" Equilibrated maximum box length change / acceptance ratio: \n"); printf (" %.6le / %.6le\n", sim.edge.mx/2.0,RATIO(sim.edge)); printf (" Equilibrated maximum displacement of chain / acceptance ratio: \n"); printeqstat(sim.chainm,2.0,MAXMT); printf (" Equilibrated maximum rotation of chain / acceptance ratio: \n"); printeqstat(sim.chainr,1.0,MAXMT); printf ("\n"); printf ("Further equilibration of configuration: %ld sweeps\n", sim.nequil/2); fflush (stdout); outfile = fopen("config.eq", "w"); fprintf (outfile, "%15.8le %15.8le %15.8le\n", conf.box.x, conf.box.y, conf.box.z); draw (outfile, &conf, &topo); fclose (outfile); printf (" Equilibrated configuration written to config.eq\n"); printf (" Box dimensions: %.10lf, %.10lf, %.10lf\n\n", conf.box.x, conf.box.y, conf.box.z); }
printf ("Production run: %ld sweeps\n\n", sim.nsweeps); fflush (stdout); simulate(sim.nsweeps, 0, sim.paramfrq, sim.report, intfce, &topo, &sim, &conf,&files);
#ifdef MPI
printf (" MPI replica changeT / changeP / acceptance ratio: \t %.6lf / %.6lf / %.6lf\n\n", sim.mpiexch.mx,sim.mpiexch.angle,RATIO(sim.mpiexch));
#endif
outfile = fopen(files.configurationoutfile, "w"); fprintf (outfile, "%15.8le %15.8le %15.8le\n", conf.box.x, conf.box.y, conf.box.z); draw (outfile, &conf, &topo); fclose (outfile);
// For testing the pairlist
//gen_pairlist(&topo, &sim, &conf);
//FILE * fpairlist;
//fpairlist = fopen("pairlist.dat", "w");
//print_pairlist(fpairlist, &sim, &topo);
//fclose(fpairlist);
//printf("sqmaxcut = %lf\n", topo.sqmaxcut);
//// For testing the cluster algorithm
//gen_clusterlist(&topo, &sim, &conf);
//print_clusterlist(stdout, TRUE, &topo, &sim, &conf);
//sort_clusterlist(&topo, &sim);
//print_clusters(stdout, TRUE, &sim);
//print_clusterstat(stdout, TRUE, &sim);
if (memorydealloc(&conf, &topo, &sim)) exit(1);
#ifdef MPI
MPI_Finalize();
#endif
printf ("\nDone\n\n"); return 0; }
/*..............................................................................*/
/*.........................SIMULATION RUN.......................................*/
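/* Editor's sketch (illustrative only, never called): the move routines below fold the
   Wang-Landau bias into the Metropolis test by adding wlener = weights[new] - weights[old]
   to the old-state energy before calling movetry(energy, enermove, temper). The helper
   below shows the resulting acceptance rule in one place; the name, the uniform01
   parameter, and the return convention "nonzero = reject" are assumptions read off the
   call sites in this file (exp() is assumed available from the math header used above). */
static int wl_movetry_sketch(double eold, double enew, double wold, double wnew,
                             double temper, double uniform01) {
    /* effective energy difference: configurational change minus the gain in WL weight */
    double darg = (enew - (eold + (wnew - wold))) / temper;
    if (darg <= 0.0) return 0;                 /* downhill: always accept */
    return (uniform01 < exp(-darg)) ? 0 : 1;   /* 0 = accept, 1 = reject */
}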
/*..............................................................................*/ void simulate(long nsweeps, long adjust, long paramfrq, long report, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct sim * sim, struct conf * conf, struct filenames *files) { long i,j,wli; long next_adjust; /* Next sweep number for step size adjustment */ long next_calc; /* Next sweep number for order parameter calculation */ long next_dump; /* Next sweep number for reporting statistics */ long next_frame; /* Next sweep number for dumping a movie frame */ long step; /* Step number within a given sweep */ long sweep; /* Current sweep number */ //struct stat nem; /* Nematic order parameter */ //struct stat vol; /* Volume statistics */ //struct stat shapex, shapey, shapez; /* Box shape statistics */ //struct stat smec[MAXF]; /* Smectic order parameters (Fourier coeeficients) */ FILE *mf; /* Handle for movie file */ FILE *cl_stat, *cl, *cl_list; /* Handle for cluster statistics */ FILE *ef, *statf; /* Handle for energy file and statistical file*/ double edriftstart; /* Energy drift calculation - start */ double edriftchanges; /* Energy drift calculation - accumulate all changes through moves */ double edriftend; /* Energy drift calculation - end */ double pvdriftstart; /* PV drift calculation - start */ double pvdriftend; /* PV drift calculation - end */ double volume; /* volume of box*/ double moveprobab; /* random number selecting the move*/ /* function declarations */ //double nematic(long, struct particles *); double ran2(long *); //double smectic(long, struct particles *, long); double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); void accumulate(struct stat *, double); void draw(FILE *, struct conf * conf, struct topo * topo); void optimizestep(struct disp *, double, double); void optimizerot(struct disp *, double, double); void partvecinit(struct topo * topo, struct sim * sim, struct conf * conf ); int wlinit(struct wls *, char filename[30]); int wlwrite(struct wls *, char filename[30]); int wlend(struct wls *); int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim); int mesh_end(struct meshs *); long z_order(struct wls *, struct conf * conf,int); long twopartdist(struct wls *, struct conf * conf,int); void mesh_print (struct meshs *); void masscenter(long, struct ia_param [MAXT][MAXT], struct conf * conf); void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf); int write_cluster(FILE * cl_stat, FILE * cl, FILE * cl_list, BOOL decor, long sweep, struct sim * sim, struct topo * topo, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); double particlemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); double chainmove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); double switchtypemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); double pressuremove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); double replicaexchangemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long sweep); long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *); long 
radiushole_position(double, struct sim *,int); long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli); double alignment_order(struct conf * conf, struct topo * topo); int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim); double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf); /* Opening files for cluster statistics */ cl_stat = cl = cl_list = ef = statf = NULL; if(sim->write_cluster){ // Empty file cl_stat = fopen(files->clusterstatfile, "w"); fclose(cl_stat); cl_stat = fopen(files->clusterstatfile, "a"); // Empty file cl = fopen(files->clusterfile, "w"); fclose(cl); cl = fopen(files->clusterfile, "a"); } /* write energy*/ if (report <= nsweeps){ // Empty file ef = fopen(files->energyfile, "w"); fclose(ef); ef = fopen(files->energyfile, "a"); fprintf (ef, "# sweep energy\n"); statf = fopen(files->statfile, "w"); fclose(statf); statf = fopen(files->statfile, "a"); fprintf (statf, "# sweep volume\n"); } /*=== Initialise counters etc. ===*/ // double pvolume; /* Volume of all particles*/ /* pvolume =0.0; for (i=0;i < topo->npart;i++) { if (conf->particle[i].type>=0 ) pvolume += topo->ia_params[conf->particle[i].type][conf->particle[i].type].volume; }*/ sim->shprob = sim->shave/(double)topo->npart; for (i=0;i<MAXT;i++){ sim->rot[i].acc = 0; sim->rot[i].rej = 0; sim->rot[i].oldrmsd = 0; sim->rot[i].oldmx = 0; sim->trans[i].acc = 0; sim->trans[i].rej = 0; sim->trans[i].oldrmsd = 0; sim->trans[i].oldmx = 0; } for (i=0;i<MAXMT;i++){ sim->chainm[i].acc = 0; sim->chainm[i].rej = 0; sim->chainm[i].oldrmsd = 0; sim->chainm[i].oldmx = 0; sim->chainr[i].acc = 0; sim->chainr[i].rej = 0; sim->chainr[i].oldrmsd = 0; sim->chainr[i].oldmx = 0; } //(*edge).acc = (*edge).rej = (*edge).oldrmsd = (*edge).oldmx = 0; sim->edge.acc = sim->edge.rej = sim->edge.oldrmsd = sim->edge.oldmx = 0; sim->mpiexch.acc = sim->mpiexch.rej = sim->mpiexch.oldrmsd = sim->mpiexch.oldmx = 0; /*Initialize some values at begining*/ partvecinit(topo,sim,conf); next_adjust = adjust; next_calc = paramfrq; next_dump = report; next_frame = sim->movie; //nem = vol = shapex = shapey = shapez = nullstat; //for (i=0; i<MAXF; i++) smec[i] = nullstat; if (sim->movie > 0) { mf = fopen(files->moviefile, "a"); } else { mf = NULL; } sim->wl.wl_meshsize = 0; sim->wl.radiushole = NULL; sim->wl.radiusholeold = NULL; sim->wl.radiusholemax = 0; sim->wl.partincontactold = 0; sim->wl.partincontact = 0; sim->wl.wlmdim = 0; sim->wl.wlmdim = 0; sim->wl.length[0]=0; sim->wl.length[1]=0; sim->wl.currorder[0]=0; sim->wl.currorder[1]=0; sim->wl.neworder[0]=0; sim->wl.neworder[1]=0; sim->wl.weights = NULL; sim->wl.hist = NULL; masscenter(topo->npart,topo->ia_params, conf); /* Initialization of wang-landaou method*/ if ( sim->wlm[0] >0 ) { if (wlinit(&sim->wl,files->wlinfile) != 0) return; sim->wl.wlmdim = 1 ; if ( sim->wlm[1] > 0 ) sim->wl.wlmdim = 2 ; for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: masscenter(topo->npart,topo->ia_params, conf); sim->wl.currorder[wli] = z_order(&sim->wl,conf,wli); break; case 2: sim->wl.wl_meshsize = (topo->ia_params[sim->wl.wlmtype][sim->wl.wlmtype].sigma) / 3.0; // TODO sim->wl.mesh.data = NULL; sim->wl.mesh.tmp = NULL; sim->wl.origmesh.data = NULL; sim->wl.origmesh.tmp = NULL; sim->wl.currorder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]); break; case 3: sim->wl.currorder[wli] = (long) floor( (conf->particle[0].dir.z - 
sim->wl.minorder[wli])/ sim->wl.dorder[wli] ); break; case 4: sim->wl.currorder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: masscenter(topo->npart,topo->ia_params, conf); sim->wl.radiusholemax = 0; sim->wl.radiushole = NULL; sim->wl.radiusholeold = NULL; sim->wl.currorder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: sim->wl.radiusholemax = 0; sim->wl.radiushole = NULL; sim->wl.radiusholeold = NULL; sim->wl.currorder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.currorder[wli] = contparticles_all(topo,conf,sim,wli); break; default: sim->wl.currorder[wli] = 0; break; }
if ( (sim->wl.currorder[wli] >= sim->wl.length[wli] ) || (sim->wl.currorder[wli] < 0) ) { printf("Error: starting Wang-Landau method with order parameter %f out of range (%f - %f)\n\n", sim->wl.dorder[wli]*sim->wl.currorder[wli] + sim->wl.minorder[wli], sim->wl.minorder[wli], sim->wl.minorder[wli]+sim->wl.dorder[wli]*sim->wl.length[wli] ); wlend(&sim->wl); if (memorydealloc(conf, topo, sim)) exit(1); exit(2); return; } }
if (sim->wl.alpha < WL_ALPHATOL/100) sim->wl.alpha = WL_ZERO; fflush (stdout); }
#if 0 /* DEBUG: pairwise-energy symmetry check; kept disabled because the exit(0) at its end would abort the program before the production loop below ever runs */
double e,e2; for(int i=0; i< 1/*topo->npart-1*/; ++i) { for(int j=i+1; j< topo->npart; ++j) { e = paire(i, j, intfce, topo, conf); e2 = paire(j, i, intfce, topo, conf); if(e < 1000.0) printf("%.5lf %.5lf\n", e, e2); else printf("%lf\n", 1000.0); } printf("\n"); } exit(0);
#endif
/*do moves - START OF REAL MC*/
if(sim->pairlist_update){ gen_pairlist(topo, sim, conf); // Does that solve the problem?
}
/*do energy drift check - start calculation*/ volume = conf->box.x * conf->box.y * conf->box.z; edriftstart = calc_energy(0, intfce, 0, topo, conf, sim,0); pvdriftstart = sim->press * volume - (double)topo->npart * log(volume) / sim->temper;
//printf("starting energy: %.15f \n",calc_energy(0, intfce, 0, topo, conf, sim,0));
//printf("press: %.15f\n",sim->press * volume - (double)topo->npart * log(volume) / sim->temper);
edriftchanges = 0.0;
for (sweep=1; sweep <= nsweeps; sweep++) { // Try replica exchange
if((sim->nrepchange) && (sweep % sim->nrepchange == 0)){ edriftchanges += replicaexchangemove(topo,sim,conf,intfce,sweep); } // Generate the pairlist
if((sim->pairlist_update) && (sweep % sim->pairlist_update == 0)){ gen_pairlist(topo, sim, conf); } //normal moves
for (step=1; step <= topo->npart; step++) { moveprobab = ran2(&seed); if ( moveprobab < sim->shprob) { /* pressure moves*/ edriftchanges += pressuremove(topo,sim,conf,intfce); } else { if (moveprobab < sim->shprob + sim->chainprob) { /* chain moves*/ edriftchanges += chainmove(topo,sim,conf,intfce); } else if (moveprobab < sim->shprob + sim->chainprob + sim->switchprob){ /*=== This is an attempt to switch a type ===*/ edriftchanges += switchtypemove(topo,sim,conf,intfce); } else { /* single particle moves*/ edriftchanges += particlemove(topo,sim,conf,intfce); } /* end of else next to chain moves */ } /* end of else next to volume moves */ } /**** End of step loop for this sweep ****/
/*=== Start of end-of-sweep housekeeping ===*/ /* Adjustment of maximum step sizes during equilibration */ if (sweep == next_adjust) { for (i = 0; i < MAXT ;i++) { if ((sim->trans[i].acc > 0)||(sim->trans[i].rej >0)) optimizestep (sim->trans + i, 1.5, 0.0); if ((sim->rot[i].acc > 0)||(sim->rot[i].rej >0)) optimizerot (sim->rot + i, 5.0, 0.01); } for (i = 0; i < MAXMT; i++) { if ((sim->chainm[i].acc > 0)||(sim->chainm[i].rej > 0)) optimizestep (sim->chainm + i, 1.5, 0.0); if ((sim->chainr[i].acc >
0)||(sim->chainr[i].rej > 0)) optimizerot (sim->chainr + i, 5.0, 0.01); } optimizestep (&(sim->edge), 1.0, 0.0); next_adjust += adjust; } if ( (sim->wlm[0] > 0) && (sim->wl.alpha > WL_ZERO) && !(sweep % 1000) ) { sim->wl.min = sim->wl.hist[0]; sim->wl.max = sim->wl.hist[0]; for (i=0;i < sim->wl.length[0];i++) { j=0; if ( sim->wl.hist[i+j*sim->wl.length[0]] > sim->wl.max ) sim->wl.max = sim->wl.hist[i+j*sim->wl.length[0]]; if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]]; for (j=1;j < sim->wl.length[1];j++) { if ( sim->wl.hist[i+j*sim->wl.length[0]] > sim->wl.max ) sim->wl.max = sim->wl.hist[i+j*sim->wl.length[0]]; if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]]; } } if ( sim->wl.min > WL_MINHIST ) { if ( sim->temper * log(sim->wl.max/sim->wl.min) < WL_GERR ) { /*DEBUG for (i=1;i<wl.length;i++) { printf (" %15.8le %15ld %15.8f\n",sim->wl.weights[i],sim->wl.hist[i],particle[0].pos.z); fflush(stdout); } */ if ( sim->wl.alpha < WL_ALPHATOL) { printf("\nF I N I S H E D\n\n"); fflush (stdout); break; } sim->wl.alpha/=2; printf("%f \n", sim->wl.alpha); fflush (stdout); sim->wl.wmin = sim->wl.weights[0]; for (i=0;i < sim->wl.length[0];i++) { j=0; sim->wl.hist[i+j*sim->wl.length[0]] = 0; sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin; for (j=1;j < sim->wl.length[1];j++) { sim->wl.hist[i+j*sim->wl.length[0]] = 0; sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin; } } } } } if (!(sweep % 10000)) { /*reinitialize pach vectors to avoid cummulation of errors*/ partvecinit(topo,sim,conf); } /* Sampling of statistics */ if (sweep == next_calc) { /*s2 = nematic(npart, particle); accumulate (&nem, s2); for (i=0; i<terms; i++) { ci = smectic(npart, particle, i+1); accumulate (&smec[i], ci); } accumulate (&shapex, (*box).x); accumulate (&shapey, (*box).y); accumulate (&shapez, (*box).z); volume = (*box).x * (*box).y * (*box).z; accumulate (&vol, volume); next_calc += paramfrq; */ } /* Writing of statistics */ if (sweep == next_dump) { /*printf ("Statistics after %ld sweeps:\n", sweep); printf (" Mean and RMS fluctuation of S2: %13.8lf %13.8lf\n", nem.mean, nem.rms); for (i=0; i<terms; i++) { printf (" Mean & fluc. Fourier coeff. %3ld: %13.8lf %13.8lf\n", i+1, smec[i].mean, smec[i].rms); } printf (" Mean & fluc box dimensions: x %13.8lf %13.8lf\n", shapex.mean, shapex.rms); printf (" y %13.8lf %13.8lf\n", shapey.mean, shapey.rms); printf (" z %13.8lf %13.8lf\n", shapez.mean, shapez.rms); printf (" Mean & fluctuation volume: %13.8lf %13.8lf\n", vol.mean, vol.rms); printf (" Mean & fluc. 
volume over volume of particles: %13.8lf %13.8lf\n", vol.mean/pvolume, vol.rms/pvolume); printf ("\n"); fflush (stdout); */
fprintf (statf, " %ld; %.10lf\n", sweep, conf->box.x * conf->box.y * conf->box.z); fprintf (ef, " %ld; %.10lf %f \n", sweep, calc_energy(0, intfce, 0, topo, conf, sim,0), alignment_order(conf,topo)); if (sim->wlm[0] > 0) { wlwrite(&sim->wl,files->wloutfile); } next_dump += report; }
/* Writing of movie frame */ if (sweep == next_frame) { fprintf (mf, "%ld\n", topo->npart); fprintf (mf, "sweep %ld; box %.10lf %.10lf %.10lf\n", sweep, conf->box.x, conf->box.y, conf->box.z); draw (mf, conf, topo); fflush (mf); next_frame += sim->movie; }
/* Writing out cluster statistics */ if(sim->write_cluster && (sweep % sim->write_cluster == 0)){ write_cluster(cl_stat, cl, cl_list, FALSE, sweep, sim, topo, conf, intfce); } /*=== End of housekeeping ===*/ } /**** End of sweeps loop ****/
/*do energy drift check - calculation at the end*/ volume = conf->box.x * conf->box.y * conf->box.z; edriftend = calc_energy(0, intfce, 0, topo, conf, sim,0); pvdriftend = sim->press * volume - (double)topo->npart * log(volume) / sim->temper; printf("Energy drift: %.15lf \n",edriftend - edriftstart - edriftchanges +pvdriftend -pvdriftstart); printf("Starting energy+pv: %.8lf \n",edriftstart+pvdriftstart); printf("Starting energy: %.8lf \n",edriftstart); fflush(stdout);
/* End Wang-Landau */ if (sim->wlm[0] > 0) { sim->wl.min = sim->wl.hist[0]; for (i=0;i < sim->wl.length[0];i++) { j=0; if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]]; for (j=1;j < sim->wl.length[1];j++) { if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]]; } } sim->wl.wmin = sim->wl.weights[0]; for (i=0;i < sim->wl.length[0];i++) { j=0; sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin; for (j=1;j < sim->wl.length[1];j++) { sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin; } } wlwrite(&sim->wl,files->wloutfile); wlend(&sim->wl); if ( (sim->wlm[0] == 2)||(sim->wlm[1] == 2) ) { mesh_end(&sim->wl.mesh); mesh_end(&sim->wl.origmesh); } if ( (sim->wlm[0] == 5)||(sim->wlm[1] == 5)||(sim->wlm[0] == 6)||(sim->wlm[1] == 6) ) { if ( sim->wl.radiushole != NULL ) free(sim->wl.radiushole); if ( sim->wl.radiusholeold != NULL ) free(sim->wl.radiusholeold); } }
/*end movie*/ if (sim->movie > 0) fclose (mf); /*end cluster*/ if(sim->write_cluster){ fclose(cl_stat); fclose(cl); } if (report <= nsweeps) { /* must mirror the "report <= nsweeps" condition under which ef and statf were opened */ fclose(ef); fclose(statf); } }
/*..................................MOVES.........................................*/
/*................................................................................*/
/*..............................PARTICLE MOVES....................................*/
double particlemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)) { double edriftchanges =0.0; long target; double ran2(long *); double partdisplace(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *),long target); double partrotate(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *),long target);
/*=== This is a particle move step ===*/ target = ran2(&seed) * topo->npart; if ( ((ran2(&seed) < 0.5) || (topo->ia_params[conf->particle[target].type][conf->particle[target].type].geotype[0] >= SP)) ) { /* no rotation for spheres */
//target = 1;
//printf ("displacement\n\n");
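/* Editor's note (illustrative worked example, values hypothetical): partdisplace()
   below scales the trial step by mx/box because positions appear to be stored in
   box-scaled (fractional) coordinates, cf. the INBOX macro. For trans[type].mx = 0.3
   and box.x = 10.0, a unit random component dr.x = 1.0 becomes 0.03 in fractional
   units, i.e. a real-space displacement of 0.3. */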
edriftchanges = partdisplace(topo,sim,conf,intfce,target); } else { /*=== Rotation step ===*/ edriftchanges = partrotate(topo,sim,conf,intfce,target); } /*=== End particle move step ===*/ return edriftchanges; } /*................................................................................*/ double partdisplace(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *),long target) { double edriftchanges,energy,enermove,wlener; struct vector orig, dr, origsyscm; int reject=0,wli; double radiusholemax_orig=0; double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); int movetry(double, double, double); void wlreject(struct sim *, long); void wlaccept(int, struct wls *); long meshorder_moveone(struct vector, struct vector, struct meshs *, long, long, \ struct conf * conf, struct sim * sim, int wli); int mesh_cpy(struct meshs *, struct meshs *); //void mesh_print (struct meshs *); long z_order(struct wls *, struct conf * conf, int wli); long twopartdist(struct wls *, struct conf *conf, int wli); struct vector ranvec(void); int longarray_cpy (long **, long **, long, long); long radiusholeorder_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target, int wli,struct vector *); long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *); long contparticles_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target,int wli); long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli); /*=== Displacement step ===*/ edriftchanges =0.0; origsyscm.x = 0; origsyscm.y = 0; origsyscm.z = 0; energy = calc_energy(target, intfce, 1, topo, conf, sim,0); orig = conf->particle[target].pos; dr = ranvec(); //ran = sqrt(ran2(&seed)); dr.x *= sim->trans[conf->particle[target].type].mx/conf->box.x; dr.y *= sim->trans[conf->particle[target].type].mx/conf->box.y; dr.z *= sim->trans[conf->particle[target].type].mx/conf->box.z; conf->particle[target].pos.x += dr.x; conf->particle[target].pos.y += dr.y; conf->particle[target].pos.z += dr.z; //} while (conf->particle[target].pos.x < 0.25 || conf->particle[target].pos.x > 0.50); reject = 0; wlener = 0.0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: origsyscm = conf->syscm; conf->syscm.x += dr.x * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume; conf->syscm.y += dr.y * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume; conf->syscm.z += dr.z * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume; sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli); break; case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = meshorder_moveone(orig, conf->particle[target].pos, &sim->wl.mesh, topo->npart, target, conf, sim,wli); break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; origsyscm = conf->syscm; conf->syscm.x += dr.x * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume; conf->syscm.y += dr.y * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume; conf->syscm.z += dr.z * 
topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); if ( target == 0 ) sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); else sim->wl.neworder[wli] = radiusholeorder_moveone(&orig, conf, sim,target,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; if ( target == 0 ) sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); else sim->wl.neworder[wli] = contparticles_moveone(&orig,conf,sim,target,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calcualte energy */ enermove = calc_energy(target, intfce, 1, topo, conf, sim,0); } if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */ conf->particle[target].pos = orig; sim->trans[conf->particle[target].type].rej++; if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) ) conf->syscm = origsyscm; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->trans[conf->particle[target].type].acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; //printf("%lf\t%lf\n", conf->particle[0].pos.z * conf->box.z , enermove); //printf("%.12f\t%.12f\t%.12f\n", energy , enermove,edriftchanges); } return edriftchanges; } /*................................................................................*/ double partrotate(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *),long target) { double edriftchanges,energy,enermove,wlener; struct particles origpart; int reject=0,wli; double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); int movetry(double, double, double); void wlreject(struct sim *,long); void wlaccept(int, struct wls *); void normalise(struct vector *); void ortogonalise(struct vector *,struct vector); void psc_rotate(struct particles *,double,int); /*=== Rotation step ===*/ //printf ("rotation %ld npart %ld\n\n",target,npart); energy = calc_energy(target, intfce, 1, topo, conf, sim,0); origpart = conf->particle[target]; psc_rotate(&conf->particle[target],sim->rot[conf->particle[target].type].angle, topo->ia_params[conf->particle[target].type][conf->particle[target].type].geotype[0]); /*should be normalised and ortogonal but we do for safety*/ normalise (&conf->particle[target].dir); ortogonalise(&conf->particle[target].patchdir[0],conf->particle[target].dir); reject = 0; edriftchanges =0.0; wlener = 0.0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 3: if (target == 0) sim->wl.neworder[wli] = (long) floor( (conf->particle[0].dir.z - sim->wl.minorder[wli])/ 
sim->wl.dorder[wli] ); else sim->wl.neworder[wli] = sim->wl.currorder[wli]; /* only rotation change direction */ break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calcualte energy */ enermove = calc_energy(target, intfce, 1, topo, conf, sim,0); } if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */ conf->particle[target] = origpart; sim->rot[conf->particle[target].type].rej++; wlreject(sim,sim->wl.radiusholemax); } else { /* move was accepted */ // DEBUG //fprintf(fenergy, "%lf\t%lf\n", conf->particle[1].pos.x * conf->box.x , enermove); sim->rot[conf->particle[target].type].acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; //printf("%lf\t%lf\n", conf->particle[0].patchdir[0].z, enermove); } return edriftchanges; } /*..................... This is an attempt to switch a type.................................*/ double switchtypemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *) ) { double edriftchanges,energy,enermove,wlener; int reject=0,wli; long target; double radiusholemax_orig=0; double ran2(long *); double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); int movetry(double, double, double); void wlreject(struct sim *,long); void wlaccept(int, struct wls *); void int_partvec(long, struct ia_param *, struct conf *); int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim); int mesh_cpy(struct meshs *, struct meshs *); long z_order(struct wls *, struct conf * conf,int); long twopartdist(struct wls *, struct conf *conf,int); long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *); int longarray_cpy (long **target, long **source,long,long); long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli); /*=== This is an attempt to switch a type ===*/ edriftchanges =0.0; wlener = 0.0; target = ran2(&seed) * topo->n_switch_part; target = topo->switchlist[target]; DEBUG_SIM("Switching the particle type"); DEBUG_SIM("PARTICLE: %ld", target); energy = calc_energy(target, intfce, 1, topo, conf, sim,0); // Start switching the type int switched = conf->particle[target].switched; int pmone = PMONE(switched); DEBUG_SIM("switched = %d", switched); DEBUG_SIM("pmone = %d", pmone); int tmp_type = conf->particle[target].type; conf->particle[target].type = conf->particle[target].switchtype; conf->particle[target].switchtype = tmp_type; conf->particle[target].switched += pmone; int_partvec(target,&(topo->ia_params[conf->particle[target].type][conf->particle[target].type]),conf); DEBUG_SIM("Particle %ld is %d switched", target, switched); //DEBUG #ifdef DEBUGGING_SIM if ((abs(pmone) != 1) || (conf->particle[target].type == conf->particle[target].switchtype)){ fprintf(stderr, "ERROR: Something went wrong, when switching the type of particle %ld\n", target); exit(1); } #endif if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { /*case 1: sim->wl.neworder = 
z_order(&sim->wl, conf,wli); break;*/ case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - sim->wl.minorder[wli]); break; /*case 4: sim->wl.neworder = twopartdist(&sim->wl,conf,wli); break;*/ case 5: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { enermove = conf->particle[target].delta_mu * pmone; // DEBUG //double dmu = enermove; //particle[target].switched += pmone; enermove += calc_energy( target, intfce, 1, topo, conf, sim,0); //printf("energy: %lf \t %lf\t%lf\n",particle[target].delta_mu, dmu, enermove); } // If not accepted: switch back if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */ DEBUG_SIM("Did NOT switch it\n"); conf->particle[target].switchtype = conf->particle[target].type; conf->particle[target].type = tmp_type; conf->particle[target].switched -= pmone; int_partvec(target,&(topo->ia_params[conf->particle[target].type][conf->particle[target].type]),conf); wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } return edriftchanges; } /*.................................CHAIN MOVES....................................*/ /*................................................................................*/ double chainmove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)) { double edriftchanges =0.0; long target; double ran2(long *); double chaindisplace(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long target); double chainrotate(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long target); /*=== This is a chain move step ===*/ target = ran2(&seed) * topo->chainnum; if (ran2(&seed) < 0.5) { /*=== Displacement step of cluster/chain ===*/ edriftchanges = chaindisplace(topo,sim,conf,intfce,target); } else { /*=== Rotation step of cluster/chain ===*/ edriftchanges = chainrotate(topo,sim,conf,intfce,target); } /* ==== END OF CHAIN MOVES ===== */ return edriftchanges; } /*................................................................................*/ double chaindisplace(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long target) { double edriftchanges,energy,enermove,wlener; struct vector dr, origsyscm; int reject=0,wli; struct vector cluscm; long current,i; struct particles chorig[MAXCHL]; double 
radiusholemax_orig=0; double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); int movetry(double, double, double); void wlreject(struct sim *,long); void wlaccept(int, struct wls *); long meshorder_movechain(long [MAXN], struct meshs *, long, struct conf * conf, \ struct sim * sim, struct particles chorig[MAXCHL],int); int mesh_cpy(struct meshs *, struct meshs *); //void mesh_print (struct meshs *); long z_order(struct wls *, struct conf * conf,int); long twopartdist(struct wls *, struct conf *conf,int); struct vector ranvec(void); int longarray_cpy (long **target, long **source,long,long); long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, \ struct sim * sim,struct particles chorig[MAXCHL],int,struct vector *); long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *); long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli); long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli); /*=== Displacement step of cluster/chain ===*/ //printf ("move chain\n\n"); energy =0.0; wlener = 0.0; edriftchanges=0.0; i=0; current = topo->chainlist[target][0]; cluscm.x = 0; cluscm.y = 0; cluscm.z = 0; origsyscm.x = 0; origsyscm.y = 0; origsyscm.z = 0; while (current >=0 ) { /* store old configuration calculate energy*/ chorig[i].pos = conf->particle[current].pos; energy += calc_energy(current, intfce, 2, topo, conf, sim, target); i++; current = topo->chainlist[target][i]; } dr = ranvec(); dr.x *= sim->chainm[conf->particle[target].chaint].mx/conf->box.x; dr.y *= sim->chainm[conf->particle[target].chaint].mx/conf->box.y; dr.z *= sim->chainm[conf->particle[target].chaint].mx/conf->box.z; i=0; current = topo->chainlist[target][0]; while (current >=0 ) { /* move chaine to new position */ if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) ) { /* calculate move of center of mass */ cluscm.x += dr.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; cluscm.y += dr.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; cluscm.z += dr.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; } conf->particle[current].pos.x += dr.x; conf->particle[current].pos.y += dr.y; conf->particle[current].pos.z += dr.z; i++; current = topo->chainlist[target][i]; } enermove = 0.0; reject = 0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: origsyscm = conf->syscm; conf->syscm.x += cluscm.x / conf->sysvolume; conf->syscm.y += cluscm.y / conf->sysvolume; conf->syscm.z += cluscm.z / conf->sysvolume; sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli); break; case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = meshorder_movechain(topo->chainlist[target], &sim->wl.mesh, topo->npart, conf, sim, chorig,wli); break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; origsyscm = conf->syscm; conf->syscm.x += cluscm.x / conf->sysvolume; conf->syscm.y += cluscm.y / conf->sysvolume; conf->syscm.z += cluscm.z / conf->sysvolume; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = 
radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); if ( target == 0 ) sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); else sim->wl.neworder[wli] = radiusholeorder_movechain(topo->chainlist[target], conf, sim, chorig,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; if ( target == 0 ) sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); else sim->wl.neworder[wli] = contparticles_movechain(topo->chainlist[target],conf,sim,chorig,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calcualte energy */ i=0; current = topo->chainlist[target][0]; while (current >=0 ) { enermove += calc_energy(current, intfce, 2, topo, conf, sim,target); i++; current = topo->chainlist[target][i]; } } if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */ i=0; current = topo->chainlist[target][0]; while (current >=0 ) { conf->particle[current].pos = chorig[i].pos; i++; current = topo->chainlist[target][i]; } sim->chainm[conf->particle[target].chaint].rej++; if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) ) conf->syscm = origsyscm; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->chainm[conf->particle[target].chaint].acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } return edriftchanges; } /*................................................................................*/ double chainrotate(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long target) { double edriftchanges,energy,enermove,wlener; int reject=0,wli; struct vector cluscm; double chainvolume; long current, i; struct particles chorig[MAXCHL]; double radiusholemax_orig=0; double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); int movetry(double, double, double); void wlreject(struct sim *,long); void wlaccept(int, struct wls *); long meshorder_movechain(long [MAXN], struct meshs *, long, struct conf * conf, \ struct sim * sim, struct particles chorig[MAXCHL],int); int mesh_cpy(struct meshs *, struct meshs *); void cluster_rotate(long, struct vector, double, struct topo * topo, struct conf * conf); long z_order(struct wls *, struct conf * conf,int); long twopartdist(struct wls *, struct conf *conf,int); int longarray_cpy (long **target, long **source,long,long); long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,\ struct particles chorig[MAXCHL],int,struct vector *); long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *); long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli); long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli); /*=== Rotation step of cluster/chain 
===*/ //printf ("rotation of chain\n\n"); energy=0.0; /* set values to zero*/ edriftchanges=0.0; wlener = 0.0; current = topo->chainlist[target][0]; cluscm.x = conf->particle[current].pos.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; cluscm.y = conf->particle[current].pos.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; cluscm.z = conf->particle[current].pos.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; chorig[0] = conf->particle[current]; chainvolume = topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; energy += calc_energy(current, intfce, 2, topo, conf, sim,target); i=1; current = topo->chainlist[target][i]; while (current >=0 ) { /* store old configuration calculate energy*/ chorig[i] = conf->particle[current]; /*We have chains whole! don't have to do PBC*/ /*r_cm.x = conf->particle[current].pos.x - conf->particle[first].pos.x; r_cm.y = conf->particle[current].pos.y - conf->particle[first].pos.y; r_cm.z = conf->particle[current].pos.z - conf->particle[first].pos.z; if ( r_cm.x < 0 ) r_cm.x -= (double)( (long)(r_cm.x-0.5) ); else r_cm.x -= (double)( (long)(r_cm.x+0.5) ); if ( r_cm.y < 0 ) r_cm.y -= (double)( (long)(r_cm.y-0.5) ); else r_cm.y -= (double)( (long)(r_cm.y+0.5) ); if ( r_cm.z < 0 ) r_cm.z -= (double)( (long)(r_cm.z-0.5) ); else r_cm.z -= (double)( (long)(r_cm.z+0.5) ); */ cluscm.x += conf->particle[current].pos.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; cluscm.y += conf->particle[current].pos.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; cluscm.z += conf->particle[current].pos.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; chainvolume += topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; energy += calc_energy(current, intfce, 2, topo, conf, sim,target); i++; current = topo->chainlist[target][i]; } cluscm.x = cluscm.x/chainvolume; cluscm.y = cluscm.y/chainvolume; cluscm.z = cluscm.z/chainvolume; /*do actual rotations around geometrical center*/ cluster_rotate(target, cluscm, sim->chainr[conf->particle[target].chaint].angle, topo, conf); enermove=0.0; reject = 0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: if (target == 0) sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli); else sim->wl.neworder[wli] = sim->wl.currorder[wli]; /* if we rotated cluster it is around its CM so no change*/ break; case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = meshorder_movechain(topo->chainlist[target], &sim->wl.mesh, topo->npart, conf, sim, chorig,wli); break; case 3: if (target == 0) sim->wl.neworder[wli] = (long) floor( (conf->particle[0].dir.z - sim->wl.minorder[wli])/ sim->wl.dorder[wli] ); else sim->wl.neworder[wli] = sim->wl.currorder[wli]; /* only rotation change direction */ break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); if ( target == 0 ) 
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); else sim->wl.neworder[wli] = radiusholeorder_movechain(topo->chainlist[target], conf, sim, chorig,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; if ( target == 0 ) sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); else sim->wl.neworder[wli] = contparticles_movechain(topo->chainlist[target],conf,sim,chorig,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calcualte energy */ i=0; current = topo->chainlist[target][0]; while (current >=0 ) { enermove += calc_energy(current, intfce, 2, topo, conf, sim,target); i++; current = topo->chainlist[target][i]; } } if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */ i=0; current = topo->chainlist[target][0]; while (current >=0 ) { conf->particle[current] = chorig[i]; i++; current = topo->chainlist[target][i]; } sim->chainr[conf->particle[target].chaint].rej++; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->chainr[conf->particle[target].chaint].acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } return edriftchanges; } /*..............................PRESSURE MOVES....................................*/ /*................................................................................*/ double pressuremove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)) { double edriftchanges,energy,enermove,wlener; int reject=0,wli; double old_side; /* Box length before attempted change */ double *side; /* Box dimension to try changing */ double psch; /* Size of a box change during pressure */ double pvol; /* Size of a volume during pressure */ double pvoln; /* Size of a new volume during pressure */ double rsave; /* Saved random number */ double area; double radiusholemax_orig=0; double ran2(long *); double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); int movetry(double, double, double); void wlreject(struct sim *,long); void wlaccept(int, struct wls *); int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim); int mesh_cpy(struct meshs *, struct meshs *); long z_order(struct wls *, struct conf * conf,int); long twopartdist(struct wls *, struct conf *conf,int); long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *); int longarray_cpy (long **target, long **source,long,long); long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli); /*=== This is a volume change step ===*/ /*calculate energy*/ edriftchanges=0.0; wlener = 0.0; energy = calc_energy(0, intfce, 0, topo, conf, sim,0); /* Choose an edge */ switch (sim->ptype) { case 0: /* Anisotropic pressure coupling */ rsave = ran2(&seed); if (rsave < 1.0/3.0) { side = &(conf->box.x); area = conf->box.y * conf->box.z; } else if (rsave < 2.0/3.0) { side = &(conf->box.y); area = conf->box.x * conf->box.z; } else { side = &(conf->box.z); area = conf->box.x * 
conf->box.y; } old_side = *side; *side += sim->edge.mx * (ran2(&seed) - 0.5); reject = 0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli); break; case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]); break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calculate energy */ enermove = sim->press * area * (*side - old_side) - (double)topo->npart * log(*side/old_side) / sim->temper; enermove += calc_energy(0, intfce, 0, topo, conf, sim,0); } if ( reject || *side <= 0.0 || ( movetry(energy,enermove,sim->temper) ) ) { /* probability acceptance */ *side = old_side; sim->edge.rej++; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->edge.acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } break; case 1: /* Isotropic pressure coupling */ psch = sim->edge.mx * (ran2(&seed) - 0.5); pvol = conf->box.x * conf->box.y * conf->box.z; conf->box.x += psch; conf->box.y += psch; conf->box.z += psch; pvoln = conf->box.x * conf->box.y * conf->box.z; reject = 0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: sim->wl.neworder[wli] = z_order(&sim->wl,conf,wli); break; case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]); break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { 
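/* Wang-Landau bias (hedged note): wlener = w(new bin) - w(current bin) is folded into
   the stored old-state energy, so the Metropolis test below acts on
   (E_new - E_old - wlener), i.e. accept with probability min(1, exp(-(E_new - E_old - wlener)/T));
   the weights, updated on every visit, gradually cancel the free-energy barriers and
   flatten the histogram along the chosen order parameter(s). */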
wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calcualte energy */ enermove = sim->press * (pvoln - pvol) - (double)topo->npart * log(pvoln/pvol) / sim->temper; enermove += calc_energy(0, intfce, 0, topo, conf, sim,0); } if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */ conf->box.x -= psch; conf->box.y -= psch; conf->box.z -= psch; sim->edge.rej++; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->edge.acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } break; case 2: /* Isotropic pressure coupling in xy, z constant */ psch = sim->edge.mx * (ran2(&seed) - 0.5); pvol = conf->box.x * conf->box.y; conf->box.x += psch; conf->box.y += psch; pvoln = conf->box.x * conf->box.y; reject = 0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { /*no change in case 1, it does not change box.z*/ case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - sim->wl.minorder[wli]); break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calculate energy */ enermove = sim->press * conf->box.z * (pvoln - pvol) - (double)topo->npart * log(pvoln/pvol) / sim->temper; enermove += calc_energy(0, intfce, 0, topo, conf, sim,0); } if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */ conf->box.x -= psch; conf->box.y -= psch; sim->edge.rej++; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->edge.acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } break; case 3: /* Isotropic pressure coupling in xy, z coupled to have fixed volume */ psch = sim->edge.mx * (ran2(&seed) - 0.5); pvol = conf->box.x * conf->box.y * conf->box.z; conf->box.x += psch; conf->box.y += psch; conf->box.z = pvol / conf->box.x / conf->box.y; reject = 0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli); break; case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - 
sim->wl.minorder[wli]); break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calculate energy */ enermove = calc_energy(0, intfce, 0, topo, conf, sim,0); } if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */ conf->box.x -= psch; conf->box.y -= psch; conf->box.z = pvol / conf->box.x / conf->box.y; sim->edge.rej++; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->edge.acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } break; default: fprintf (stderr, "ERROR: unknown type of pressure coupling %d",sim->ptype); exit(1); } /*=== End volume change step ===*/ return edriftchanges; } /*..................... Switch replicas move in MPI ..............................*/ /*.................................................................................*/ double replicaexchangemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long sweep ) { double edriftchanges=0.0; #ifdef MPI double change, *recwlweights; MPI_Status status; int oddoreven,count,wli,sizewl = 0; struct mpiexchangedata localmpi,receivedmpi; BOOL reject; long localwl,receivedwl; double ran2(long *); void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf); int longarray_cpy (long **target, long **source,long,long); int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim); double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); void wlaccept(int, struct wls *); //int mpi_newdatatypes(); //mpi_newdatatypes(); int i; struct vector vec; struct particles part; struct mpiexchangedata exch; MPI_Aint dispstart; MPI_Datatype MPI_vector; MPI_Datatype type[3] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE}; int blocklen[3] = {1, 1, 1}; MPI_Aint disp[3]; MPI_Address( &vec, &dispstart); MPI_Address( &(vec.x), &disp[0]); MPI_Address( &(vec.y), &disp[1]); MPI_Address( &(vec.z), &disp[2]); for (i=0; i <3; i++) disp[i] -= dispstart; MPI_Type_struct( 3, blocklen, disp, type, &MPI_vector); MPI_Type_commit( &MPI_vector); MPI_Datatype MPI_Particle; MPI_Datatype type2[11] = {MPI_vector,MPI_vector,MPI_vector,MPI_vector,MPI_vector, MPI_LONG, MPI_LONG, MPI_INT,MPI_INT,MPI_DOUBLE, MPI_INT}; int blocklen2[11] = {1, 1, 2,4,2,1,1,1,1,1,1,}; MPI_Aint disp2[11]; MPI_Address( &part, &dispstart); MPI_Address( &(part.pos), &disp2[0]); MPI_Address( 
&(part.dir), &disp2[1]); MPI_Address( &(part.patchdir), &disp2[2]); MPI_Address( &(part.patchsides), &disp2[3]); MPI_Address( &(part.chdir), &disp2[4]); MPI_Address( &(part.chaint), &disp2[5]); MPI_Address( &(part.chainn), &disp2[6]); MPI_Address( &(part.type), &disp2[7]); MPI_Address( &(part.switchtype), &disp2[8]); MPI_Address( &(part.delta_mu), &disp2[9]); MPI_Address( &(part.switched), &disp2[10]); for (i=0; i <11; i++) disp2[i] -= dispstart; MPI_Type_struct( 11, blocklen2, disp2, type2, &MPI_Particle); MPI_Type_commit( &MPI_Particle); if (sim->wl.length[1] > 0) { sizewl = sim->wl.length[1] * sim->wl.length[0]; } else { sizewl = sim->wl.length[0]; } MPI_Datatype MPI_exchange; MPI_Datatype type3[7] = {MPI_vector, MPI_DOUBLE, MPI_DOUBLE, MPI_INT, MPI_vector, MPI_LONG, MPI_LONG}; int blocklen3[7] = {1, 1, 1, 1, 1, 1, 2}; MPI_Aint disp3[7]; MPI_Address( &exch, &dispstart); MPI_Address( &(exch.box), &disp3[0]); MPI_Address( &(exch.energy), &disp3[1]); MPI_Address( &(exch.volume), &disp3[2]); MPI_Address( &(exch.accepted), &disp3[3]); MPI_Address( &(exch.syscm), &disp3[4]); MPI_Address( &(exch.radiusholemax), &disp3[5]); MPI_Address( &(exch.wl_order), &disp3[6]); for (i=0; i <7; i++) disp3[i] -= dispstart; MPI_Type_struct(7, blocklen3, disp3, type3, &MPI_exchange); MPI_Type_commit( &MPI_exchange); /*=== This is an attempt to switch replicas ===*/ localmpi.box = conf->box; localmpi.energy = calc_energy(0, intfce, 0, topo, conf, sim,0); localmpi.volume = conf->box.x * conf->box.y * conf->box.z; localmpi.accepted = 0; localmpi.syscm = conf->syscm; localmpi.radiusholemax = sim->wl.radiusholemax; recwlweights = malloc( sizeof(double) * sizewl ); for (wli=0;wli<2;wli++) { localmpi.wl_order[wli] = 0; receivedmpi.wl_order[wli] = 0; } for (wli=0;wli<sim->wl.wlmdim;wli++) { localmpi.wl_order[wli] = sim->wl.currorder[wli]; //fprintf(stdout,"wli %d %ld %ld\n\n", wli, localmpi.wl_order[wli], sim->wl.currorder[wli] ); } if ( (sweep % (2*sim->nrepchange)) == 0) /* exchange odd ones with even ones*/ oddoreven=1; else /* exchange even ones with odd ones*/ oddoreven=0; if (sim->mpinprocs == 2) oddoreven=1; count = 1; if (sim->mpirank % 2 == oddoreven) { if (sim->mpirank > 0) { MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank-1, count, MPI_COMM_WORLD); MPI_Send(sim->wl.weights, sizewl, MPI_DOUBLE, sim->mpirank-1, count, MPI_COMM_WORLD); //printf("send data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,localmpi.energy,localmpi.volume,localmpi.pressure); MPI_Recv(&receivedmpi, 1, MPI_exchange, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); /*decision of accepting or rejecting the exchange was done on other process here we took received configuration (if move was accepted))*/ //printf("received data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,receivedmpi.energy,receivedmpi.volume,receivedmpi.pressure); if (receivedmpi.accepted == 1) { sim->mpiexch.acc++; struct particles *temppart; temppart = malloc(topo->npart*sizeof(struct particles)); MPI_Recv(temppart, topo->npart, MPI_Particle, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD,&status); /* printf("received data: rank: %d\n", sim->mpirank); printf("part0 x %f y %f z %f\n",temppart[0].pos.x, temppart[0].pos.y, temppart[0].pos.z); printf("part1 x %f y %f z %f\n",temppart[1].pos.x, temppart[1].pos.y, temppart[1].pos.z); printf("part0 chaint %ld chainn %ld type %d\n",temppart[0].chaint,temppart[0].chainn,temppart[0].type); */ MPI_Send(conf->particle, topo->npart, MPI_Particle, sim->mpirank-1, count, MPI_COMM_WORLD); /* 
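note (added): acceptance was decided by the lower-rank partner; this branch only receives the verdict, and on acceptance it has already swapped the full particle arrays via MPI_Recv/MPI_Send above before adopting box and syscm below. Disabled debug trace: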
printf("send data: rank: %d\n",sim->mpirank); printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z); printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z); printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type); */ localmpi.accepted = receivedmpi.accepted; conf->box = receivedmpi.box; conf->syscm = receivedmpi.syscm; memcpy(conf->particle,temppart,topo->npart*sizeof(struct particles)); edriftchanges = receivedmpi.energy - localmpi.energy; edriftchanges += sim->press * (receivedmpi.volume - localmpi.volume) - (double)topo->npart * log(receivedmpi.volume / localmpi.volume) / sim->temper; if ( sim->wlm[0] >0 ) { for (wli=0;wli<sim->wl.wlmdim;wli++) { sim->wl.neworder[wli] = receivedmpi.wl_order[wli]; } wlaccept(sim->wlm[0],&sim->wl); //exchange wl data mesh size and radius hole s for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 2: /*it is complicated to send because of different sizes we would have to send sizes first and realocate corrrect mesh size and then send data it is better to recalculate (a bit slower though)*/ mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim); break; case 5: //radiushole_all(topo,conf,sim,wli,&(conf->syscm)); sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax); MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD); longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax); sim->wl.radiusholemax=receivedmpi.radiusholemax; break; case 6: //radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax); MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD); longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax); sim->wl.radiusholemax=receivedmpi.radiusholemax; break; case 7: //contparticles_all(topo,conf,sim,wli); MPI_Recv(&(sim->wl.partincontactold),1, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Send(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD); sim->wl.partincontact=sim->wl.partincontactold; break; } } } free(temppart); } else { sim->mpiexch.rej++; if ( sim->wlm[0] > 0 ) { sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha; sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++; } } } } else { if (sim->mpirank+1 < sim->mpinprocs) { /*there is above process*/ MPI_Recv(&receivedmpi, 1, MPI_exchange, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Recv(recwlweights, sizewl, MPI_DOUBLE, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); /*we got new configuration*/ //printf("received data: rank: %d energy: %f volume: %f \n",sim->mpirank,receivedmpi.energy,receivedmpi.volume); /*evaluate if accepte or reject the configuration*/ /*acc = exp( (1/sim->temper - 1/(sim->temper + sim.dtemp)) * (E_here - E_received) + (sim->press /sim->temper - pressure_received 
/(sim->temper + sim->dtemp)) * (V_here - V_received) ); if the pressure is the same it is simpler*/ reject = FALSE; change = (1/sim->temper - 1/(sim->temper + sim->dtemp)) * (localmpi.energy - receivedmpi.energy); //printf("acceptance decision: change: %f localE: %f receivedE: %f tempf: %f \n",change,localmpi.energy,receivedmpi.energy,(1/sim->temper - 1/(sim->temper + sim->dtemp))); change += (sim->press/sim->temper - (sim->press + sim->dpress)/(sim->temper + sim->dtemp)) * (localmpi.volume - receivedmpi.volume); //printf("pressf: %f \n",(sim->press/sim->temper - (sim->press + sim->dpress)/(sim->temper + sim->dtemp))); if (sim->wlm[0] > 0) { localwl = sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]; receivedwl = receivedmpi.wl_order[0] + receivedmpi.wl_order[1]*sim->wl.length[0]; //fprintf(stdout,"decide wl %ld %ld %ld energychange: %f \n", receivedmpi.wl_order[0], receivedmpi.wl_order[1], receivedwl, change ); //fprintf(stdout,"local weights %ld %f %ld %f \n",localwl,sim->wl.weights[localwl],receivedwl,sim->wl.weights[receivedwl]); change += (-sim->wl.weights[localwl] + sim->wl.weights[receivedwl] )/sim->temper + ( -recwlweights[receivedwl] + recwlweights[localwl])/(sim->temper + sim->dtemp); //fprintf(stdout,"wlchange %f \n\n",change); } if ( (!(reject)) && ( (change > 0) || (ran2(&seed) < exp(change)) ) ) { /* Exchange ACCEPTED: send local configuration*/ //printf("exchange accepted \n"); sim->mpiexch.acc++; localmpi.accepted = 1; conf->box = receivedmpi.box; conf->syscm = receivedmpi.syscm; edriftchanges = receivedmpi.energy - localmpi.energy; edriftchanges += sim->press * (receivedmpi.volume - localmpi.volume) - (double)topo->npart * log(receivedmpi.volume / localmpi.volume) / sim->temper; //printf("edrift %f\n",edriftchanges); if ( sim->wlm[0] > 0 ) { for (wli=0;wli<sim->wl.wlmdim;wli++) { sim->wl.neworder[wli] = receivedmpi.wl_order[wli]; } wlaccept(sim->wlm[0],&sim->wl); } MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank+1, count, MPI_COMM_WORLD); //printf("send data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,localmpi.energy,localmpi.volume,localmpi.pressure); /*send and receive configuration*/ MPI_Send(conf->particle, topo->npart, MPI_Particle, sim->mpirank+1, count, MPI_COMM_WORLD); /* printf("send data: rank: %d\n",sim->mpirank); printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z); printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z); printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type); */ MPI_Recv(conf->particle, topo->npart, MPI_Particle, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD,&status); /* printf("received data: rank: %d\n",sim->mpirank); printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z); printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z); printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type); */ if ( sim->wlm[0] > 0 ) { //exchange WL data: mesh size and radius-hole arrays for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 2: /*it is complicated to send meshes of different sizes: we would have to send the sizes first, reallocate to the correct mesh size and only then send the data; it is easier to recalculate (a bit slower though)*/ mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim); break; case 5: 
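/* Radius-hole histogram exchange (hedged reading of the code below): the partner's
   array can have a different length, so the receive buffer is first realloc'd to the
   partner's radiusholemax, the local array is sent, the partner's is received, and
   longarray_cpy installs it before the new radiusholemax is adopted; case 6 repeats
   this for the hole measured from particle 0, and case 7 swaps the single
   partincontact counter. */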
//radiushole_all(topo,conf,sim,wli,&(conf->syscm)); sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax); MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD); MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax); sim->wl.radiusholemax=receivedmpi.radiusholemax; break; case 6: //radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax); MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD); MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax); sim->wl.radiusholemax=receivedmpi.radiusholemax; break; case 7: //contparticles_all(topo,conf,sim,wli); MPI_Send(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD); MPI_Recv(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); break; } } } } else { /*if exchange rejected send back info */ //printf("exchange rejected\n"); sim->mpiexch.rej++; MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank+1, count, MPI_COMM_WORLD); if ( sim->wlm[0] > 0 ) { sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha; sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++; } } } } if ( (localmpi.accepted) && (sim->pairlist_update) ) gen_pairlist(topo, sim, conf); MPI_Type_free(&MPI_exchange); MPI_Type_free(&MPI_Particle); MPI_Type_free(&MPI_vector); free(recwlweights); #endif return edriftchanges; } /*int mpi_newdatatypes() { int i; struct vector vec; struct particles part; struct mpiexchangedata exch; MPI_Aint dispstart; MPI_Datatype MPI_vector; MPI_Datatype type[3] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE}; int blocklen[3] = {1, 1, 1}; MPI_Aint disp[3]; MPI_Address( &vec, &dispstart); MPI_Address( &(vec.x), &disp[0]); MPI_Address( &(vec.y), &disp[1]); MPI_Address( &(vec.z), &disp[2]); for (i=0; i <3; i++) disp[i] -= dispstart; MPI_Type_struct( 3, blocklen, disp, type, &MPI_vector); MPI_Type_commit( &MPI_vector); MPI_Datatype MPI_Particle; MPI_Datatype type2[11] = {MPI_vector,MPI_vector,MPI_vector,MPI_vector,MPI_vector, MPI_LONG, MPI_LONG, MPI_INT,MPI_INT,MPI_DOUBLE, MPI_INT}; int blocklen2[11] = {1, 1, 2,4,2,1,1,1,1,1,1,}; MPI_Aint disp2[11]; MPI_Address( &part, &dispstart); MPI_Address( &(part.pos), &disp2[0]); MPI_Address( &(part.dir), &disp2[1]); MPI_Address( &(part.patchdir), &disp2[2]); MPI_Address( &(part.patchsides), &disp2[3]); MPI_Address( &(part.chdir), &disp2[4]); MPI_Address( &(part.chaint), &disp2[5]); MPI_Address( &(part.chainn), &disp2[6]); MPI_Address( &(part.type), &disp2[7]); MPI_Address( &(part.switchtype), &disp2[8]); MPI_Address( &(part.delta_mu), &disp2[9]); MPI_Address( &(part.switched), &disp2[10]); for (i=0; i <11; i++) disp2[i] -= dispstart; MPI_Type_struct( 11, blocklen2, disp2, type2, &MPI_Particle); MPI_Type_commit( &MPI_Particle); MPI_Datatype MPI_exchange; MPI_Datatype type3[5] = {MPI_vector, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_INT}; int blocklen3[5] = {1, 1, 1, 1, 1}; MPI_Aint disp3[5]; MPI_Address( &exch, &dispstart); 
MPI_Address( &(exch.box), &disp3[0]); MPI_Address( &(exch.energy), &disp3[1]); MPI_Address( &(exch.volume), &disp3[2]); MPI_Address( &(exch.pressure), &disp3[3]); MPI_Address( &(exch.accepted), &disp3[4]); for (i=0; i <5; i++) disp3[i] -= dispstart; MPI_Type_struct( 5, blocklen3, disp3, type3, &MPI_exchange); MPI_Type_commit( &MPI_exchange); return 0; }*/ /*................................................................................*/ /*................................................................................*/ /*....................END OF MOVES, INTERACTION FUNCTIONS FOLLOW..................*/ /*................................................................................*/ /*..............................................................................*/ /* Determines total energy of two spherocylinders type PSC PSC */ double e_psc_psc(struct interacts * interact) { double atrenergy, repenergy; void closestdist(struct interacts *); double erepulsive(struct interacts *); double eattractive_psc_psc(struct interacts *,int,int); closestdist(interact); repenergy = erepulsive(interact); if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) ) atrenergy = 0.0; else { BOOL firstCH=FALSE, secondCH=FALSE; struct vector olddir1 = interact->part1->dir; struct vector olddir2 = interact->part2->dir; if ( (interact->param->geotype[0] == CHPSC) || (interact->param->geotype[0] == TCHPSC) ) firstCH = TRUE; if ( (interact->param->geotype[1] == CHPSC) || (interact->param->geotype[1] == TCHPSC) ) secondCH = TRUE; if (firstCH) interact->part1->dir = interact->part1->chdir[0]; if (secondCH) interact->part2->dir = interact->part2->chdir[0]; if ( (firstCH) || (secondCH) ) { closestdist(interact); } atrenergy = eattractive_psc_psc(interact,0,0); /*addition of interaction of second patches*/ if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) || (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) { BOOL firstT=FALSE, secondT=FALSE; if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ) firstT = TRUE; if ( (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) secondT = TRUE; if (firstT) { if (firstCH && secondCH) { interact->part1->dir = interact->part1->chdir[1]; interact->part2->dir = interact->part2->chdir[0]; closestdist(interact); } if (firstCH && !secondCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); } if (!firstCH && secondCH) { interact->part2->dir = interact->part2->chdir[0]; closestdist(interact); } atrenergy += eattractive_psc_psc(interact,1,0); } if ( (firstT) && (secondT) ) { if (firstCH && secondCH) { interact->part1->dir = interact->part1->chdir[1]; interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } if (firstCH && !secondCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); } if (!firstCH && secondCH) { interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } atrenergy += eattractive_psc_psc(interact,1,1); } if (secondT) { if (firstCH && secondCH) { interact->part1->dir = interact->part1->chdir[0]; interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } if (firstCH && !secondCH) { interact->part1->dir = interact->part1->chdir[0]; interact->part2->dir = olddir2; closestdist(interact); } if (!firstCH && secondCH) { interact->part1->dir = olddir1; interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } 
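/* Second-patch bookkeeping (hedged recap): for the two-patch types (TPSC/TCHPSC)
   every patch pair (0,0), (1,0), (1,1), (0,1) contributes once; before each
   eattractive_psc_psc call the chiral axes chdir[0]/chdir[1] are swapped into
   part->dir where needed and closestdist() is refreshed so the contact geometry
   matches the axes in use, and the original directions are restored at the end. */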
atrenergy += eattractive_psc_psc(interact,0,1); } } if (firstCH) interact->part1->dir = olddir1; if (secondCH) interact->part2->dir = olddir2; } return repenergy+atrenergy; } /* Determines attractive energy of two spherocylinders type PSC PSC */ double eattractive_psc_psc(struct interacts * interact,int patchnum1,int patchnum2) { int i, intrs; double rcut, atrenergy, ndist; double v1, v2, f0, f1, f2, T1, T2, S1, S2, a; double intersections[5]; struct vector vec1, vec2, vec_intrs, vec_mindist; struct vector vec_sub(struct vector, struct vector); struct vector vec_sum(struct vector, struct vector); struct vector vec_create(double, double, double); struct vector vec_scale(struct vector, double); struct vector vec_perpproject(struct vector *, struct vector*); struct vector mindist_segments(struct vector, double, struct vector, double, struct vector); void normalise(struct vector *); int psc_intersect(struct particles *, struct particles *, double, double, struct vector, double *,double, struct ia_param *, int which, int patchnum); double fanglscale(double, struct ia_param *, int which); rcut = interact->param->rcut; //interact->halfl = interact->param->half_len[0]; //DEBUG_SIM("halfl = %lf", interact->halfl); for(i=0;i<5;i++) intersections[i]=0; //cospatch = param.pcanglsw; //cospatchinr = param.pcangl; /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at cut distance C*/ //DEBUG_SIM("first intersection"); intrs=psc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1); if (intrs <2){ //DEBUG_SIM("No intersection :("); return 0.0; /*sc is all outside patch, attractive energy is 0*/ } T1=intersections[0]; /*points on sc2*/ T2=intersections[1]; /*2- now do the same the opposite way: psc1 in patch of psc2*/ for(i=0;i<5;i++) intersections[i]=0; //DEBUG_SIM("get vector"); vec1=vec_scale(interact->r_cm,-1.0); //DEBUG_SIM("second intersection"); intrs=psc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2); if (intrs <2) return 0.0; /*sc is all outside patch, attractive energy is 0*/ S1=intersections[0]; /*points on sc1*/ S2=intersections[1]; /*3- scaling function1: dependence on the length of intersections*/ v1=fabs(S1-S2); v2=fabs(T1-T2); f0=0.5*(v1+v2); /*4a- from the two intersection pieces calculate the vector between their centres of mass -this is for angular orientation*/ vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5); vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5); vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x; vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y; vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z; /*vec_intrs should point from sc1 to sc2*/ //fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z); /*4b- calculate the closest distance and the attractive energy from it*/ vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs); //fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z); ndist=sqrt(DOT(vec_mindist,vec_mindist)); //dist=DOT(vec_intrs,vec_intrs); if (ndist < interact->param->pdis) atrenergy = -interact->param->epsilon; //atrenergy = -1.0; else { atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon ; } /*5- scaling function2: angular dependence of patch1*/ vec1=vec_scale(vec_intrs,1.0); 
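/* Angular patch factor (hedged note): vec_intrs is projected onto the plane
   perpendicular to the cylinder axis and normalised, so a = cos(theta) between that
   projection and the patch direction; fanglscale (defined below) returns 0 for
   a <= pcanglsw, 1 for a >= pcangl, and ramps linearly in between, giving a smooth
   angular switch-off of the patch attraction. */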
//vec1=vec_scale(vec_mindist,-1.0); vec1=vec_perpproject(&vec1, &interact->part1->dir); normalise(&vec1); a = DOT(vec1,interact->part1->patchdir[patchnum1]); f1 = fanglscale(a,interact->param, 0+2*patchnum1); /*6- scaling function3: angular dependence of patch2*/ vec1=vec_scale(vec_intrs,-1.0); //vec1=vec_scale(vec_mindist,1.0); vec1=vec_perpproject(&vec1, &interact->part2->dir); normalise(&vec1); a = DOT(vec1,interact->part2->patchdir[patchnum2]); f2 = fanglscale(a,interact->param, 1+2*patchnum2); //printf("v1: %f v2: %f f0: %f f1: %f f2: %f ener: %f\n",v1,v2,f0,f1,f2,atrenergy); /*7- put it all together*/ atrenergy *=f0*f1*f2; //if (atrenergy < 0) printf ("atraction %f\n",atrenergy); // fprintf (stderr, "attraction %.8f \n",atrenergy); // exit(1); return atrenergy; } /* a = r_ij * n_i */ double fanglscale(double a, struct ia_param * param, int which) { double f; // TODO for different types if (a <= param->pcanglsw[which]) f=0.0; else { if (a >= param->pcangl[which]) f=1.0; else { f = 0.5 - ((param->pcanglsw[which] + param->pcangl[which])*0.5 - a )/(param->pcangl[which] - param->pcanglsw[which]); } } return f; } /*CPSC..............................................................................*/ /* Determines total energy of two spherocylinders of type 3 -cylindrical psc -CPSC */ double e_cpsc_cpsc(struct interacts * interact) { double atrenergy, repenergy; void closestdist(struct interacts *); double erepulsive(struct interacts *); double eattractive_cpsc_cpsc(struct interacts *,int,int); //DEBUG_SIM("do energy 33") ; closestdist(interact); repenergy = erepulsive(interact); //DEBUG_SIM("got the rep. energy"); if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) ) atrenergy = 0.0; else { BOOL firstCH=FALSE, secondCH=FALSE; struct vector olddir1 = interact->part1->dir; struct vector olddir2 = interact->part2->dir; if ( (interact->param->geotype[0] == CHCPSC)||(interact->param->geotype[0] == TCHCPSC) ) firstCH = TRUE; if ( (interact->param->geotype[1] == CHCPSC)||(interact->param->geotype[1] == TCHCPSC) ) secondCH = TRUE; if(firstCH) interact->part1->dir = interact->part1->chdir[0]; if(secondCH) interact->part2->dir = interact->part2->chdir[0]; if ((firstCH) || (secondCH) ) { closestdist(interact); } atrenergy = eattractive_cpsc_cpsc(interact,0,0); /*addition of interaction of second patches*/ if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) || (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) { BOOL firstT=FALSE, secondT=FALSE; if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) ) firstT = TRUE; if ( (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) secondT = TRUE; if (firstT) { if (firstCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); } atrenergy += eattractive_cpsc_cpsc(interact,1,0); } if ( (firstT) && (secondT) ) { if (secondCH) { interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } atrenergy += eattractive_cpsc_cpsc(interact,1,1); } if (secondT) { if (firstT && firstCH ) { interact->part1->dir = interact->part1->chdir[0]; closestdist(interact); } atrenergy += eattractive_cpsc_cpsc(interact,0,1); } } if (firstCH) interact->part1->dir = olddir1; if (secondCH) interact->part2->dir = olddir2; } return repenergy+atrenergy; } /* Determines attractive energy of two spherocylinders of type 3 -cylindrical psc -CPSC */ double eattractive_cpsc_cpsc(struct interacts * 
interact, int patchnum1, int patchnum2) { int i, intrs; double rcut, atrenergy, v1, v2, f0, f1, f2, T1, T2, S1, S2, a, ndist; double intersections[5]; struct vector vec1, vec2, vec_intrs, vec_mindist; struct vector vec_sub(struct vector, struct vector); struct vector vec_sum(struct vector, struct vector); struct vector vec_create(double, double, double); struct vector vec_scale(struct vector, double); struct vector vec_perpproject(struct vector*, struct vector*); struct vector mindist_segments(struct vector, double, struct vector, double, struct vector); void normalise(struct vector *); int cpsc_intersect(struct particles *, struct particles *, double, double, struct vector, double *,double, struct ia_param *, int which, int patchnum); double fanglscale(double, struct ia_param *, int which); rcut = interact->param->rcut; // interact->halfl = interact->param->half_len[0]; for(i=0;i<5;i++) intersections[i]=0; /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at. cut distance C*/ intrs=cpsc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1); if (intrs <2) return 0.0; /*sc is all outside patch, attractive energy is 0*/ T1=intersections[0]; /*points on sc2*/ T2=intersections[1]; /*2- now do the same oposite way psc1 in patch of psc2*/ for(i=0;i<5;i++) intersections[i]=0; vec1=vec_scale(interact->r_cm,-1.0); intrs=cpsc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2); if (intrs <2) return 0.0; /*sc is all outside patch, attractive energy is 0*/ S1=intersections[0]; /*points on sc1*/ S2=intersections[1]; /*3- scaling function1: dependence on the length of intersetions*/ v1=fabs(S1-S2); v2=fabs(T1-T2); f0=0.5*(v1+v2); /*4a- with two intersection pices calculate vector between their CM -this is for angular orientation*/ vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5); vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5); vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x; vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y; vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z; /*vec_intrs should be from sc1 to sc2*/ // fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z); /*4b - calculate closest distance attractive energy from it*/ vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs); // fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z); ndist=sqrt(DOT(vec_mindist,vec_mindist)); //dist=DOT(vec_intrs,vec_intrs); if (ndist < interact->param->pdis) atrenergy = -interact->param->epsilon; else { atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon ; } /*5- scaling function2: angular dependence of patch1*/ vec1=vec_scale(vec_intrs,1.0); //vec1=vec_scale(vec_mindist,-1.0); vec1=vec_perpproject(&vec1, &interact->part1->dir); normalise(&vec1); a = DOT(vec1,interact->part1->patchdir[patchnum1]); f1 = fanglscale(a,interact->param, 0+2*patchnum1); /*6- scaling function3: angular dependence of patch2*/ vec1=vec_scale(vec_intrs,-1.0); //vec1=vec_scale(vec_mindist,1.0); vec1=vec_perpproject(&vec1, &interact->part2->dir); normalise(&vec1); a = DOT(vec1,interact->part2->patchdir[patchnum2]); f2 = fanglscale(a,interact->param, 1+2*patchnum2); /*7- put it all together*/ atrenergy *=f0*f1*f2; //if (atrenergy < 0) 
printf ("atraction %f\n",atrenergy); // fprintf (stderr, "attraction %.8f \n",atrenergy); // exit(1); return atrenergy; } /*..............................................................................*/ /* Determines total energy of spherocylinders type PSC and CPSC */ double e_psc_cpsc(struct interacts * interact) { double atrenergy, repenergy; void closestdist(struct interacts *); double erepulsive(struct interacts *); double eattractive_psc_cpsc(struct interacts *,int,int); //DEBUG_SIM("do energy 23") ; closestdist(interact); repenergy = erepulsive(interact); //DEBUG_SIM("got the rep. energy"); if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) ) atrenergy = 0.0; else { BOOL firstCH=FALSE, secondCH=FALSE; struct vector olddir1 = interact->part1->dir; struct vector olddir2 = interact->part2->dir; if ((interact->param->geotype[0] == CHPSC) || (interact->param->geotype[0] == CHCPSC)|| (interact->param->geotype[0] == TCHPSC) || (interact->param->geotype[0] == TCHCPSC) ) firstCH = TRUE; if ((interact->param->geotype[1] == CHPSC) || (interact->param->geotype[1] == CHCPSC)|| (interact->param->geotype[1] == TCHPSC) || (interact->param->geotype[1] == TCHCPSC) ) secondCH = TRUE; if(firstCH) interact->part1->dir = interact->part1->chdir[0]; if(secondCH) interact->part2->dir = interact->part2->chdir[0]; if ((firstCH) || (secondCH) ) { closestdist(interact); } atrenergy = eattractive_psc_cpsc(interact,0,0); /*addition of interaction of second patches*/ if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) || (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) || (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) || (interact->param->geotype[1] == TPSC) || (interact->param->geotype[1] == TCHPSC) ) { BOOL firstT=FALSE, secondT=FALSE; if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) || (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ) firstT = TRUE; if ( (interact->param->geotype[1] == TCPSC) || (interact->param->geotype[1] == TCHCPSC) || (interact->param->geotype[1] == TPSC) || (interact->param->geotype[1] == TCHPSC) ) secondT = TRUE; if (firstT) { if (firstCH && secondCH) { interact->part1->dir = interact->part1->chdir[1]; interact->part2->dir = interact->part2->chdir[0]; closestdist(interact); } if (firstCH && !secondCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); } if (!firstCH && secondCH) { interact->part2->dir = interact->part2->chdir[0]; closestdist(interact); } atrenergy += eattractive_psc_cpsc(interact,1,0); } if ( (firstT) && (secondT) ) { if (firstCH && secondCH) { interact->part1->dir = interact->part1->chdir[1]; interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } if (firstCH && !secondCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); } if (!firstCH && secondCH) { interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } atrenergy += eattractive_psc_cpsc(interact,1,1); } if (secondT) { if (firstCH && secondCH) { interact->part1->dir = interact->part1->chdir[0]; interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } if (firstCH && !secondCH) { interact->part1->dir = interact->part1->chdir[0]; interact->part2->dir = olddir2; closestdist(interact); } if (!firstCH && secondCH) { interact->part1->dir = olddir1; interact->part2->dir = interact->part2->chdir[1]; 
closestdist(interact); } atrenergy += eattractive_psc_cpsc(interact,0,1); } } if (firstCH) interact->part1->dir = olddir1; if (secondCH) interact->part2->dir = olddir2; } return repenergy+atrenergy; } /* Determines attractive energy of spherocylinders type PSC and CPSC */ double eattractive_psc_cpsc(struct interacts * interact,int patchnum1,int patchnum2) { int i, intrs; double rcut, atrenergy, ndist; double v1, v2, f0, f1, f2, T1, T2, S1, S2, a; double intersections[5]; struct vector vec1, vec2, vec_intrs, vec_mindist; struct vector vec_sub(struct vector, struct vector); struct vector vec_sum(struct vector, struct vector); struct vector vec_create(double, double, double); struct vector vec_scale(struct vector, double); struct vector vec_perpproject(struct vector*, struct vector*); struct vector mindist_segments(struct vector, double, struct vector, double, struct vector); void normalise(struct vector *); int psc_intersect(struct particles *, struct particles *, double, double, struct vector, double *,double, struct ia_param *, int which,int patchnum); int cpsc_intersect(struct particles *, struct particles *, double, double, struct vector, double *,double, struct ia_param *, int which,int patchnum); double fanglscale(double, struct ia_param *, int which); rcut = interact->param->rcut; //interact->halfl = interact->param->half_len[0]; //DEBUG_SIM("halfl = %lf", interact->halfl); for(i=0;i<5;i++) intersections[i]=0; BOOL first; if ( (interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)||(interact->param->geotype[0] == TPSC)||(interact->param->geotype[0] == TCHPSC) ){ first = TRUE; } else { first = FALSE; } //cospatch = param.pcanglsw; //cospatchinr = param.pcangl; /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at. 
cut distance C*/ //DEBUG_SIM("first intersection"); if (first) { intrs=psc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1); } else { intrs=cpsc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1); } //DEBUG_SIM("first intersection: done"); if (intrs <2){ //DEBUG_SIM("No intersection :("); return 0.0; /*sc is all outside patch, attractive energy is 0*/ } T1=intersections[0]; /*points on sc2*/ T2=intersections[1]; /*2- now do the same oposite way psc1 in patch of psc2*/ for(i=0;i<5;i++) intersections[i]=0; //DEBUG_SIM("get vector"); vec1=vec_scale(interact->r_cm,-1.0); //DEBUG_SIM("second intersection"); if (first) { intrs=cpsc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2); } else { intrs=psc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2); } if (intrs <2) return 0.0; /*sc is all outside patch, attractive energy is 0*/ S1=intersections[0]; /*points on sc1*/ S2=intersections[1]; /*3- scaling function1: dependence on the length of intersetions*/ v1=fabs(S1-S2); v2=fabs(T1-T2); f0=0.5*(v1+v2); /*4a- with two intersection pices calculate vector between their CM -this is for angular orientation*/ vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5); vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5); vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x; vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y; vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z; /*vec_intrs should be from sc1 to sc2*/ // fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z); /*4b - calculate closest distance attractive energy from it*/ vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs); // fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z); ndist=sqrt(DOT(vec_mindist,vec_mindist)); //dist=DOT(vec_intrs,vec_intrs); if (ndist < interact->param->pdis) atrenergy = -interact->param->epsilon; //atrenergy = -1.0; else { atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon ; } /*5- scaling function2: angular dependence of patch1*/ vec1=vec_scale(vec_intrs,1.0); //vec1=vec_scale(vec_mindist,-1.0); vec1=vec_perpproject(&vec1, &interact->part1->dir); normalise(&vec1); a = DOT(vec1,interact->part1->patchdir[patchnum1]); f1 = fanglscale(a,interact->param, 0+2*patchnum1); /*6- scaling function3: angular dependence of patch2*/ vec1=vec_scale(vec_intrs,-1.0); //vec1=vec_scale(vec_mindist,1.0); vec1=vec_perpproject(&vec1, &interact->part2->dir); normalise(&vec1); a = DOT(vec1,interact->part2->patchdir[patchnum2]); f2 = fanglscale(a,interact->param, 1+2*patchnum2); /*7- put it all together*/ atrenergy *=f0*f1*f2; //if (atrenergy < 0) printf ("atraction %f\n",atrenergy); // fprintf (stderr, "attraction %.8f \n",atrenergy); // exit(1); return atrenergy; } /*..............................................................................*/ /* * Determines total energy of spherocylinder type 1 and sphere type 11 */ double e_spa_sca(struct interacts * interact) { double atrenergy, repenergy, b, f0, halfl; struct vector 
vec_perpproject(struct vector *, struct vector *); void normalise(struct vector *); void closestdist(struct interacts *); double erepulsive(struct interacts *); double fanglscale(double, struct ia_param *, int which); //DEBUG printf ("do energy 111 \n\n"); closestdist(interact); repenergy = erepulsive(interact); if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) ) atrenergy = 0.0; else { /*calculate closest distance attractive energy*/ if (interact->dist < interact->param->pdis) atrenergy = -interact->param->epsilon; else { atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon ; } /*scaling function for the length of spherocylinder within cutoff*/ if (interact->param->geotype [0] < SP) halfl = interact->param->half_len[0]; else halfl = interact->param->half_len[1]; b = sqrt(interact->param->rcut*interact->param->rcut-interact->dist*interact->dist); if ( interact->contt + b > halfl ) f0 = halfl; else f0 = interact->contt + b; if ( interact->contt - b < -halfl ) f0 -= -halfl; else f0 -= interact->contt - b; atrenergy *= f0; //if (atrenergy < 0) printf ("atraction %f\n",atrenergy); //fprintf (stderr, "attraction211 %.8f x: %.8f y: %.8f z: %.8f \n",atrenergy,vec1.x,vec1.y,vec1.z); //exit(1); } return repenergy+atrenergy; } /*..............................................................................*/ /* * Determines total energy of spherocylinder type 2 and sphere type 11 */ double e_psc_spa(struct interacts * interact) { double atrenergy = 0.0, repenergy; void closestdist(struct interacts *); double erepulsive(struct interacts *); double eattractive_psc_spa(struct interacts *, int); //DEBUG_SIM("do energy 211") ; closestdist(interact); repenergy = erepulsive(interact); //DEBUG_SIM("got the rep. 
energy"); if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) ) atrenergy = 0.0; else { BOOL firstCH=FALSE, secondCH=FALSE; struct vector olddir1 = interact->part1->dir; struct vector olddir2 = interact->part2->dir; if ( (interact->param->geotype[0] == CHPSC) || (interact->param->geotype[0] == TCHPSC) ) firstCH = TRUE; if ( (interact->param->geotype[1] == CHPSC) || (interact->param->geotype[1] == TCHPSC) ) secondCH = TRUE; if(firstCH) interact->part1->dir = interact->part1->chdir[0]; if(secondCH) interact->part2->dir = interact->part2->chdir[0]; if ((firstCH) || (secondCH) ) { closestdist(interact); } if(interact->dist < interact->param->rcut) atrenergy = eattractive_psc_spa(interact,0); //addition of interaction of second patches if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) || (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) { BOOL firstT=FALSE, secondT=FALSE; if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ) firstT = TRUE; if ( (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) secondT = TRUE; if (firstT) { if (firstCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); } if(interact->dist < interact->param->rcut) atrenergy += eattractive_psc_spa(interact,1); } if (secondT) { if(secondCH) { interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } if(interact->dist < interact->param->rcut) atrenergy += eattractive_psc_spa(interact,1); } if ( (firstT) && (secondT) ) { fprintf (stderr, "ERROR PSC should interact s SPA but got two PSC \n"); exit(1); } } interact->part1->dir = olddir1; interact->part2->dir = olddir2; } return repenergy+atrenergy; } /* * Determines attractive energy of spherocylinder type 2 and sphere type 11 */ double eattractive_psc_spa(struct interacts * interact, int patchnum1) { double atrenergy, a, b, f0, halfl; struct vector vec1; struct vector vec_perpproject(struct vector *, struct vector*); void normalise(struct vector *); double fanglscale(double, struct ia_param *, int which); int which; /*calculate closest distance attractive energy*/ if (interact->dist < interact->param->pdis) atrenergy = -interact->param->epsilon; else { atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon ; } /*scaling function: angular dependence of patch1*/ if (interact->param->geotype[0] < SP) { which = 0; vec1=vec_perpproject(&interact->distvec, &interact->part1->dir); normalise(&vec1); a = DOT(vec1,interact->part1->patchdir[patchnum1]); halfl=interact->param->half_len[0]; } else { which = 1; vec1=vec_perpproject(&interact->distvec, &interact->part2->dir); normalise(&vec1); a = DOT(vec1,interact->part2->patchdir[patchnum1]); halfl=interact->param->half_len[1]; } // caling function for the length of spherocylinder within cutoff b = sqrt(interact->param->rcut*interact->param->rcut - interact->dist*interact->dist); if ( interact->contt + b > halfl ) f0 = halfl; else f0 = interact->contt + b; if ( interact->contt - b < -halfl ) f0 -= -halfl; else f0 -= interact->contt - b; atrenergy *= fanglscale(a,interact->param, which)*f0; //if (atrenergy < 0) printf ("atraction %f\n",atrenergy); //fprintf (stderr, "attraction211 %.8f x: %.8f y: %.8f z: %.8f \n",atrenergy,vec1.x,vec1.y,vec1.z); //exit(1); return atrenergy; } /*..............................................................................*/ /* 
Determines total energy of spherocylinder type 3 and sphere type 11 */ double e_cpsc_spa(struct interacts * interact) { double atrenergy, repenergy, halfl; void closestdist(struct interacts *); double erepulsive(struct interacts *); double eattractive_cpsc_spa(struct interacts *,int); //DEBUG_SIM("do energy 311"); closestdist(interact); repenergy = erepulsive(interact); //DEBUG_SIM("got the rep. energy"); if (interact->param->geotype[0] < SP) { halfl=interact->param->half_len[0]; } else { halfl=interact->param->half_len[1]; } if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) || (interact->contt > halfl) || (interact->contt < -halfl) ) atrenergy = 0.0; else { BOOL firstCH=FALSE, secondCH=FALSE; struct vector olddir1 = interact->part1->dir; struct vector olddir2 = interact->part2->dir; if ( (interact->param->geotype[0] == CHCPSC) || (interact->param->geotype[0] == TCHCPSC) ) firstCH = TRUE; if ( (interact->param->geotype[1] == CHCPSC) || (interact->param->geotype[1] == TCHCPSC) ) secondCH = TRUE; if(firstCH) interact->part1->dir = interact->part1->chdir[0]; if(secondCH) interact->part2->dir = interact->part2->chdir[0]; if ((firstCH) || (secondCH) ) { closestdist(interact); } if(interact->dist < interact->param->rcut) atrenergy = eattractive_cpsc_spa(interact,0); /*addition of interaction of second patches*/ if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) || (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) { BOOL firstT=FALSE, secondT=FALSE; if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) ) firstT = TRUE; if ( (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) secondT = TRUE; if (firstT) { if (firstCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); } if(interact->dist < interact->param->rcut) atrenergy += eattractive_cpsc_spa(interact,1); } if (secondT) { if(secondCH) { interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } if(interact->dist < interact->param->rcut) atrenergy += eattractive_cpsc_spa(interact,1); } if ( (firstT) && (secondT) ) { fprintf (stderr, "ERROR: PSC should interact with SPA but got two patched PSC \n"); exit(1); } } if (firstCH) interact->part1->dir = olddir1; if (secondCH) interact->part2->dir = olddir2; } return repenergy+atrenergy; } /* Determines attractive energy of spherocylinder type 3 and sphere type 11 */ double eattractive_cpsc_spa(struct interacts * interact,int patchnum1) { double atrenergy, a, b, f0, halfl; struct vector vec1; int which; struct vector vec_perpproject(struct vector *, struct vector*); void normalise(struct vector *); double fanglscale(double, struct ia_param *, int which); /*attractive only if the contact lies in the cylindrical part: c > -halfl and c < halfl*/ /*calculate closest distance attractive energy*/ if (interact->dist < interact->param->pdis) atrenergy = -interact->param->epsilon; else { atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon ; } /*scaling function: angular dependence of patch1*/ if (interact->param->geotype[0] < SP) { which = 0; vec1=vec_perpproject(&interact->distvec, &interact->part1->dir); normalise(&vec1); a = DOT(vec1,interact->part1->patchdir[patchnum1]); halfl = interact->param->half_len[0]; } else { which = 1; vec1=vec_perpproject(&interact->distvec, &interact->part2->dir); normalise(&vec1); a = 
/* Determines total energy of two particles of type 11 (two SCA spherocylinders or two SPA spheres) */
double e_2sca_or_2spa(struct interacts * interact)
{
    double repenergy, atrenergy;
    double erepulsive(struct interacts *);
    void closestdist(struct interacts *);

    closestdist(interact);
    repenergy = erepulsive(interact);
    if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
        atrenergy = 0.0;
    else {
        if (interact->dist < interact->param->pdis)
            atrenergy = -interact->param->epsilon;
        else {
            atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch);
            atrenergy *= -atrenergy*interact->param->epsilon ;
        }
    }
    return repenergy+atrenergy;
}

/*..............................................................................*/
/* Determines total energy with purely repulsive types */
double e_spn_or_scn(struct interacts * interact)
{
    double repenergy;
    double erepulsive(struct interacts *);
    void closestdist(struct interacts *);

    closestdist(interact);
    repenergy = erepulsive(interact);

    return repenergy;
}

/*..............................................................................*/
/* Determines repulsive energy of two spherocylinders */
double erepulsive(struct interacts * interact)
{
    double repenergy, en6;

    /* WCA repulsion */
    if (interact->dist > interact->param->rcutwca)
        repenergy = 0.0;
    else {
        en6 = pow((interact->param->sigma/interact->dist),6);
        repenergy = interact->param->epsilon * ( 4*en6*(en6-1) + 1.0);
    }
    //int Digs = 20;
    //printf("dist: %.*e, repenergy: %.*e\n",Digs, interact->dist, Digs, repenergy);

    return repenergy;
}

/*..............................................................................*/
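/* Illustrative sketch (kept in #if 0, not compiled): the WCA form used by
   erepulsive() is a Lennard-Jones potential shifted up by epsilon and
   truncated at its minimum, so it is purely repulsive; rcutwca is presumably
   2^(1/6)*sigma, where the shifted LJ reaches zero. */
#if 0
static double wca(double r, double sigma, double epsilon, double rcutwca)
{
    double en6;
    if (r > rcutwca)
        return 0.0;
    en6 = pow(sigma/r, 6);
    return epsilon * ( 4*en6*(en6-1) + 1.0 );   /* 4*eps*(s^12/r^12 - s^6/r^6) + eps */
}
#endif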
/* Indicates a not yet programmed interaction */
double enoexist(struct interacts * interact)
{
    double energy=0.0;

    fprintf (stderr, "ERROR: We have not programmed interaction of types %d and %d\n",
             interact->part1->type,interact->part2->type);
    exit (1);
    return energy;
}

/* Function for calculation of a harmonic potential */
double harmonic(double aktualvalue, double eqvalue, double springconst)
{
    return springconst*(aktualvalue-eqvalue)*(aktualvalue-eqvalue)*0.5;
}

/*..............................................................................*/
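/* Illustrative sketch (kept in #if 0, not compiled): harmonic() above returns
   0.5*k*(x-x0)^2, so a bond stretched 0.1 past its equilibrium length with
   springconst = 50 costs 0.5*50*0.1*0.1 = 0.25 energy units. */
#if 0
static double example_bond_energy(void)
{
    double harmonic(double, double, double);
    return harmonic(1.1, 1.0, 50.0);   /* == 0.25 */
}
#endif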
/* Determines bond energy */
double bondenergy(long num1, long num2, struct interacts * interact, struct topo * topo, struct conf * conf)
{
    double energy=0.0, bondlength, halfl;
    struct vector vec1, vec2, vecbond;
    int * geotype = interact->param->geotype;
    struct vector image(struct vector, struct vector, struct vector);
    double harmonic(double, double, double);

    /*interaction with nearest neighbours - harmonic*/
    if ((topo->chainparam[conf->particle[num1].chaint]).bond1c >= 0) {
        if (num2 == topo->conlist[num1][1]) {
            /*num1 is connected to num2 by tail*/
            if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
            else {
                if (geotype[0] < SP) halfl=interact->param->half_len[0];
                else halfl = 0.0;
                vec1.x=conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x;
                vec1.y=conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y;
                vec1.z=conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z;
                if (geotype[1] < SP) halfl=interact->param->half_len[1];
                else halfl = 0.0;
                vec2.x=conf->particle[num2].pos.x + conf->particle[num2].dir.x * halfl /conf->box.x;
                vec2.y=conf->particle[num2].pos.y + conf->particle[num2].dir.y * halfl /conf->box.y;
                vec2.z=conf->particle[num2].pos.z + conf->particle[num2].dir.z * halfl /conf->box.z;
                vecbond = image(vec1, vec2, conf->box);
                bondlength = sqrt(DOT(vecbond,vecbond));
                energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
            }
        } else {
            if (num2 == topo->conlist[num1][0]) {
                /*num1 is connected to num2 by head*/
                if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                    energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
                else {
                    if (geotype[0] < SP) halfl=interact->param->half_len[0];
                    else halfl = 0.0;
                    vec1.x=conf->particle[num1].pos.x + conf->particle[num1].dir.x * halfl /conf->box.x;
                    vec1.y=conf->particle[num1].pos.y + conf->particle[num1].dir.y * halfl /conf->box.y;
                    vec1.z=conf->particle[num1].pos.z + conf->particle[num1].dir.z * halfl /conf->box.z;
                    if (geotype[1] < SP) halfl=interact->param->half_len[1];
                    else halfl = 0.0;
                    vec2.x=conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x;
                    vec2.y=conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y;
                    vec2.z=conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z;
                    vecbond = image(vec1, vec2, conf->box);
                    bondlength = sqrt(DOT(vecbond,vecbond));
                    energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
                }
            }
        }
    }
    /*interaction with second nearest neighbours - harmonic*/
    if (topo->chainparam[conf->particle[num1].chaint].bond2c >= 0) {
        if (num2 == topo->conlist[num1][2]) {
            /*num1 is connected to num2 by tail*/
            if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
            else {
                vecbond = image(conf->particle[num1].pos, conf->particle[num2].pos, conf->box);
                bondlength = sqrt(DOT(vecbond,vecbond));
                energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
            }
        } else {
            if (num2 == topo->conlist[num1][3]) {
                /*num1 is connected to num2 by head*/
                if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                    energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
                else {
                    vecbond = image(conf->particle[num1].pos, conf->particle[num2].pos, conf->box);
                    bondlength = sqrt(DOT(vecbond,vecbond));
                    energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
                }
            }
        }
    }
    /*interaction with nearest neighbours - direct harmonic bond*/
    if ((topo->chainparam[conf->particle[num1].chaint]).bonddc > 0) {
        if (num2 == topo->conlist[num1][1]) {
            /*num1 is connected to num2 by tail*/
            if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bonddeq,topo->chainparam[conf->particle[num1].chaint].bonddc);
            else {
                if (geotype[0] < SP) halfl=interact->param->half_len[0];
                else halfl = 0.0;
                vec1.x=conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x;
                vec1.y=conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y;
                vec1.z=conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z;
                if (geotype[1] < SP) halfl=interact->param->half_len[1];
                else halfl = 0.0;
                vec2.x=conf->particle[num2].pos.x + conf->particle[num2].dir.x * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.x ;
                vec2.y=conf->particle[num2].pos.y + conf->particle[num2].dir.y * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.y ;
                vec2.z=conf->particle[num2].pos.z + conf->particle[num2].dir.z * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.z ;
                vecbond = image(vec1, vec2, conf->box);
                bondlength = sqrt(DOT(vecbond,vecbond));
                energy = harmonic(bondlength,0.0,topo->chainparam[conf->particle[num1].chaint].bonddc);
            }
        } else {
            if (num2 == topo->conlist[num1][0]) {
                /*num1 is connected to num2 by head*/
                if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                    energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bonddeq,topo->chainparam[conf->particle[num1].chaint].bonddc);
                else {
                    if (geotype[0] < SP) halfl=interact->param->half_len[0];
                    else halfl = 0.0;
                    vec1.x=conf->particle[num1].pos.x + conf->particle[num1].dir.x * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.x ;
                    vec1.y=conf->particle[num1].pos.y + conf->particle[num1].dir.y * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.y ;
                    vec1.z=conf->particle[num1].pos.z + conf->particle[num1].dir.z * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.z ;
                    if (geotype[1] < SP) halfl=interact->param->half_len[1];
                    else halfl = 0.0;
                    vec2.x=conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x;
                    vec2.y=conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y;
                    vec2.z=conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z;
                    vecbond = image(vec1, vec2, conf->box);
                    bondlength = sqrt(DOT(vecbond,vecbond));
                    energy = harmonic(bondlength,0.0,topo->chainparam[conf->particle[num1].chaint].bonddc);
                }
            }
        }
    }
    //printf("bondlength: %f\n",bondlength);
    // printf("bondener: %f\n",energy);
    return energy;
}

/*..............................................................................*/
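/* Illustrative sketch (kept in #if 0, not compiled): the bond vectors above
   run between spherocylinder ends, not centres - an end point in box
   (fractional) units is pos +- dir*halfl scaled by the box, which image()
   then wraps into the nearest periodic image. */
#if 0
static struct vector sc_end(struct particles *p, double halfl, struct vector box, int sign)
{
    struct vector end;
    end.x = p->pos.x + sign * p->dir.x * halfl / box.x;
    end.y = p->pos.y + sign * p->dir.y * halfl / box.y;
    end.z = p->pos.z + sign * p->dir.z * halfl / box.z;
    return end;                 /* sign = +1 for the head, -1 for the tail */
}
#endif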
/* Determines angle energy between spherocylinders */
double angleenergy(long num1, long num2, struct interacts * interact, struct topo * topo, struct conf * conf)
{
    double energy=0.0, currangle, halfl;
    struct vector vec1, vec2;
    int * geotype = interact->param->geotype;
    struct vector image(struct vector, struct vector, struct vector);
    void normalise(struct vector *);
    double harmonic(double, double, double);

    /*angle interaction with nearest neighbours - harmonic*/
    if ((topo->chainparam[conf->particle[num1].chaint]).angle1c >= 0) {
        if (num2 == topo->conlist[num1][0]) {
            /*num1 is connected to num2 by tail*/
            if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                /*spheres do not have this interaction*/
                energy += 0.0;
            else {
                if (geotype[0] < SP)
                    vec1 = conf->particle[num1].dir;
                else {
                    halfl=interact->param->half_len[1];
                    //sphere angle is defined versus the end of spherocylinder
                    vec1.x=conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x;
                    vec1.y=conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y;
                    vec1.z=conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z;
                    vec1 = image(vec1, conf->particle[num1].pos, conf->box);
                }
                if (geotype[1] < SP)
                    vec2 = conf->particle[num2].dir;
                else {
                    halfl=interact->param->half_len[0];
                    vec2.x=conf->particle[num1].pos.x + conf->particle[num1].dir.x * halfl /conf->box.x;
                    vec2.y=conf->particle[num1].pos.y + conf->particle[num1].dir.y * halfl /conf->box.y;
                    vec2.z=conf->particle[num1].pos.z + conf->particle[num1].dir.z * halfl /conf->box.z;
                    vec2 = image(vec2, conf->particle[num2].pos, conf->box);
                }
                normalise(&vec1);
                normalise(&vec2);
                currangle = acos(DOT(vec1,vec2));
                energy += harmonic(currangle,topo->chainparam[conf->particle[num1].chaint].angle1eq,topo->chainparam[conf->particle[num1].chaint].angle1c);
            }
        } else {
            if (num2 == topo->conlist[num1][1]) {
                /*num1 is connected to num2 by head*/
                if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                    /*spheres do not have this interaction*/
                    energy += 0.0;
                else {
                    if (geotype[0] < SP)
                        vec1 = conf->particle[num1].dir;
                    else {
                        halfl=interact->param->half_len[1];
                        //sphere angle is defined versus the end of spherocylinder
                        vec1.x=conf->particle[num2].pos.x + conf->particle[num2].dir.x * halfl /conf->box.x;
                        vec1.y=conf->particle[num2].pos.y + conf->particle[num2].dir.y * halfl /conf->box.y;
                        vec1.z=conf->particle[num2].pos.z + conf->particle[num2].dir.z * halfl /conf->box.z;
                        vec1 = image(vec1, conf->particle[num1].pos, conf->box);
                    }
                    if (geotype[1] < SP)
                        vec2 = conf->particle[num2].dir;
                    else {
                        halfl=interact->param->half_len[0];
                        vec2.x=conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x;
                        vec2.y=conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y;
                        vec2.z=conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z;
                        vec2 = image(vec2, conf->particle[num2].pos, conf->box);
                    }
                    normalise(&vec1);
                    normalise(&vec2);
                    currangle = acos(DOT(vec1,vec2));
                    energy += harmonic(currangle,topo->chainparam[conf->particle[num2].chaint].angle1eq,topo->chainparam[conf->particle[num2].chaint].angle1c);
                }
            }
        }
    }
    /*interaction between the orientation of spherocylinder patches - harmonic*/
    if (topo->chainparam[conf->particle[num1].chaint].angle2c >= 0) {
        if (num2 == topo->conlist[num1][0]) {
            /*num1 is connected to num2 by tail*/
            if ( (geotype[0] < SP) && (geotype[1] < SP) ) {
                currangle = acos(DOT(conf->particle[num1].patchdir[0],conf->particle[num2].patchdir[0])
                               - DOT(conf->particle[num1].dir,conf->particle[num2].patchdir[0]) );
                energy += harmonic(currangle,topo->chainparam[conf->particle[num1].chaint].angle2eq,topo->chainparam[conf->particle[num1].chaint].angle2c);
            } else {
                energy += 0.0;
            }
        } else {
            if (num2 == topo->conlist[num1][1]) {
                /*num1 is connected to num2 by head*/
                if ( (geotype[0] < SP) && (geotype[1] < SP) ) {
                    currangle = acos(DOT(conf->particle[num2].patchdir[0],conf->particle[num1].patchdir[0])
                                   - DOT(conf->particle[num2].dir,conf->particle[num1].patchdir[0]) );
                    energy += harmonic(currangle,topo->chainparam[conf->particle[num2].chaint].angle2eq,topo->chainparam[conf->particle[num2].chaint].angle2c);
                } else {
                    energy += 0.0;
                }
            }
        }
    }
    // printf("angleener: %f\n",energy);
    return energy;
}
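/* Illustrative sketch (kept in #if 0, not compiled): the core of the angle
   term above - normalise the two bond/axis vectors, take the angle from
   their dot product, and feed it to the same harmonic() used for bonds. */
#if 0
static double example_angle_energy(struct vector u1, struct vector u2, double eq, double k)
{
    void normalise(struct vector *);
    double harmonic(double, double, double);
    normalise(&u1);
    normalise(&u2);
    return harmonic(acos(DOT(u1,u2)), eq, k);
}
#endif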
/* Closest distance calculation */
void closestdist(struct interacts * interact)
{
    double c, d, halfl;
    struct vector mindist_segments(struct vector dir1, double halfl1, struct vector dir2, double halfl2, struct vector r_cm);
    double linemin(double, double);

    //printf("we have %d %d ",interact->param->geotype[0],interact->param->geotype[1] );
    if ((interact->param->geotype[0] >= SP) && (interact->param->geotype[1] >= SP)) {
        /*we have two spheres - most common, do nothing*/
        //printf("we have two spheres ");
        interact->distvec = interact->r_cm;
        interact->dist = sqrt(interact->dotrcm);
        interact->distcm = interact->dist;
    } else {
        if ((interact->param->geotype[0] < SP) && (interact->param->geotype[1] < SP)) {
            /*we have two spherocylinders*/
            interact->distvec = mindist_segments(interact->part1->dir,interact->param->half_len[0],
                                                 interact->part2->dir, interact->param->half_len[1], interact->r_cm);
            interact->dist=sqrt(DOT(interact->distvec,interact->distvec));
        } else {
            if (interact->param->geotype[0] < SP) {
                /*we have one spherocylinder - it is the first one*/
                halfl=interact->param->half_len[0];
                /*finding closest vector from spherocylinder to sphere*/
                c = DOT(interact->part1->dir,interact->r_cm);
                if (c >= halfl) d = halfl;
                else {
                    if (c > -halfl) d = c;
                    else d = -halfl;
                }
                interact->contt = c;
                interact->distvec.x = - interact->r_cm.x + interact->part1->dir.x * d;
                interact->distvec.y = - interact->r_cm.y + interact->part1->dir.y * d;
                interact->distvec.z = - interact->r_cm.z + interact->part1->dir.z * d;
                interact->dist=sqrt(DOT(interact->distvec,interact->distvec));
            } else {
                /*last option: the first one is a sphere, the second one a spherocylinder*/
                halfl=interact->param->half_len[1];
                /*finding closest vector from spherocylinder to sphere*/
                c = DOT(interact->part2->dir,interact->r_cm);
                if (c >= halfl) d = halfl;
                else {
                    if (c > -halfl) d = c;
                    else d = -halfl;
                }
                interact->contt = -c;
                interact->distvec.x = interact->r_cm.x - interact->part2->dir.x * d;
                interact->distvec.y = interact->r_cm.y - interact->part2->dir.y * d;
                interact->distvec.z = interact->r_cm.z - interact->part2->dir.z * d;
                interact->dist=sqrt(DOT(interact->distvec,interact->distvec));
            }
        }
    }
}

/*..............................................................................*/
/* Determines energy of two particles */
double paire(long num1, long num2, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf)
{
    double energy=0.0;           /* energy*/
    struct vector r_cm;          /* Vector between centres of mass from part2 to part1*/
    struct interacts interact;   /*interaction parameters*/
    double bondenergy(long, long, struct interacts *, struct topo * topo, struct conf * conf);
    double angleenergy(long, long, struct interacts *, struct topo * topo, struct conf * conf);

    /*Placing interacting particle in unit box and finding vector connecting CM*/
    /*r_cm = image(part1.pos, part2.pos, box); explicit statement below for performance optimization*/
    /*nearest image: positions are stored in box (fractional) units, so round the
      coordinate difference to the nearest integer image and scale by the box length*/
    r_cm.x = conf->particle[num1].pos.x - conf->particle[num2].pos.x;
    r_cm.y = conf->particle[num1].pos.y - conf->particle[num2].pos.y;
    r_cm.z = conf->particle[num1].pos.z - conf->particle[num2].pos.z;
    if ( r_cm.x < 0 )
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x-0.5) ) );
    else
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x+0.5) ) );
    if ( r_cm.y < 0 )
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y-0.5) ) );
    else
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y+0.5) ) );
    if ( r_cm.z < 0 )
        r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z-0.5) ) );
    else
        r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z+0.5) ) );

    interact.dotrcm = DOT(r_cm,r_cm);
    if ( interact.dotrcm > topo->sqmaxcut)
return 0.0; /* distance so far that even spherocylinders cannot be within cutoff */ interact.r_cm=r_cm; interact.contt = 0; interact.distvec.x = 0; interact.distvec.y = 0; interact.distvec.z = 0; interact.box = conf->box; interact.part1 = &conf->particle[num1]; interact.part2 = &conf->particle[num2]; interact.param = topo->ia_params[conf->particle[num1].type] + conf->particle[num2].type; if(intfce[conf->particle[num1].type][conf->particle[num2].type] == NULL){ fprintf(stderr, "interaction function for type %d and %d not defined!\n", conf->particle[num1].type, conf->particle[num2].type); } energy = (*intfce[conf->particle[num1].type][conf->particle[num2].type])( &interact); //printf("num: %ld %ld e: %f dist: %f",num1,num2,energy,interact.dist); energy += bondenergy ( num1, num2, &interact, topo, conf); energy += angleenergy ( num1, num2, &interact, topo, conf); //printf(" e: %f\n",energy); return energy; } /*...........................................................................*/ /*Calculates interaction of target particle and external field version 2 calculate projection of spherocylinder in direction of patch and calculate interacting line segment within cutoff */ double extere2 (long target, struct topo * topo, struct conf * conf) { double repenergy=0.0,atrenergy=0.0; /* energy*/ double rcmz; /* z distance between*/ double ndist; /* distance for CM of interacting line segment*/ double interendz; /* z coordinate of interaction end*/ struct interacts interact; /* interaction parameters*/ double orient; double halfl; BOOL positive, orientin; struct vector olddir; struct vector project; /*vector for projection down to plane */ double erepulsive(struct interacts *); // struct vector vec_perpproject(struct vector*, struct vector*); // void normalise(struct vector *); double fanglscale(double, struct ia_param *, int which); void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient, double *rcmz,double *interendz, struct vector *project); double exter2_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive, double orient,struct vector *project, double *ndist,int, double ); /* calcualte distance to center of mass*/ if ( conf->particle[target].pos.z < 0 ) { rcmz = conf->box.z * (conf->particle[target].pos.z - (double)( (long)(conf->particle[target].pos.z - 0.5) ) ); } else { rcmz = conf->box.z * (conf->particle[target].pos.z - (double)( (long)(conf->particle[target].pos.z + 0.5) ) ); } project.x=0; project.y=0; if (rcmz < 0) { interact.dist = -rcmz; positive = FALSE; interendz = -1.0; project.z = 1.0; } else { interact.dist = rcmz; positive = TRUE; interendz = 1.0; project.z = -1.0; } interact.dotrcm = rcmz * rcmz; if ( interact.dotrcm > topo->exter.sqmaxcut) return 0.0; /* distance so far that even spherocylinders cannot be within cutoff */ interact.distvec.z = interact.r_cm.z; interact.distcm = interact.dist; interact.box = conf->box; interact.part1 = &conf->particle[target]; interact.param = &topo->exter.interactions[conf->particle[target].type]; halfl = 0.5* topo->exter.interactions[conf->particle[target].type].len[0]; ndist = interact.dist; orientin = TRUE; orient = 0.0; exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project); /* now we have closest distance so we can calculate repulsion*/ repenergy = erepulsive(&interact); //printf("dist: %f",interact.dist); /*save chiral stuff*/ olddir = interact.part1->dir; if ((interact.param->geotype[0] == 
CHCPSC)||(interact.param->geotype[0] == CHPSC)) { interact.part1->dir = interact.part1->chdir[0]; exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project); } if (( interact.dist > interact.param->rcut ) || (interact.param->epsilon == 0.0 ) || ( (interact.part1->patchdir[0].z >0)&&(positive) ) || ( (interact.part1->patchdir[0].z <0)&&(!(positive)) ) ) atrenergy = 0.0; else { atrenergy = exter2_atre(&interact,&orientin,&rcmz,&interendz,&positive,orient,&project,&ndist,0,halfl); } if ((interact.param->geotype[0] == TCPSC)||(interact.param->geotype[0] == TPSC)|| (interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC)) { if ((interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC)) { interact.part1->dir = interact.part1->chdir[1]; exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project); } exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project); if (( interact.dist > interact.param->rcut ) || (interact.param->epsilon == 0.0 ) || ( (interact.part1->patchdir[1].z >0)&&(positive) ) || ( (interact.part1->patchdir[1].z <0)&&(!(positive)) ) ) atrenergy += 0.0; else { atrenergy += exter2_atre(&interact,&orientin,&rcmz,&interendz,&positive,orient,&project,&ndist,1,halfl); } } if ((interact.param->geotype[0] == CHCPSC)||(interact.param->geotype[0] == CHPSC)|| (interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC) ) { interact.part1->dir = olddir; } //printf("%f %f \n",conf->particle[target].pos.z*conf->box.z,repenergy+atrenergy); return repenergy+atrenergy; } double exter2_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive, double orient,struct vector *project, double *ndist,int numpatch, double halfl) { struct vector pbeg,pend; /* projected spherocylinder begining and end*/ double a,length1,length2, f0,f1; struct vector cm1,cm2; /* centrar of interacting segments */ int line; struct vector partbeg,partend; /*closest and furthest point of particle*/ struct vector inters; double atrenergy=0.0; int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, double* halfl,BOOL* orientin,BOOL* positive, double* rcmz, double * cut, struct vector* partbeg, struct vector* partend); int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, BOOL* positive, double * cut, struct vector* partbeg, struct vector* partend); /*interaction with PATCHY SPHEROCYLINDERS*/ if ((interact->param->geotype[0] < SP)&&(interact->param->geotype[0] > SCA)) { //printf("partdir: %f %f %f \n ",interact->part1->dir.x,interact->part1->dir.y,interact->part1->dir.z); //printf("patchdir: %f %f %f \n ",interact->part1->patchdir[0].x,interact->part1->patchdir[0].y,interact->part1->patchdir[0].z); /* calculate position of closest and furthest point (begining and end of spherocylinder)*/ a = (*orientin-0.5)*2; /*if orientin a =1 else a=-1 */ partbeg.x = a * interact->part1->dir.x * halfl; partbeg.y = a * interact->part1->dir.y * halfl; partbeg.z = *rcmz + a * interact->part1->dir.z *halfl; partend.x = - a * interact->part1->dir.x * halfl; partend.y = - a * interact->part1->dir.y * halfl; partend.z = *rcmz - a * interact->part1->dir.z * halfl; //printf("partbeg %f %f %f partend %f %f %f \n",partbeg.x,partbeg.y,partbeg.z,partend.x,partend.y,partend.z); /*calculate interacting line segment and its cm of spherocylinder*/ /*calculate end point z*/ if ( 
(interact->param->rcut - interact->dist)/fabs(interact->part1->dir.z) < 2.0*halfl ){ /*if cutoff goes through spherocylinder the end point is at cutoff*/ *interendz *= interact->param->rcut; } else { /*endpoint is at the end of spherocylinders*/ *interendz = partend.z; } /*calculate CM of interacting line segment of spherocylinder*/ if (*positive) { cm1.z = AVER(*interendz,interact->dist); } else { cm1.z = AVER(*interendz,-interact->dist); } if (interact->part1->dir.z != 0.0 ) { a = (*interendz - cm1.z ) / interact->part1->dir.z; length1= -orient*2.0*a; a = a + orient*halfl; } else { a = 0.0; length1 = 2.0*halfl; } //printf("len1: %f rcm %f interz %f cutoff %f \n",length1,rcmz, interendz,interact.dist); cm1.x = interact->part1->dir.x * a; cm1.y = interact->part1->dir.y * a; /* we have interacting segment*/ if ((interact->param->geotype[0] == CPSC)||(interact->param->geotype[0] == CHCPSC)) { /*CPSC type*/ if ( ((*interendz >= interact->dist)&&(*positive)) || ((*interendz <= -interact->dist)&&(!(*positive))) ){ /*test if projection is not all out of interaction*/ line = cpsc_wall(&pbeg,&pend,project,&interact->part1->dir, \ &interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend); //printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y); } else { line = 0; } } else { /*PSC and CHPSC interaction with wall */ line = psc_wall(&pbeg,&pend,project,&interact->part1->dir, \ positive,&interact->param->rcut,&partbeg,&partend); //printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y); } if (line > 0) { /*cm2 by average begining and end*/ cm2.x = AVER(pbeg.x,pend.x); cm2.y = AVER(pbeg.y,pend.y); cm2.z = 0.0; /*length by size of end-benining*/ length2 = sqrt( (pend.x-pbeg.x)*(pend.x-pbeg.x)+(pend.y-pbeg.y)*(pend.y-pbeg.y) ); inters.x = cm2.x - cm1.x; inters.y = cm2.y - cm1.y; inters.z = cm2.z - cm1.z; //printf("cm2 %f %f %f inters %f %f %f \n",cm2.x,cm2.y,cm2.z,inters.x,inters.y,inters.z); *ndist = sqrt(DOT(inters,inters)); if (*ndist < interact->param->pdis) { atrenergy = -interact->param->epsilon; } else { atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon; } /* scaling function1: dependence on the length of intersetions plus*/ f0=(length1 + length2)*0.5; /*scaling with angle*/ f1 = fabs(interact->part1->patchdir[numpatch].z); atrenergy *= f0*f1; //printf(" %f %f %f %f %f %f %f \n",conf->particle[target].pos.z*conf->box.z,atrenergy, area, length1, length2,f0,ndist); //printf("%f %f %f %f\n",pbeg.x,pbeg.y,pend.x,pend.y); } else { atrenergy = 0.0; } } else { if (*ndist < interact->param->pdis) atrenergy = -interact->param->epsilon; else { atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon; } /*add wall scaling wall area/ particle arear.. 
to reflect that we have a wall not sphere */ atrenergy *= (interact->param->rcut*interact->param->rcut - (*ndist)*(*ndist))/(interact->param->sigma*interact->param->sigma) ; } return atrenergy; } void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient, double *rcmz,double *interendz, struct vector *project) { if (*rcmz < 0) { interact->dist = -(*rcmz); *positive = FALSE; *interendz = -1.0; project->z = 1.0; } else { interact->dist = (*rcmz); *positive = TRUE; *interendz = 1.0; project->z = -1.0; } /*psc closest is allways end closer to wall*/ if (interact->param->geotype[0] < SP ){ /*calculate closest point distance*/ if (interact->part1->dir.z > 0) { if (*positive) { *orientin = FALSE; *orient = -1.0; interact->dist = *rcmz -interact->part1->dir.z * interact->param->half_len[0]; } else { *orientin = TRUE; *orient = 1.0; interact->dist = -( *rcmz + interact->part1->dir.z * interact->param->half_len[0]); } } else { if (*positive) { *orientin = TRUE; *orient = 1.0; interact->dist = *rcmz + interact->part1->dir.z * interact->param->half_len[0]; } else { *orientin = FALSE; *orient = -1.0; interact->dist = -( *rcmz -interact->part1->dir.z * interact->param->half_len[0]); } } } } /*...........................................................................*/ /*Calculates interaction of target particle and external field calculate projection of patch of spherocylinder on wall evaluate intersection area and calculate interaction from that */ double exter_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive, double orient,struct vector *project, double *ndist,int numpatch,double halfl) { double area,a,b,c,r2; double atrenergy=0.0; /* energy*/ BOOL countend; struct vector cm1,cm2; /* centrar of interacting segments */ struct vector pbeg,pend; /* projected spherocylinder begining and end*/ struct vector inters,newdir; struct vector pbeg1,pend1,pbeg2,pend2,pextr1,pextr2,pextr3,pextr4; /*additinal point of projected patch for calculation of area */ double length1, cuttoproject, f0; int line, line1, line2,extra; struct vector partbeg,partend; /*closest and furthest point of particle*/ double erepulsive(struct interacts *); struct vector vec_perpproject(struct vector*, struct vector*); void normalise(struct vector *); double fanglscale(double, struct ia_param *, int which); struct vector vec_create(double, double, double); double areaeightpoints(struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*); int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, double* halfl,BOOL* orientin,BOOL* positive, double* rcmz, double * cut, struct vector* partbeg, struct vector* partend); int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, BOOL* positive, double * cut, struct vector* partbeg, struct vector* partend); int cutprojectatwall(struct vector* pextr1, struct vector* pextr2, struct vector* pextr3, struct vector* pextr4, struct vector* projectdir, struct vector* partdir, double * cutdist, struct vector *partbeg, struct vector *partend, struct vector *pend, double *cuttoproject, BOOL* orientin); void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient, double *rcmz,double *interendz, struct vector *project); /*interaction with PATCHY SPHEROCYLINDERS*/ if ((interact->param->geotype[0] < 
SP)&&(interact->param->geotype[0] > SCA)) { //printf("partdir: %f %f %f \n ",interact->part1->dir.x,interact->part1->dir.y,interact->part1->dir.z); //printf("patchdir: %f %f %f \n ",interact->part1->patchdir[numpatch].x,interact->part1->patchdir[numpatch].y,interact->part1->patchdir[numpatch].z); /* calculate position of closest and furthest point (begining and end of spherocylinder)*/ a = (*orientin-0.5)*2; /*if orientin a =1 else a=-1 */ partbeg.x = a * interact->part1->dir.x * halfl; partbeg.y = a * interact->part1->dir.y * halfl; partbeg.z = *rcmz + a * interact->part1->dir.z * halfl; partend.x = - a * interact->part1->dir.x * halfl; partend.y = - a * interact->part1->dir.y * halfl; partend.z = *rcmz - a * interact->part1->dir.z * halfl; //printf("partbeg %f %f %f partend %f %f %f \n",partbeg.x,partbeg.y,partbeg.z,partend.x,partend.y,partend.z); /*calculate interacting line segment and its cm of spherocylinder*/ /*calculate end point z*/ if ( (interact->param->rcut - interact->dist)/fabs(interact->part1->dir.z) < halfl*2.0 ){ /*if cutoff goes through spherocylinder the end point is at cutoff*/ *interendz *= interact->param->rcut; } else { /*endpoint is at the end of spherocylinders*/ *interendz = partend.z; } /*calculate CM of interacting line segment of spherocylinder*/ if (*positive) { cm1.z = AVER(*interendz,interact->dist); } else { cm1.z = AVER(*interendz,-interact->dist); } if (interact->part1->dir.z != 0.0 ) { a = (*interendz - cm1.z ) / interact->part1->dir.z; length1= -orient*2.0*a; a = a + orient*halfl; } else { a = 0.0; length1 = 2.0*halfl; } //printf("len1: %f rcm %f interz %f cutoff %f \n",length1,rcmz, interendz,interact->dist); cm1.x = interact->part1->dir.x * a; cm1.y = interact->part1->dir.y * a; /*calculate projection on wall as infinite line and make it interacting segment*/ if (interact->part1->patchdir[numpatch].z != 0) { cuttoproject = -interact->param->rcut*interact->part1->patchdir[numpatch].z; /*z coordinate of point where projection is in cut distance*/ if ( ((partend.z < cuttoproject)&&(*positive)) || ((cuttoproject < partend.z)&&(!(*positive))) ){ cuttoproject = partend.z; } } else { cuttoproject = partbeg.z; } //printf("cutproject %f \n",cuttoproject); //printf("cm1 %f %f %f \n",cm1.x, cm1.y,cm1.z ); /* we have interacting segment*/ if ((interact->param->geotype[0] == CPSC)||(interact->param->geotype[0] == CHCPSC)) { /*CPSC type*/ if ( ((cuttoproject >= interact->dist)&&(*positive)) || ((cuttoproject <= -interact->dist)&&(!(*positive))) ){ /*test if projection is not all out of interaction*/ line = cpsc_wall(&pbeg,&pend,&interact->part1->patchdir[numpatch],&interact->part1->dir, \ &interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend); //printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y); } else { line = 0; } } else { /*PSC and CHPSC interaction with wall */ line = psc_wall(&pbeg,&pend,&interact->part1->patchdir[numpatch],&interact->part1->dir, \ positive,&interact->param->rcut,&partbeg,&partend); //printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y); } if (line > 0) { area = 0.0; /*project cutoff boudaries*/ if (line == 2 ) { /*if projection end is on sphere of begining don't care about cylinder cutoff*/ extra = 0; } else { extra = cutprojectatwall(&pextr1, &pextr2, &pextr3, &pextr4, &interact->part1->patchdir[numpatch], \ &interact->part1->dir, &interact->param->rcut, &partbeg, &partend,&pend,&cuttoproject,orientin); } //printf("extr1: %d %f %f extr2 %f %f extr3 %f %f 
extr4 %f %f \n",extra,pextr1.x,pextr1.y,pextr2.x,pextr2.y,pextr3.x,pextr3.y,pextr4.x,pextr4.y); /*project patch boundaries on the first side*/ newdir=interact->part1->patchsides[0+2*numpatch]; line1 = cpsc_wall(&pbeg1,&pend1,&newdir,&interact->part1->dir, \ &interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend); if ( ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) ) { line1 = psc_wall(&pbeg1,&pend1,&newdir,&interact->part1->dir, \ positive,&interact->param->rcut,&partbeg,&partend); } //printf("line1: %d beg1 %f %f end1 %f %f \n",line1,pbeg1.x,pbeg1.y,pend1.x,pend1.y); /*project patch boundaries on the second side*/ newdir=interact->part1->patchsides[1+2*numpatch]; line2 = cpsc_wall(&pbeg2,&pend2,&newdir,&interact->part1->dir, \ &interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend); if ( ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) ) { line2 = psc_wall(&pbeg2,&pend2,&newdir,&interact->part1->dir, \ positive,&interact->param->rcut,&partbeg,&partend); } //printf("line2: %d beg2 %f %f end2 %f %f \n",line2,pbeg2.x,pbeg2.y,pend2.x,pend2.y); /*calculate area*/ if (extra == 0) { /*thish should only happen when there is PSC interacting only with end*/ if (line1 == 0) { if (line2==0) { /*circle around middle-pbeg*/ area = PI*( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); } else{ /* circle around middle-pbeg minus circle segment*/ a = AVER(pbeg2.x,pend2.x); b = AVER(pbeg2.y,pend2.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/ area = r2*(PI-acos(sqrt(c/r2))) + sqrt(r2*c-c*c); } } else { if (line2==0) { /* circle around middle-pbeg minus circle segment*/ a = AVER(pbeg1.x,pend1.x); b = AVER(pbeg1.y,pend1.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/ area = r2*(PI-acos(sqrt(c/r2))) + sqrt(r2*c-c*c); } else { //area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 E E1 B1 */ /*circle minus two circle segments*/ r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/ area = r2*PI; a = AVER(pbeg1.x,pend1.x); b = AVER(pbeg1.y,pend1.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ area += -r2*acos(sqrt(c/r2)) + sqrt(r2*c-c*c); a = AVER(pbeg2.x,pend2.x); b = AVER(pbeg2.y,pend2.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ area += -r2*acos(sqrt(c/r2)) + sqrt(r2*c-c*c); } } } else { b = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend.y)- (pextr2.x-pend.x)*(pextr4.y-pextr2.y));/*pend on 42*/ c = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend.y)- (pextr3.x-pend.x)*(pextr1.y-pextr3.y));/*pend on 13*/ if ( ( b< ZEROTOL) || ( c< ZEROTOL) ) countend = FALSE; else countend = TRUE; if (line1 == 0) { if (line2 == 0) { if ( countend ) { area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pextr1,NULL,NULL);/* B 2 4 E 3 1 */ } else area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pextr1,NULL,NULL,NULL);/* B 2 4 3 1 */ } else { a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y)); if ( a< ZEROTOL) /*pend2 on 42*/ 
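/* Illustrative sketch (kept in #if 0, not compiled): the area assembly in
   this block combines polygon areas from areaeightpoints() - presumably a
   shoelace-type formula over up to eight xy-projected vertices, as sketched
   below - with circular segments of the form r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c),
   where r2 is the squared radius and c the squared distance from the circle
   centre to the chord. */
#if 0
static double polygon_area_xy(const struct vector *v, int n)
{
    double area = 0.0;
    int i, j;
    for (i = 0; i < n; i++) {
        j = (i + 1) % n;
        area += v[i].x * v[j].y - v[j].x * v[i].y;   /* shoelace term */
    }
    return 0.5 * fabs(area);
}
#endif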
{ if ( countend ) { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pextr1,NULL); /* B B2 E2 4 E 3 1 */ } else { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pextr1,NULL,NULL); /* B B2 E2 4 3 1 */ } } else { a = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend2.y)- (pextr3.x-pend2.x)*(pextr1.y-pextr3.y)); if ( a< ZEROTOL) /*pend2 on 13*/ { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr1,NULL,NULL,NULL,NULL); /* B B2 E2 1 */ } else { /*pend2 on 34 or on begining sphere of psc*/ if (line2 == 2) { if ( countend ) { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pextr1,NULL); /* B B2 E2 4 E 3 1 */ } else { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pextr1,NULL,NULL); /* B B2 E2 4 3 1 */ } } else { if ( countend ) { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pextr1,NULL,NULL); /* B B2 E2 E 3 1 */ } else { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pextr1,NULL,NULL,NULL); /* B B2 E2 3 1 */ } } } } } } else { a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend1.y)- (pextr2.x-pend1.x)*(pextr4.y-pextr2.y)); if ( a< ZEROTOL) /*pend1 on 42*/ { if (line2 == 0) { area = areaeightpoints(&pbeg,&pextr2,&pend1,&pbeg1,NULL,NULL,NULL,NULL); /* B 2 E1 B1 */ } else { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL); /* B B2 E2 E1 B1 */ } } else { a = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend1.y)- (pextr3.x-pend1.x)*(pextr1.y-pextr3.y)); if ( a< ZEROTOL) /*pend1 on 13*/ { if (line2 == 0) { if (countend) { area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B 2 4 E 3 E1 B1 */ } else { area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B 2 4 3 E1 B1 */ } } else { a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y)); if ( a< ZEROTOL) /*pend2 on 42*/ { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1); /* B B2 E2 4 E 3 E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 4 3 E1 B1 */ } else { a = fabs((pextr3.x-pextr1.x)*(pextr1.y-pend2.y)- (pextr1.x-pend2.x)*(pextr3.y-pextr1.y)); if ( a< ZEROTOL) /*pend2 on 31*/ { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL); /* B B2 E2 E1 B1 */ } else { /*pend2 close to 34 or on begining sphere of psc*/ if (line2 == 2) { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1); /* B B2 E2 4 E 3 E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 4 3 E1 B1 */ } else { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 E 3 E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 3 E1 B1 */ } } } } } else {/*pend1 close to 34 or on beging sphere for psc*/ if (line2 == 0) { if (line1 ==2) { if (countend) area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B 2 4 E 3 E1 B1*/ else { area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B 2 4 3 E1 B1*/ } } else { if (countend) area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pend1,&pbeg1,NULL,NULL); /* B 2 4 E E1 B1*/ else { area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend1,&pbeg1,NULL,NULL,NULL); /* B 2 4 E1 B1*/ } } } else { a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y)); if ( a< ZEROTOL) /* pend2 on 42 */ { if (countend) area = 
areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pend1,&pbeg1,NULL); /* B B2 E2 4 E E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 4 E1 B1 */ } else { /*pend1 and pend2 close to 34 or on beging sphere for psc*/ if (line2 == 2) { if (line1 == 2) { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1); /* B B2 E2 4 E 3 E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 4 3 E1 B1 */ } else { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pend1,&pbeg1,NULL); /* B B2 E2 4 E E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 4 E1 B1 */ } } else { if (line1 == 2) { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 E 3 E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 3 E1 B1 */ } else { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 E E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL); /* B B2 E2 E1 B1 */ } } } } } } } /*extra != 0*/ if ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) { if (line1==2) { /* add circle segment*/ a = AVER(pextr1.x,pend1.x); /*end to cutoff - pextr1 ,pend1 */ b = AVER(pextr1.y,pend1.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ r2 = ( (partbeg.x-pend1.x)*(partbeg.x-pend1.x) + (partbeg.y-pend1.y)*(partbeg.y-pend1.y)); /*radius squared*/ area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c); a = AVER(pbeg.x,pbeg1.x); /* between beginings - pbeg ,pbeg1 */ b = AVER(pbeg.y,pbeg1.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c); } else { if (line1==0) { /* add circle segment*/ a = AVER(pextr1.x,pbeg.x); /* begining to cutoff*/ b = AVER(pextr1.y,pbeg.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/ area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c); } } if (line2==2) { /* add circle segment*/ a = AVER(pextr3.x,pend2.x); /*end to cutoff - pextr3 ,pend2 */ b = AVER(pextr3.y,pend2.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ r2 = ( (partbeg.x-pend2.x)*(partbeg.x-pend2.x) + (partbeg.y-pend2.y)*(partbeg.y-pend2.y)); /*radius squared*/ area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c); a = AVER(pbeg.x,pbeg2.x); /* between beginings - pbeg ,pbeg2 */ b = AVER(pbeg.y,pbeg2.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c); } else { if (line2==0) { /* add circle segment*/ a = AVER(pextr3.x,pbeg.x); /* begining to cutoff*/ b = AVER(pextr3.y,pbeg.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/ area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c); } } } } /*area finished*/ /*cm2 by average begining and end*/ cm2.x = AVER(pbeg.x,pend.x); cm2.y = AVER(pbeg.y,pend.y); cm2.z = 0.0; /*length by size of end-benining*/ //length2 = sqrt( 
(pend.x-pbeg.x)*(pend.x-pbeg.x)+(pend.y-pbeg.y)*(pend.y-pbeg.y) ); inters.x = cm2.x - cm1.x; inters.y = cm2.y - cm1.y; inters.z = cm2.z - cm1.z; //printf("cm2 %f %f %f inters %f %f %f \n",cm2.x,cm2.y,cm2.z,inters.x,inters.y,inters.z); *ndist = sqrt(DOT(inters,inters)); if (*ndist < interact->param->pdis) { atrenergy = -interact->param->epsilon; } else { atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon; } /* scaling function1: dependence on the length of intersetions plus SCALING WITH AREA*/ f0=(length1 + area / interact->param->sigma)*0.5; atrenergy *= f0; //printf(" %f %f %f %f %f %f %f %d %d %d \n",conf->particle[target].pos.z*conf->box.z,atrenergy, area, length1, length2,f0,ndist,extra,line1,line2); //printf("%f %f %f %f\n",pbeg.x,pbeg.y,pend.x,pend.y); //printf("%f %f %f %f %f %f\n",pbeg2.x,pend2.y,pextr2.x,pextr2.y,pextr1.x,pextr1.y); } else { atrenergy = 0.0; } } else { if (*ndist < interact->param->pdis) atrenergy = -interact->param->epsilon; else { atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon; } /*add wall scaling wall area/ particle arear.. to reflect that we have a wall not sphere */ atrenergy *= (interact->param->rcut*interact->param->rcut - (*ndist)*(*ndist))/(interact->param->sigma*interact->param->sigma) ; } //printf("%f %f \n",conf->particle[target].pos.z*conf->box.z,atrenergy); return atrenergy; } /*..............................................................................*/ /* Initializes the array with the pointers to the energy function */ void init_intfce(double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo){ // NB // Fill in the names of the functions for calculating the // interaction energy long geotype, other_geotype; int i, j; for(i = 0; i < MAXT; i++){ for(j = 0; j < MAXT; j++){ /* Initialize them as not existing */ intfce[i][j] = &enoexist; geotype = topo->ia_params[i][j].geotype[0]; other_geotype = topo->ia_params[i][j].geotype[1]; if ( ( (geotype == CHCPSC || geotype == CPSC || geotype == TCHCPSC || geotype == TCPSC) && (other_geotype == CHPSC || other_geotype == PSC || other_geotype == TCHPSC || other_geotype == TPSC) ) || ( (geotype == CHPSC || geotype == PSC || geotype == TCHPSC || geotype == TPSC) && (other_geotype == CHCPSC || other_geotype == CPSC || other_geotype == TCHCPSC || other_geotype == TCPSC) ) ) { intfce[i][j] = &e_psc_cpsc; } if ( (geotype == CHCPSC || geotype == CPSC || geotype == TCHCPSC || geotype == TCPSC) && (other_geotype == CHCPSC || other_geotype == CPSC || other_geotype == TCHCPSC || other_geotype == TCPSC) ){ intfce[i][j] = &e_cpsc_cpsc; } if ( (geotype == CHPSC || geotype == PSC || geotype == TCHPSC || geotype == TPSC) && (other_geotype == CHPSC || other_geotype == PSC || other_geotype == TCHPSC || other_geotype == TPSC) ){ intfce[i][j] = &e_psc_psc; } if(geotype == SCN || geotype == SPN || other_geotype == SCN || other_geotype == SPN){ intfce[i][j] = &e_spn_or_scn; } if((geotype == SCA && other_geotype == SCA) || (geotype == SPA && other_geotype == SPA)){ intfce[i][j] = &e_2sca_or_2spa; } if((geotype == SCA && other_geotype == SPA) || (geotype == SPA && other_geotype == SCA)){ intfce[i][j] = &e_spa_sca; } if(( (geotype == PSC || geotype == CHPSC || geotype == TCHPSC || geotype == TPSC) && other_geotype == SPA) || (geotype == SPA && (other_geotype == PSC||other_geotype == CHPSC || other_geotype == TCHPSC || other_geotype == TPSC) )){ intfce[i][j] = 
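/* Illustrative sketch (kept in #if 0, not compiled): once init_intfce() has
   filled the table, paire() dispatches through it by particle type: */
#if 0
static double dispatch_pair_energy(double (*intfce[MAXT][MAXT])(struct interacts *),
                                   struct interacts *ia)
{
    /* same lookup as in paire(); a NULL entry is reported there as an error */
    return (*intfce[ia->part1->type][ia->part2->type])(ia);
}
#endif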
&e_psc_spa; } if(( (geotype == CPSC ||geotype == CHCPSC || geotype == TCHCPSC || geotype == TCPSC) && other_geotype == SPA) || (geotype == SPA && (other_geotype == CPSC||other_geotype == CHCPSC || other_geotype == TCHCPSC || other_geotype == TCPSC) )){ intfce[i][j] = &e_cpsc_spa; } } } } /*..............................................................................*/ /* Compare energy change to temperature and based on Boltzmann probability return either 0 to accept or 1 to reject the move */ int movetry(double energyold, double energynew, double temperature) { double ran2(long *); /*DEBUG printf (" Move trial: %13.8lf %13.8lf %13.8lf %13.8lf\n", energynew, energyold, temperature, ran2(&seed));*/ if (energynew <= energyold ) return 0; else if (exp(-1.0*(energynew-energyold)/temperature) > ran2(&seed)) return 0; else return 1; } /*..............................................................................*/ /* * Calculate the different energy contributions. This is a merge of the different * energy calculation functions (energyone, -chain, -all) * 0: all * 1: one * 2: chain */ double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim, int chainnum) { long i=0,j=0; double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf); // double extere(long, struct topo * topo, struct conf * conf); double extere2(long, struct topo * topo, struct conf * conf); //DEBUG_SIM("Calculate the energy with mode %d", mode) double energy = 0; /* Calculates energy between particle "target" and the rest. Returns energy */ if(mode == 1){ if (sim->pairlist_update) { #ifdef OMP #pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic) #endif for (i = 0; i < sim->pairlist[target].num_pairs; i++){ energy+= paire(target, sim->pairlist[target].pairs[i], intfce, topo, conf); } } else{ #ifdef OMP #pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic) #endif for (i = 0; i < target; i++) { energy+= paire(target, i, intfce, topo, conf); } #ifdef OMP #pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic) #endif for (i = target + 1; i < topo->npart; i++) { energy+= paire(target, i, intfce, topo, conf); } } /*add interaction with external potential*/ if (topo->exter.exist) energy+= extere2(target,topo,conf); } /* * Calculates energy between particle "target" and the rest. skipping * particles from the given chain -particles has to be sorted in chain!! * so similar to energy one but with chain exception */ else if(mode == 2){ //#ifdef OMP //#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic) //#endif for (i = 0; i < target; i++) { if (i != topo->chainlist[chainnum][j]) { energy+= paire(target, i, intfce, topo, conf); } else { j++; } } j++; //#ifdef OMP //#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic) //#endif for (i = target + 1; i < topo->npart; i++) { if (i != topo->chainlist[chainnum][j]) { energy+= paire(target, i, intfce, topo, conf); } else { j++; } } /*add interaction with external potential*/ if (topo->exter.exist) energy+= extere2(target,topo,conf); } /* Calculates energy between all pairs. 
Returns energy */ else if(mode == 0){ #ifdef OMP #pragma omp parallel for private(i,j) reduction (+:energy) schedule (dynamic) #endif for (i = 0; i < topo->npart - 1; i++) { for (j = i + 1; j < topo->npart; j++) { energy+= paire(i, j, intfce, topo, conf); } /*for every particle add interaction with external potential*/ if (topo->exter.exist) energy+= extere2(i,topo,conf); } /*add interaction of last particle with external potential*/ if (topo->exter.exist) energy+= extere2(topo->npart-1,topo,conf); } else { fprintf(stderr, "ERROR: Wrong mode (%d) was given to calc_energy!", mode); return 0.0; } // DEBUG_SIM("Will return energy from calc_energy") //printf("energymove %f\n",energy); return energy; } /*..............................................................................*/ /* Checks for overlaps between particle "target" and the rest. Returns 1 if overlap detected, 0 otherwise. */ int forbidden(long npart, struct particles *particle, long target, struct vector box, struct ia_param ia_params[MAXT][MAXT]) { long test; int overlap(struct particles, struct particles, struct vector,struct ia_param [MAXT][MAXT]); for (test=0; test<npart; test++) { if (test != target) { if ( overlap(particle[target], particle[test], box, ia_params) ) { return 1; } } } return 0; } /*..............................................................................*/ /* Checks for overlaps between all pairs of particles. Returns 1 if overlap detected, 0 otherwise. */ int checkall(long npart, struct particles *particle, struct vector box, struct ia_param ia_params[MAXT][MAXT]) { long i, j; int overlap(struct particles, struct particles, struct vector, struct ia_param [MAXT][MAXT]); for (i=0; i<npart-1; i++) { for (j=i+1; j<npart; j++) { if ( overlap(particle[i], particle[j], box, ia_params) ) { return 1; } } } return 0; } /*..............................................................................*/ /* Optimize the maximum displacement within the specified limits and resets the acceptance counters to zero. */ void optimizestep(struct disp *x, double hi, double lo) { double newrmsd; newrmsd = (*x).mx * RATIO(*x); if ((*x).oldrmsd > 0) { if ( newrmsd < (*x).oldrmsd ) { if ( (*x).oldmx > 1 ) { (*x).mx /= 1.05; (*x).oldmx = 0.95; } else { (*x).mx *= 1.05; (*x).oldmx = 1.05; } } else { if ( (*x).oldmx > 1 ) { (*x).mx *= 1.05; (*x).oldmx = 1.05; } else { (*x).mx /= 1.05; (*x).oldmx = 0.95; } } } if (newrmsd > 0 ) (*x).oldrmsd = newrmsd; else { (*x).oldrmsd = 0.0; (*x).mx /= 1.05; (*x).oldmx = 0.95; } if ( (*x).mx > hi ) (*x).mx = hi; if ( (*x).mx < lo ) (*x).mx = lo; (*x).acc = (*x).rej = 0; } /*..............................................................................*/ /* Optimize the maximum rotation within the specified limits and resets the acceptance counters to zero. 
Rotation is given by the cosine of the angle; a larger rotation corresponds to a smaller cosine. */
void optimizerot(struct disp *x, double hi, double lo)
{
    double newrmsd;

    newrmsd = (*x).mx * RATIO((*x)) ;
    if ((*x).oldrmsd > 0) {
        if ( newrmsd > (*x).oldrmsd ) {
            if ( (*x).oldmx > 1) {
                (*x).mx *= 0.99;
                (*x).oldmx *= 0.99;
            } else {
                (*x).mx *= 1.01;
                (*x).oldmx *= 1.01;
            }
        } else {
            if ( (*x).oldmx > 1) {
                (*x).mx *= 1.01;
                (*x).oldmx *= 1.01;
            } else {
                (*x).mx *= 0.99;
                (*x).oldmx *= 0.99;
            }
        }
    }
    if (newrmsd > 0 )
        (*x).oldrmsd = newrmsd;
    else {
        (*x).oldrmsd = 0.0;
        (*x).mx *= 1.01;
        (*x).oldmx = 1.01;
    }
    if ( (*x).mx > hi ) (*x).mx = hi;
    if ( (*x).mx < lo ) (*x).mx = lo;

    (*x).acc = (*x).rej = 0;
}

/*................................................................................*/
/* Accumulate a value into the statistics and update the mean and rms values. */
void accumulate(struct stat *q, double x)
{
    (*q).sum += x;
    (*q).sum2 += x*x;
    (*q).samples++;
    (*q).mean = (*q).sum / (*q).samples;
    (*q).rms = sqrt(fabs((*q).sum2 / (*q).samples - (*q).sum * (*q).sum / (*q).samples / (*q).samples));
}

void printeqstat(struct disp *dat, double scale, int length)
{
    int i;

    for (i=0;i<length;i++) {
        if (RATIO(dat[i]) > 0)
            printf (" TYPE %d %.6lf / %.6lf\n", i, dat[i].mx/scale,RATIO(dat[i]));
    }
}

int memoryalloc(struct conf * conf)
{
    printf ("Allocating memory...\n");
    conf->particle = malloc( sizeof(struct particles)*MAXN);
    if(conf->particle == NULL){
        return 1;
    }
    return 0;
}

int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim)
{
    int dealloc_pairlist(struct topo * topo, struct sim * sim);

    printf ("Deallocating memory...\n");
    if (conf->particle != NULL)
        free(conf->particle);
    conf->particle = NULL;
    if (sim->clusterlist != NULL)
        free(sim->clusterlist);
    if (sim->clustersenergy != NULL)
        free(sim->clustersenergy);
    if(topo->switchlist){
        free(topo->switchlist);
    }
    if (sim->pairlist_update) {
        if(dealloc_pairlist(topo, sim)){
            return 1;
        }
    }
    return 0;
}

/*............................................................................*/
/**
 * nice malloc, which does the error checking for us
 */
void * xmalloc (size_t num){
    void *new = malloc (num);
    if (!new){
        fprintf(stderr, "Couldn't allocate any memory!\n");
        exit(1);
    }
    return new;
}

/*............................................................................*/

/* *********************** GEOMETRICAL FUNCTIONS **************************** */

/*.........................PATCHY SPHEROCYLINDERS INTERACTION....................*/
/*................................................................................*/
/* Calculate intersections of sc2 with a patch of sc1 and return them in the
   intersections array */
int psc_intersect(struct particles * part1, struct particles * part2, double halfl1, double halfl2,
                  struct vector r_cm, double intersections[5], double rcut, struct ia_param * param,
                  int which, int patchnum)
{
    int intrs;
    double a, b, c, d, e, x1, x2, rcut2;
    struct vector cm21, vec1, vec2, vec3, vec4;
    struct vector vec_crossproduct(struct vector, struct vector);
    struct vector vec_sub(struct vector, struct vector);
    struct vector vec_create(double, double, double);
    struct vector vec_scale(struct vector, double);
    struct vector vec_perpproject(struct vector*, struct vector*);
    struct quat quat_create(struct vector, double, double);
    void vec_rotate(struct vector *, struct quat);
    void normalise(struct vector *);
    int find_intersect_plane(struct particles *, struct particles *, double, struct vector, struct vector, double, double, double *);
    int test_intrpatch(struct particles *, struct vector, double, double, double *,int);
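    /* Illustrative sketch (kept in #if 0, not compiled): the cylinder and
       end-sphere tests below parametrize points on the axis of sc2 as
       x(t) = t*dir2 - r_cm (relative to sc1) and require them to lie at the
       cut distance, which reduces each time to a quadratic
       a*t^2 + b*t + c = 0 with the coefficients built in the code: */
#if 0
static int quad_roots(double a, double b, double c, double *t1, double *t2)
{
    double d = b*b - 4*a*c;        /* discriminant, as in the code below */
    if (d < 0) return 0;           /* no intersection at the cut distance */
    *t1 = (-b + sqrt(d))*0.5/a;
    *t2 = (-b - sqrt(d))*0.5/a;
    return (d > 0) ? 2 : 1;
}
#endif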
void printeqstat(struct disp *dat, double scale, int length)
{
    int i;

    for (i = 0; i < length; i++) {
        if (RATIO(dat[i]) > 0)
            printf(" TYPE %d %.6lf / %.6lf\n", i, dat[i].mx/scale, RATIO(dat[i]));
    }
}

int memoryalloc(struct conf * conf)
{
    printf("Allocating memory...\n");
    conf->particle = malloc(sizeof(struct particles)*MAXN);
    if (conf->particle == NULL) {
        return 1;
    }
    return 0;
}

int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim)
{
    int dealloc_pairlist(struct topo * topo, struct sim * sim);

    printf("Deallocating memory...\n");
    if (conf->particle != NULL) free(conf->particle);
    conf->particle = NULL;
    if (sim->clusterlist != NULL) free(sim->clusterlist);
    if (sim->clustersenergy != NULL) free(sim->clustersenergy);
    if (topo->switchlist) {
        free(topo->switchlist);
    }
    if (sim->pairlist_update) {
        if (dealloc_pairlist(topo, sim)) {
            return 1;
        }
    }
    return 0;
}

/*............................................................................*/

/**
 * nice malloc, which does the error checking for us
 */
void * xmalloc(size_t num)
{
    void *new = malloc(num);
    if (!new) {
        fprintf(stderr, "Couldn't allocate any memory!\n");
        exit(1);
    }
    return new;
}

/*............................................................................*/

/* *********************** GEOMETRICAL FUNCTIONS **************************** */

/*.........................PATCHY SPHEROCYLINDERS INTERACTION....................*/
/*................................................................................*/

/* Calculate intersections of sc2 with a patch of sc1 and return them in
   intersections */
int psc_intersect(struct particles * part1, struct particles * part2,
                  double halfl1, double halfl2, struct vector r_cm,
                  double intersections[5], double rcut,
                  struct ia_param * param, int which, int patchnum)
{
    int intrs;
    double a, b, c, d, e, x1, x2, rcut2;
    struct vector cm21, vec1, vec2, vec3, vec4;
    struct vector vec_crossproduct(struct vector, struct vector);
    struct vector vec_sub(struct vector, struct vector);
    struct vector vec_create(double, double, double);
    struct vector vec_scale(struct vector, double);
    struct vector vec_perpproject(struct vector*, struct vector*);
    struct quat quat_create(struct vector, double, double);
    void vec_rotate(struct vector *, struct quat);
    void normalise(struct vector *);
    int find_intersect_plane(struct particles *, struct particles *, double,
                             struct vector, struct vector, double, double, double *);
    int test_intrpatch(struct particles *, struct vector, double, double, double *, int);

    intrs = 0;
    rcut2 = rcut*rcut;
    /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at
      cut distance C*/
    /*1a- test intersection with half planes of patch and look how far they are
      from spherocylinder. If closer than C we have an intersection*/

    /* plane1 */
    /* find intersections of part2 with plane by par1 and patchsides[0] */
    intrs += find_intersect_plane(part1, part2, halfl2, r_cm, part1->patchsides[0+2*patchnum],
                                  rcut, param->pcanglsw[which+2*patchnum], intersections);
    // printf("plane1 %d\n", intrs);
    /* plane2 */
    /* find intersections of part2 with plane by par1 and patchsides[1] */
    intrs += find_intersect_plane(part1, part2, halfl2, r_cm, part1->patchsides[1+2*patchnum],
                                  rcut, param->pcanglsw[which+2*patchnum], intersections);
    if ((intrs == 2) && (param->pcanglsw[which+2*patchnum] < 0)) {
        fprintf(stderr, "ERROR: Patch is larger than 180 degrees and we are getting two segments - this has not been programmed yet.\n\n");
        exit(1);
    }
    // printf("plane2 %d\n", intrs);

    /*1b- test intersection with cylinder - it is at distance C*/
    if (intrs < 2) {
        cm21 = vec_scale(r_cm, -1.0);
        vec1 = vec_crossproduct(cm21, part1->dir);
        vec2 = vec_crossproduct(part2->dir, part1->dir);
        a = DOT(vec2, vec2);
        b = 2*DOT(vec1, vec2);
        c = -rcut*rcut + DOT(vec1, vec1);
        d = b*b - 4*a*c;
        if (d >= 0) { /*there is intersection with infinite cylinder */
            x1 = (-b+sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
            if ((x1 >= halfl2) || (x1 <= -halfl2)) intrs += 0; /*intersection is outside sc2*/
            else {
                /* vectors from center of sc1 to intersection with infinite cylinder*/
                vec1.x = part2->dir.x*x1 - r_cm.x;
                vec1.y = part2->dir.y*x1 - r_cm.y;
                vec1.z = part2->dir.z*x1 - r_cm.z;
                e = DOT(part1->dir, vec1);
                if ((e >= halfl1) || (e <= -halfl1)) intrs += 0; /*intersection is outside sc1*/
                else {
                    intrs += test_intrpatch(part1, vec1, param->pcanglsw[which+2*patchnum], x1, intersections, patchnum);
                }
            }
            if (d > 0) {
                x2 = (-b-sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
                if ((x2 >= halfl2) || (x2 <= -halfl2)) intrs += 0; /*intersection is outside sc2*/
                else {
                    vec2.x = part2->dir.x*x2 - r_cm.x;
                    vec2.y = part2->dir.y*x2 - r_cm.y;
                    vec2.z = part2->dir.z*x2 - r_cm.z;
                    e = DOT(part1->dir, vec2);
                    if ((e >= halfl1) || (e <= -halfl1)) intrs += 0; /*intersection is outside sc1*/
                    else {
                        intrs += test_intrpatch(part1, vec2, param->pcanglsw[which+2*patchnum], x2, intersections, patchnum);
                    }
                }
            }
        }
    }
    // printf ("cylinder %d x1 %f x2 %f e %f\n", intrs, x1, x2, e);

    /*1c- test intersection with spheres at the end - it is at distance C*/
    if (intrs < 2) {
        /*centers of spheres*/
        /*relative to the CM of sc2*/
        vec1.x = part1->dir.x*halfl1 - r_cm.x;
        vec1.y = part1->dir.y*halfl1 - r_cm.y;
        vec1.z = part1->dir.z*halfl1 - r_cm.z;
        vec2.x = -part1->dir.x*halfl1 - r_cm.x;
        vec2.y = -part1->dir.y*halfl1 - r_cm.y;
        vec2.z = -part1->dir.z*halfl1 - r_cm.z;

        /*sphere1*/
        a = DOT(part2->dir, part2->dir);
        b = 2.0*DOT(vec1, part2->dir);
        c = DOT(vec1, vec1) - rcut*rcut;
        d = b*b - 4*a*c;
        if (d >= 0) { /*if d<0 there are no intersections*/
            x1 = (-b + sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
            if ((x1 >= halfl2) || (x1 <= -halfl2)) intrs += 0; /*intersection is outside sc2*/
            else {
                vec3.x = part2->dir.x*x1 - r_cm.x;
                vec3.y = part2->dir.y*x1 - r_cm.y;
                vec3.z = part2->dir.z*x1 - r_cm.z;
                e = DOT(part1->dir, vec3);
                if ((e >= halfl1) || (e <= -halfl1)) {
                    /*intersection is not in the cylindrical part of sc1, so it is on the cap*/
                    intrs += test_intrpatch(part1, vec3, param->pcanglsw[which+2*patchnum], x1, intersections, patchnum);
                }
            }
            if (d > 0) {
                x2 = (-b - sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
                if ((x2 >= halfl2) ||
(x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/ else { vec4.x = part2->dir.x*x2 - r_cm.x; vec4.y = part2->dir.y*x2 - r_cm.y; vec4.z = part2->dir.z*x2 - r_cm.z; e = DOT(part1->dir,vec4); if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/ intrs+=test_intrpatch(part1,vec4,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum); } } } } // printf ("sphere1 %d x1 %f x2 %f e %f\n", intrs, x1, x2, e); /*sphere2*/ a = DOT(part2->dir,part2->dir); b = 2.0*DOT(vec2,part2->dir); c = DOT(vec2,vec2)-rcut*rcut; d = b*b-4*a*c; if (d >= 0) { /*if d<0 there are no intersections*/ x1= (-b + sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/ if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/ else { vec3.x = part2->dir.x*x1 - r_cm.x; vec3.y = part2->dir.y*x1 - r_cm.y; vec3.z = part2->dir.z*x1 - r_cm.z; e = DOT(part1->dir,vec3); if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/ intrs+=test_intrpatch(part1,vec3,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum); } } if ( d > 0 ) { x2= (-b - sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/ if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/ else { vec4.x = part2->dir.x*x2 - r_cm.x; vec4.y = part2->dir.y*x2 - r_cm.y; vec4.z = part2->dir.z*x2 - r_cm.z; e = DOT(part1->dir,vec4); if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/ intrs+=test_intrpatch(part1,vec4,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum); } } } } // printf ("sphere2 %d\n", intrs); } /*1d- if there is only one itersection shperocylinder ends within patch wedge set as second intersection end inside patch*/ if (intrs < 2 ) { /*whole spherocylinder is in or all out if intrs ==0*/ vec1.x = part2->dir.x*halfl2 - r_cm.x; vec1.y = part2->dir.y*halfl2 - r_cm.y; vec1.z = part2->dir.z*halfl2 - r_cm.z; /*vector from CM of sc1 to end of sc2*/ /*check is is inside sc1*/ a=DOT(vec1,part1->dir); vec3.x = vec1.x - part1->dir.x*a; vec3.y = vec1.y - part1->dir.y*a; vec3.z = vec1.z - part1->dir.z*a; b=DOT(vec3,vec3); d = fabs(a)-halfl1; if ( d <= 0) c = b; /*is inside cylindrical part*/ else c = d*d + b; /*is inside caps*/ /*c is distance squared from line or end to test if is inside sc*/ if (c < rcut2) intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],halfl2,intersections,patchnum); if (intrs < 2 ) { vec2.x = -part2->dir.x*halfl2 - r_cm.x; vec2.y = -part2->dir.y*halfl2 - r_cm.y; vec2.z = -part2->dir.z*halfl2 - r_cm.z; /*check is is inside sc1*/ a=DOT(vec2,part1->dir); vec4.x = vec2.x - part1->dir.x*a; vec4.y = vec2.y - part1->dir.y*a; vec4.z = vec2.z - part1->dir.z*a; b=DOT(vec4,vec4); d = fabs(a) -halfl1; if (d <= 0) c = b; /*is inside cylindrical part*/ else c = d*d + b; /*is inside caps*/ /*c is distance squared from line or end to test if is inside sc*/ if (c < rcut2) intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],-1.0*halfl2,intersections,patchnum); } // printf ("ends %d\n", intrs); } return intrs; } /*................................................................................*/ /* Find if vector vec has angular intersection with patch of sc1 */ int test_intrpatch(struct particles * part1, struct vector vec, double cospatch, double ti, double intersections[5],int patchnum) { double a; int i, intrs; struct vector vec_perpproject(struct vector*, struct vector*); void normalise(struct vector *); intrs=0; /*test if we have intersection*/ /* do projection to patch 
plane*/
    vec = vec_perpproject(&vec, &part1->dir);
    normalise(&vec);
    /* test angular distance from patch*/
    a = DOT(part1->patchdir[patchnum], vec);
    if (a >= cospatch) {
        intrs = 1;
        i = 0;
        while (intersections[i] != 0) {
            if (ti == intersections[i]) intrs = 0; /* found an intersection we already have - it is at the boundary*/
            i++;
        }
        if (intrs > 0) intersections[i] = ti;
    }
    return intrs;
}

/*................................................................................*/

/* Find intersections of the SC with the plane defined by vector w_vec and
   return the number of them */
int find_intersect_plane(struct particles * part1, struct particles * part2,
                         double halfl2, struct vector r_cm, struct vector w_vec,
                         double rcut, double cospatch, double intersections[5])
{
    int i, intrs;
    double a, c, d, ti, disti;
    struct vector nplane, d_vec;
    void normalise(struct vector *);
    struct vector vec_crossproduct(struct vector, struct vector);

    nplane = vec_crossproduct(part1->dir, w_vec);
    normalise(&nplane);
    normalise(&w_vec);
    a = DOT(nplane, part2->dir);
    if (a == 0.0) intrs = 0; /* there is no intersection; the plane and the sc are parallel*/
    else {
        ti = DOT(nplane, r_cm)/a;
        if ((ti > halfl2) || (ti < -halfl2)) intrs = 0; /* there is no intersection; the sc is too short*/
        else {
            d_vec.x = ti * part2->dir.x - r_cm.x; /*vector from the intersection point to the CM*/
            d_vec.y = ti * part2->dir.y - r_cm.y;
            d_vec.z = ti * part2->dir.z - r_cm.z;
            c = DOT(d_vec, w_vec);
            if (c * cospatch < 0) intrs = 0; /* the intersection in the plane is on the other side of the patch */
            else {
                d = fabs(DOT(d_vec, part1->dir)) - halfl2;
                if (d <= 0) disti = c*c; /*is inside cylinder*/
                else disti = d*d + c*c; /*is inside patch*/
                if (disti > rcut*rcut) intrs = 0; /* the intersection is outside the sc */
                else {
                    intrs = 1;
                    i = 0;
                    while (intersections[i] != 0) {
                        if (ti == intersections[i]) intrs = 0; /* found an intersection we already have - it is at the boundary*/
                        i++;
                    }
                    if (intrs > 0) {
                        intersections[i] = ti;
                    }
                }
            }
        }
    }
    return intrs;
}
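/* [Editor's illustration, not part of the original source] find_intersect_plane()
   above is the textbook parametric line/plane intersection. A point on the axis
   of sc2 is p(t) = cm2 + t*dir2; the patch half-plane lies in a plane through cm1
   with unit normal n = normalise(dir1 x w_vec). Solving DOT(n, p(t) - cm1) = 0
   gives t = DOT(n, cm1 - cm2) / DOT(n, dir2), which is the "ti = DOT(nplane,r_cm)/a"
   above with r_cm as the center-to-center vector; a == 0 is the parallel case.
   Minimal standalone sketch with plain doubles: */
static int example_line_plane(const double n[3], const double rcm[3],
                              const double dir2[3], double halfl2, double *t_out)
{
    double a = n[0]*dir2[0] + n[1]*dir2[1] + n[2]*dir2[2];
    double t;
    if (a == 0.0) return 0;                    /* line parallel to plane */
    t = (n[0]*rcm[0] + n[1]*rcm[1] + n[2]*rcm[2]) / a;
    if (t > halfl2 || t < -halfl2) return 0;   /* hit is beyond the rod ends */
    *t_out = t;
    return 1;
}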
/*CPSC................................................................................*/

/* Calculate intersections of sc2 with a patch of sc1 and return them in
   intersections; this works for cylindrical psc - CPSC */
int cpsc_intersect(struct particles * part1, struct particles * part2,
                   double halfl1, double halfl2, struct vector r_cm,
                   double intersections[5], double rcut,
                   struct ia_param * param, int which, int patchnum)
{
    int intrs;
    double a, b, c, d, e, x1, x2, rcut2;
    struct vector cm21, vec1, vec2, vec3, vec4;
    struct vector vec_crossproduct(struct vector, struct vector);
    struct vector vec_sub(struct vector, struct vector);
    struct vector vec_create(double, double, double);
    struct vector vec_scale(struct vector, double);
    struct vector vec_perpproject(struct vector*, struct vector*);
    struct quat quat_create(struct vector, double, double);
    void vec_rotate(struct vector *, struct quat);
    void normalise(struct vector *);
    int find_intersect_planec(struct particles *, struct particles *, double,
                              struct vector, struct vector, double, double, double *);
    int test_intrpatch(struct particles *, struct vector, double, double, double *, int);

    intrs = 0;
    rcut2 = rcut*rcut;
    /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at
      cut distance C*/
    /*1a- test intersection with half planes of patch and look how far they are
      from spherocylinder. If closer than C we have an intersection*/

    /* plane1 */
    /* find intersections of part2 with plane by par1 and part1->patchsides[0] */
    intrs += find_intersect_planec(part1, part2, halfl2, r_cm, part1->patchsides[0+2*patchnum],
                                   rcut, param->pcanglsw[which+2*patchnum], intersections);
    // printf("plane1 %d\n", intrs);
    /* plane2 */
    /* find intersections of part2 with plane by par1 and part1->patchsides[1] */
    intrs += find_intersect_planec(part1, part2, halfl2, r_cm, part1->patchsides[1+2*patchnum],
                                   rcut, param->pcanglsw[which+2*patchnum], intersections);
    if ((intrs == 2) && (param->pcanglsw[which+2*patchnum] < 0)) {
        fprintf(stderr, "ERROR: Patch is larger than 180 degrees and we are getting two segments - this has not been programmed yet.\n\n");
        exit(1);
    }
    // printf("plane2 %d\n", intrs);

    /*1b- test intersection with cylinder - it is at distance C*/
    if (intrs < 2) {
        cm21 = vec_scale(r_cm, -1.0);
        vec1 = vec_crossproduct(cm21, part1->dir);
        vec2 = vec_crossproduct(part2->dir, part1->dir);
        a = DOT(vec2, vec2);
        b = 2*DOT(vec1, vec2);
        c = -rcut*rcut + DOT(vec1, vec1);
        d = b*b - 4*a*c;
        if (d >= 0) { /*there is intersection with infinite cylinder */
            x1 = (-b+sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
            if ((x1 >= halfl2) || (x1 <= -halfl2)) intrs += 0; /*intersection is outside sc2*/
            else {
                /* vectors from center of sc1 to intersection with infinite cylinder*/
                vec1.x = part2->dir.x*x1 - r_cm.x;
                vec1.y = part2->dir.y*x1 - r_cm.y;
                vec1.z = part2->dir.z*x1 - r_cm.z;
                e = DOT(part1->dir, vec1);
                if ((e >= halfl1) || (e <= -halfl1)) intrs += 0; /*intersection is outside sc1*/
                else {
                    intrs += test_intrpatch(part1, vec1, param->pcanglsw[which+2*patchnum], x1, intersections, patchnum);
                }
            }
            if (d > 0) {
                x2 = (-b-sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
                if ((x2 >= halfl2) || (x2 <= -halfl2)) intrs += 0; /*intersection is outside sc2*/
                else {
                    vec2.x = part2->dir.x*x2 - r_cm.x;
                    vec2.y = part2->dir.y*x2 - r_cm.y;
                    vec2.z = part2->dir.z*x2 - r_cm.z;
                    e = DOT(part1->dir, vec2);
                    if ((e >= halfl1) || (e <= -halfl1)) intrs += 0; /*intersection is outside sc1*/
                    else {
                        intrs += test_intrpatch(part1, vec2, param->pcanglsw[which+2*patchnum], x2, intersections, patchnum);
                    }
                }
            }
        }
    }
    // printf ("cylinder %d x1 %f x2 %f e %f\n", intrs, x1, x2, e);

    /*1c- test intersection with plates at the end - it is at distance C and in wedge*/
    if (intrs < 2) {
        a = DOT(part1->dir, part2->dir);
        if (a == 0.0) intrs = 0; /* there is no intersection; the plane and the sc are parallel*/
        else {
            /*plane cap1*/
            vec1.x = r_cm.x + halfl1*part1->dir.x;
            vec1.y = r_cm.y + halfl1*part1->dir.y;
            vec1.z = r_cm.z + halfl1*part1->dir.z;
            x1 = DOT(part1->dir, vec1)/a; /*parameter on line of SC2 determining intersection*/
            if ((x1 > halfl2) || (x1 < -halfl2)) intrs += 0; /* there is no intersection; the sc is too short*/
            else {
                vec2.x = x1*part2->dir.x - vec1.x; /*vector from ENDPOINT to intersection point */
                vec2.y = x1*part2->dir.y - vec1.y;
                vec2.z = x1*part2->dir.z - vec1.z;
                b = DOT(vec2, vec2);
                if (b > rcut*rcut) intrs += 0; /* the intersection is outside the sc */
                else {
                    intrs += test_intrpatch(part1, vec2, param->pcanglsw[which+2*patchnum], x1, intersections, patchnum);
                }
            }
            // printf ("plane cap1 %d %f\n", intrs, x1);
            /*plane cap2*/
            vec1.x = r_cm.x - halfl1*part1->dir.x;
            vec1.y = r_cm.y - halfl1*part1->dir.y;
            vec1.z = r_cm.z - halfl1*part1->dir.z;
            x2 = DOT(part1->dir, vec1)/a; /*parameter on line of SC2 determining intersection*/
            if ((x2 > halfl2) || (x2 < -halfl2)) intrs += 0; /* there is no intersection; the sc is too short*/
            else {
                vec2.x = x2*part2->dir.x - vec1.x; /*vector from ENDPOINT to intersection point
*/ vec2.y = x2*part2->dir.y - vec1.y; vec2.z = x2*part2->dir.z - vec1.z; b = DOT (vec2, vec2); if (b > rcut*rcut) intrs+=0; /* the intersection is outside sc */ else { intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum); } } // printf ("plane cap2 %d %f\n", intrs,x2); } } /*1d- if there is only one itersection shperocylinder ends within patch wedge set as second intersection end inside patch*/ if (intrs < 2 ) { /*whole spherocylinder is in or all out if intrs ==0*/ vec1.x = part2->dir.x*halfl2 - r_cm.x; vec1.y = part2->dir.y*halfl2 - r_cm.y; vec1.z = part2->dir.z*halfl2 - r_cm.z; /*vector from CM of sc1 to end of sc2*/ /*check is is inside sc1*/ a=DOT(vec1,part1->dir); vec3.x = vec1.x - part1->dir.x*a; vec3.y = vec1.y - part1->dir.y*a; vec3.z = vec1.z - part1->dir.z*a; b=DOT(vec3,vec3); d = fabs(a)-halfl1; if ( d <= 0) { /*is in cylindrical part*/ /*c is distance squared from line or end to test if is inside sc*/ if (b < rcut2) intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],halfl2,intersections,patchnum); } if (intrs < 2 ) { vec2.x = -part2->dir.x*halfl2 - r_cm.x; vec2.y = -part2->dir.y*halfl2 - r_cm.y; vec2.z = -part2->dir.z*halfl2 - r_cm.z; /*check is is inside sc1*/ a=DOT(vec2,part1->dir); vec4.x = vec2.x - part1->dir.x*a; vec4.y = vec2.y - part1->dir.y*a; vec4.z = vec2.z - part1->dir.z*a; b=DOT(vec4,vec4); d = fabs(a) -halfl1; if (d <= 0) { /*c is distance squared from line or end to test if is inside sc*/ if (b < rcut2) intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],-1.0*halfl2,intersections,patchnum); } } // printf ("ends %d\n", intrs); } return intrs; } /*CPSC................................................................................*/ /* Find intersections of plane defined by vector w_vec.and returns number of them - for cylindrical psc -CPSC */ int find_intersect_planec(struct particles * part1, struct particles * part2, double halfl, struct vector r_cm, struct vector w_vec, double rcut, double cospatch, double intersections[5]) { int i, intrs=0; double a, c, d, ti, disti; struct vector nplane, d_vec; void normalise(struct vector *); struct vector vec_crossproduct(struct vector, struct vector); nplane=vec_crossproduct(part1->dir,w_vec); normalise(&nplane); normalise(&w_vec); a = DOT(nplane, part2->dir); if (a == 0.0) intrs=0; /* there is no intersection plane and sc are paralel*/ else { ti = DOT(nplane,r_cm)/a; if ((ti > halfl ) || (ti < -halfl)) intrs=0; /* there is no intersection plane sc is too short*/ else { d_vec.x = ti*part2->dir.x - r_cm.x; /*vector from intersection point to CM*/ d_vec.y = ti*part2->dir.y - r_cm.y; d_vec.z = ti*part2->dir.z - r_cm.z; c = DOT (d_vec, w_vec); if ( c *cospatch < 0) intrs=0; /* the intersection in plane is on other side of patch */ else { d = fabs(DOT (d_vec, part1->dir)) - halfl; if (d <= 0) { disti= c*c; /*is inside cylinder*/ if (disti > rcut*rcut) intrs=0; /* the intersection is outside sc */ else { intrs=1; i=0; while (intersections[i] !=0) { if (ti == intersections[i]) intrs=0; /* found intersection we already have -it is at boundary*/ i++; } if (intrs > 0) intersections[i]=ti; } } } } } return intrs; } /*..................................................................................*/ /* Find projection of cpsc on plane (0,0,1) including cutoff and return vector to its begining and end and cm */ int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, BOOL* positive, double * cutdist, struct vector 
*partbeg, struct vector *partend) { struct vector vec1; double k,x1,x2,y1,y2,a,b,c,e,d; void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result); void normalise(struct vector*); if (( (*positive)&& (projectdir->z > 0) ) || ( (!(*positive))&& (projectdir->z < 0) )) return 0; if ( fabs(partbeg->z) > (*cutdist) ) return 0; /* we might have interacting segment*/ x2 = 0.0; y2 = 0.0; /*begining point*/ /*if begining projected along particle direction is within cutoff */ if (fabs(partdir->z) > ZEROTOL2) { projectinz(partbeg,partdir,pbeg); a=0; } else { /*we need some starting point*/ vec1.x = 2.0*partbeg->x - partend->x; vec1.y = 2.0*partbeg->y - partend->y; vec1.z = 2.0*partbeg->z - partend->z; projectinz(&vec1,projectdir,pbeg); a=1; } if (partdir->z != 0) { b = fabs(partbeg->z / partdir->z); } else { b = (*cutdist)+1.0; } if ( (b > (*cutdist)) || (a==1)) { /*else beginig is at sphere, find intersections with sphere of cutoff radius*/ if ( fabs(projectdir->z) > ZEROTOL2) { projectinz(partbeg,projectdir,pend); } else { pend->x = pbeg->x + projectdir->x; pend->y = pbeg->y + projectdir->y; } if (pend->y == pbeg->y) { y1=pbeg->y; y2=pbeg->y; a=sqrt( (*cutdist)*(*cutdist) - partbeg->z*partbeg->z - (pbeg->y-partbeg->y)*(pbeg->y-partbeg->y) ); x1 = partbeg->x + a; x2 = partbeg->x - a; if (pend->x > pbeg->x) {/*select the right intersection*/ pbeg->x = x2; x2 = x1; } else { pbeg->x = x1; } pbeg->y = y1; } else { k = (pend->x - pbeg->x)/ (pend->y - pbeg->y); a = k*k +1; b = partbeg->y + k*k*pbeg->y - k*pbeg->x + k*partbeg->x; c = partbeg->y*partbeg->y + partbeg->z*partbeg->z - (*cutdist)*(*cutdist) + (k*pbeg->y - pbeg->x + partbeg->x)*(k*pbeg->y - pbeg->x + partbeg->x); e = b*b-a*c; if (e < 0) { return 0; /*tehre might be no intersection with sphere*/ } d = sqrt(e); if (pend->y > pbeg->y) {/*select the right intersection*/ y1 = (b - d ) /a; y2 = (b + d ) /a; } else { y1 = (b + d ) /a; y2 = (b - d ) /a; } x1 = k * (y1 - pbeg->y) + pbeg->x; x2 = k * (y2 - pbeg->y) + pbeg->x; pbeg->x = x1; pbeg->y = y1; pbeg->z = 0.0; } } //printf("pscwall beg %f %f \n",pbeg->x,pbeg->y); /*end point*/ a = -(*cutdist) * projectdir->z; /*z coordinate of point where projection is in cut distance*/ //printf("sphere end %f %f ",a,partend->z); if ( ((partend->z < a)&&(*positive)) || ((a < partend->z)&&(!(*positive))) ){ /*end is within cut off - second sphere*/ /*if this is the case vec1 is end of pherocylinder and pend is its projection*/ if (projectdir->z != 0) { projectinz(partend,projectdir,pend); } else { pend->x = pbeg->x + projectdir->x; pend->y = pbeg->y + projectdir->y; } if (pend->y == pbeg->y) { y1=pend->y; y2=pend->y; a=sqrt( (*cutdist)*(*cutdist) - partend->z*partend->z - (pend->y-partend->y)*(pend->y-partend->y) ); x1 = partend->x + a; x2 = partend->x - a; if (pbeg->x > pend->x) {/*select the right intersection*/ pend->x = x2; } else { pend->x = x1; } pend->y = y1; } else { k = (pbeg->x - pend->x)/ (pbeg->y - pend->y); a = k*k +1; b = partend->y + k*k*pend->y - k*pend->x + k*partend->x; c = partend->y*partend->y + partend->z*partend->z - (*cutdist)*(*cutdist) + (k*pend->y - pend->x + partend->x)*(k*pend->y - pend->x + partend->x); e = b*b-a*c; if (e < 0) { return 0; /*there might be no intersection with sphere*/ } d = sqrt(e); if (pbeg->y > pend->y) {/*select the right intersection*/ y1 = (b - d ) /a; y2 = (b + d ) /a; } else { y1 = (b + d ) /a; y2 = (b - d ) /a; } x1 = k * (y1 - pend->y) + pend->x; x2 = k * (y2 - pend->y) + pend->x; pend->x = x1; pend->y = y1; pend->z = 0.0; } } else 
{ if ( ((partbeg->z < a)&&(*positive)) || ((a < partbeg->z)&&(!(*positive))) ) { /*end is at cutoff going through cylindrical part*/ //printf("cylinder "); b = (a - partbeg->z)/ partdir->z; vec1.x = partbeg->x + b * partdir->x; vec1.y = partbeg->y + b * partdir->y; vec1.z = a; projectinz(&vec1,projectdir,pend); } else { /* also projected end is within the same sphere as begining- no contribution from cylinder*/ if (x2 == 0.0 ) { //printf("sphere beg "); if (projectdir->z != 0) { projectinz(partbeg,projectdir,pend); } else { pend->x = pbeg->x + projectdir->x; pend->y = pbeg->y + projectdir->y; } if (pend->y == pbeg->y) { y1=pbeg->y; y2=pbeg->y; a=sqrt( (*cutdist)*(*cutdist) - partbeg->z*partbeg->z - (pbeg->y-partbeg->y)*(pbeg->y-partbeg->y) ); x1 = partbeg->x + a; x2 = partbeg->x - a; if (pend->x > pbeg->x) {/*select the right intersection*/ pend->x = x1; } else { pend->x = x2; } pend->y = y1; } else { k = (pend->x - pbeg->x)/ (pend->y - pbeg->y); a = k*k +1; b = partbeg->y + k*k*pbeg->y - k*pbeg->x + k*partbeg->x; c = partbeg->y*partbeg->y + partbeg->z*partbeg->z - (*cutdist)*(*cutdist) + (k*pbeg->y - pbeg->x + partbeg->x)*(k*pbeg->y - pbeg->x + partbeg->x); e = b*b-a*c; if (e < 0) { return 0; /*tehre might be no intersection with sphere*/ } d = sqrt(e); if (pend->y > pbeg->y) {/*select the right intersection*/ y1 = (b - d ) /a; y2 = (b + d ) /a; } else { y1 = (b + d ) /a; y2 = (b - d ) /a; } x1 = k * (y1 - pbeg->y) + pbeg->x; x2 = k * (y2 - pbeg->y) + pbeg->x; pend->x = x1; pend->y = y1; pend->z = 0.0; } } else { pend->x = x2; pend->y = y2; pend->z = 0.0; } return 2; /*line end is on sphere of particle begining = no cylindrical cutoff*/ } } return 1; } int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, double* halfl, BOOL* orientin, BOOL* positive, double* rcmz, double * cutdist, struct vector *partbeg, struct vector *partend) { struct vector vec1; double a; void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result); if (( (*positive)&& (projectdir->z >= 0) ) || ( (!(*positive))&& (projectdir->z <= 0) )) return 0; /*if projected closer point beoynd cutoff no interaction*/ /*project begining of spherocylinder*/ vec1.x = partbeg->x; vec1.y = partbeg->y; vec1.z = partbeg->z; if (-vec1.z/projectdir->z < (*cutdist) ) { projectinz(&vec1,projectdir,pbeg); } else { return 0; } /* we have interacting segment*/ if (-partend->z/projectdir->z < (*cutdist) ) { /*whole segment interacts*/ vec1.z = partend->z; } else { vec1.z = -(*cutdist)*projectdir->z; } if (partdir->z != 0.0) a = (vec1.z - (*rcmz)) / partdir->z; else { if (*orientin) a = -(*halfl); else a = (*halfl); } vec1.x = partdir->x * a; vec1.y = partdir->y * a; projectinz(&vec1,projectdir,pend); return 1; } int cutprojectatwall(struct vector* pextr1, struct vector* pextr2, struct vector* pextr3, struct vector* pextr4, struct vector* projectdir, struct vector* partdir, double * cutdist, struct vector *partbeg, struct vector *partend, struct vector *pend, double *cuttoproject, BOOL* orientin) { double y1,y2,O2z,det,a,b,dirydirz,dir2x,dir2y,dir2z,dirzldiry; void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result); dirydirz = partdir->y * partdir->z; dir2x = partdir->x * partdir->x; dir2y = partdir->y * partdir->y; dir2z = partdir->z * partdir->z; a = 1/(dir2x+dir2y); if (partdir->x != 0) { O2z = partbeg->z * partbeg->z; b=dir2y*dir2z*O2z - (dir2x+dir2y) * (O2z*(dir2x+dir2z)- (*cutdist)*(*cutdist)*dir2x); if (b < 0 ) { /*no cutoff 
from cylindrical part*/ return 0; } det = sqrt(b); y1 = partbeg->y + (dirydirz*partbeg->z + det )*a; y2 = partbeg->y + (dirydirz*partbeg->z - det )*a; if (( (partdir->x > 0)&&(!(*orientin)) ) || ( (partdir->x < 0)&&(*orientin) )) { pextr1->y = y1; pextr2->y = y2; } else { pextr1->y = y2; pextr2->y = y1; } pextr1->x = partbeg->x + (partbeg->z*partdir->z - (pextr1->y - partbeg->y)*partdir->y) / partdir->x; pextr2->x = partbeg->x + (partbeg->z*partdir->z - (pextr2->y - partbeg->y)*partdir->y) / partdir->x; O2z = partend->z * partend->z; b= dir2y*dir2z*O2z - (dir2x+dir2y) * (O2z*(dir2x+dir2z)- (*cutdist)*(*cutdist)*dir2x); if (b >= 0) { /*we have intersections from end*/ det = sqrt(b); y1 = partend->y + (dirydirz * partend->z + det )*a; y2 = partend->y + (dirydirz * partend->z - det )*a; //printf("det %f y1 %f y2 %f \n", det,y1,y2); if (( (partdir->x > 0)&&(!(*orientin)) ) || ( (partdir->x < 0)&&(*orientin) )) { pextr3->y = y1; pextr4->y = y2; } else { pextr3->y = y2; pextr4->y = y1; } pextr3->x = partend->x + (partend->z*partdir->z - (pextr3->y - partend->y)*partdir->y) / partdir->x; pextr4->x = partend->x + (partend->z*partdir->z - (pextr4->y - partend->y)*partdir->y) / partdir->x; } else { /*no intersection at the end the cutoff intersects the plane in the perpendicular projection of line segemnt, so we have to use that point */ if (partdir->z == 0) { fprintf (stderr, "\nERROR: Something went wrong in calculation of projection.\n\n"); exit (1); } else { a = ((*cuttoproject) - partbeg->z)/ partdir->z; //if ( projectdir->y * partdir->x < 0 ) pextr3->x = partbeg->x + a * partdir->x; pextr3->y = partbeg->y + a * partdir->y; pextr3->z = (*cuttoproject); //printf("before proj %f %f dir %f %f %f ",pextr3->x,pextr3->y,projectdir->x,projectdir->y,projectdir->z); projectinz(pextr3,projectdir,pextr4); pextr3->x = pextr4->x; pextr3->y = pextr4->y; pextr3->z = 0.0; //printf("after proj %f %f \n",pextr3->x,pextr3->y); return 2; } } } else { if (partdir->y != 0) { dirzldiry = partdir->z/partdir->y; y1 = partbeg->y + partbeg->z * dirzldiry; det = sqrt( (*cutdist)*(*cutdist) - partbeg->z * partbeg->z * (1+dirzldiry*dirzldiry) ); if (( (partdir->y > 0)&&(!(*orientin)) ) || ( (partdir->y < 0)&&(*orientin) )) { pextr1->x = partbeg->x + det; pextr2->x = partbeg->x - det; } else { pextr1->x = partbeg->x - det; pextr2->x = partbeg->x + det; } pextr1->y = y1; pextr2->y = y1; y1 = partend->y + partend->z * dirzldiry; b = (*cutdist)*(*cutdist) - partend->z * partend->z * (1+dirzldiry*dirzldiry); if (b >= 0) { /*we have intersections from end*/ det = sqrt(b); if (( (partdir->y > 0)&&(!(*orientin)) ) || ( (partdir->y < 0)&&(*orientin) )) { pextr3->x = partend->x + det; pextr4->x = partend->x - det; } else { pextr3->x = partend->x - det; pextr4->x = partend->x + det; } pextr3->y = y1; pextr4->y = y1; } else { /*no intersection at the end the cutoff intersects the plane in the perpendicular projection of line segemnt, so we have to use that point */ if (partdir->z == 0) { fprintf (stderr, "\nERROR: Something went wrong in calculation of projection.\n\n"); exit (1); } else { a = ((*cutdist) - partbeg->z)/ partdir->z; y1 = a * partdir->y + partbeg->y; if ( projectdir->x * partdir->y > 0 ) { pextr3->x = a * partdir->x + partbeg->x; pextr3->y = y1; pextr4->x = pend->x; pextr4->y = pend->y; }else { pextr3->x = pend->x; pextr3->y = pend->y; pextr4->x = a * partdir->x + partbeg->x; pextr4->y = y1; } } } } else { return 0; /* if perpendicular to plane we don't have any intersections*/ } } return 1; } /*project a point in 
project direction to z plane z=0*/
void projectinz(struct vector* vec1, struct vector* projectdir, struct vector * projection)
{
    projection->x = vec1->x - vec1->z * projectdir->x/projectdir->z;
    projection->y = vec1->y - vec1->z * projectdir->y/projectdir->z;
    projection->z = 0;
}

/*calculates the area defined by four points in the z=0 plane */
double areafourpoints(struct vector * pbeg, struct vector * pend, struct vector * pbeg1, struct vector * pend1)
{
    double area = 0.0;
    struct vector vec1, vec2;

    /*area of the quadrilateral as two triangles, each half a cross product:
      |(pbeg1-pbeg)x(pend-pbeg)|/2 */
    vec1.x = pbeg1->x - pbeg->x;
    vec1.y = pbeg1->y - pbeg->y;
    vec2.x = pend->x - pbeg->x;
    vec2.y = pend->y - pbeg->y;
    //printf("a: %f %f %f %f \n",vec1.x,vec2.y,vec1.y,vec2.x);
    area += fabs(vec1.x*vec2.y - vec1.y*vec2.x)*0.5;
    /* + |(pend-pend1)x(pbeg1-pend1)|/2*/
    vec1.x = pend->x - pend1->x;
    vec1.y = pend->y - pend1->y;
    vec2.x = pbeg1->x - pend1->x;
    vec2.y = pbeg1->y - pend1->y;
    area += fabs(vec1.x*vec2.y - vec1.y*vec2.x)*0.5;

    return area;
}

/*calculates the area defined by up to eight points in the z=0 plane */
double areaeightpoints(struct vector * p1, struct vector * p2, struct vector * p3, struct vector * p4,
                       struct vector * p5, struct vector * p6, struct vector * p7, struct vector * p8)
{
    double area = 0.0;
    struct vector vec1, vec2;

    /*the polygon is split into a fan of triangles anchored at p1; each triangle
      contributes half a cross product: |(p2-p1)x(p3-p2)|/2 */
    vec1.x = p2->x - p1->x;
    vec1.y = p2->y - p1->y;
    vec2.x = p3->x - p2->x;
    vec2.y = p3->y - p2->y;
    area += fabs(vec1.x*vec2.y - vec1.y*vec2.x)*0.5;
    //printf("3");
    if (p4 != NULL) {
        vec1.x = p3->x - p1->x;
        vec1.y = p3->y - p1->y;
        vec2.x = p4->x - p3->x;
        vec2.y = p4->y - p3->y;
        area += fabs(vec1.x*vec2.y - vec1.y*vec2.x)*0.5;
        //printf("4");
        if (p5 != NULL) {
            vec1.x = p4->x - p1->x;
            vec1.y = p4->y - p1->y;
            vec2.x = p5->x - p4->x;
            vec2.y = p5->y - p4->y;
            area += fabs(vec1.x*vec2.y - vec1.y*vec2.x)*0.5;
            //printf("5");
            if (p6 != NULL) {
                vec1.x = p5->x - p1->x;
                vec1.y = p5->y - p1->y;
                vec2.x = p6->x - p5->x;
                vec2.y = p6->y - p5->y;
                area += fabs(vec1.x*vec2.y - vec1.y*vec2.x)*0.5;
                //printf("6");
                if (p7 != NULL) {
                    vec1.x = p6->x - p1->x;
                    vec1.y = p6->y - p1->y;
                    vec2.x = p7->x - p6->x;
                    vec2.y = p7->y - p6->y;
                    area += fabs(vec1.x*vec2.y - vec1.y*vec2.x)*0.5;
                    //printf("7");
                    if (p8 != NULL) {
                        vec1.x = p7->x - p1->x;
                        vec1.y = p7->y - p1->y;
                        vec2.x = p8->x - p7->x;
                        vec2.y = p8->y - p7->y;
                        area += fabs(vec1.x*vec2.y - vec1.y*vec2.x)*0.5;
                        //printf("8");
                    }
                }
            }
        }
    }
    return area;
}
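/* [Editor's illustration, not part of the original source] Quick sanity check of
   the cross-product rule used by areafourpoints()/areaeightpoints() above: the
   area of the triangle (a,b,c) in the z=0 plane is |(b-a) x (c-a)|/2. For the
   unit square (0,0),(1,0),(1,1),(0,1) split along a diagonal, each triangle
   contributes 0.5, so the total is 1.0. Standalone sketch with plain doubles
   (math.h is already used throughout this file): */
static double example_tri_area(double ax, double ay, double bx, double by, double cx, double cy)
{
    /* z component of the 2D cross product, halved */
    return fabs((bx - ax)*(cy - ay) - (by - ay)*(cx - ax)) * 0.5;
}
/* example_tri_area(0,0, 1,0, 1,1) + example_tri_area(0,0, 1,1, 0,1) == 1.0.
   Note the fan decomposition above assumes a convex outline; for a non-convex
   polygon the per-triangle fabs() would overcount. */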
/*..............................................................................*/
/*........................INPUT OUTPUT..........................................*/
/*..............................................................................*/
/*..............................................................................*/

/**
 * convert string num into two integers
 */
void readii(char * num, int value[2])
{
    char *end, *num2;
    void trim(char *);

    value[0] = strtol(num, &num2, 10);
    trim(num2);
    if ((int)strlen(num2) > 0) value[1] = strtol(num2, &end, 10);
    else {
        value[1] = 0;
        return;
    }
    if (*end) {
        fprintf(stderr, "Could not convert %s into two integers\n", num);
        exit(1);
    }
    return;
}

/**
 * convert string num into an integer
 */
int readi(char * num)
{
    char *end;
    int i = strtol(num, &end, 10);
    if (*end) {
        fprintf(stderr, "Could not convert %s into integer\n", num);
        exit(1);
    }
    return (int) i;
}

/**
 * convert string num into a long
 */
long readl(char * num)
{
    char *end;
    long i = strtol(num, &end, 10);
    if (*end) {
        fprintf(stderr, "Could not convert %s into long\n", num);
        exit(1);
    }
    return i;
}

/**
 * convert string num into a double
 */
double readd(char * num)
{
    char *end;
    double i = strtod(num, &end);
    if (*end) {
        fprintf(stderr, "Could not convert %s into double\n", num);
        exit(1);
    }
    return i;
}

/* Reads the run parameters from the external file "options". See the end of the
   code for a template. All comments starting with '#' are stripped out. The
   options are summarised on standard output and checked for validity of range. */
void read_options(struct sim* sim, char filename[30])
{
    int i;
    int num_options = -1;
    double transmx, rotmx, chainmmx, chainrmx;
    double angle, chain_angle;
    char *id, *value, *tokLine, *line;
    FILE *infile;
    void strip_comment(char *);
    void trim(char *);
    void readii(char * num, int value[2]);
    double readd(char *);
    long readl(char *);
    int readi(char *);

    /* for new options add before the last line */
    Option options[] = {
        {"write_cluster",   Long,   FALSE, &sim->write_cluster},
        {"adjust",          Long,   FALSE, &sim->adjust},
        {"movie",           Long,   FALSE, &sim->movie},
        {"nequil",          Long,   FALSE, &sim->nequil},
        {"nsweeps",         Long,   FALSE, &sim->nsweeps},
        {"nrepchange",      Long,   FALSE, &sim->nrepchange},
        {"paramfrq",        Long,   FALSE, &sim->paramfrq},
        {"report",          Long,   FALSE, &sim->report},
        {"seed",            Long,   FALSE, &seed},
        {"pairlist_update", Int,    FALSE, &sim->pairlist_update},
        {"ptype",           Int,    FALSE, &sim->ptype},
        {"wlm",             Int2,   FALSE, &sim->wlm},
        {"wlmtype",         Int,    FALSE, &sim->wl.wlmtype},
        {"press",           Double, FALSE, &sim->press},
        {"paralpress",      Double, FALSE, &sim->paralpress},
        {"edge_mx",         Double, FALSE, &sim->edge.mx},
        {"shave",           Double, FALSE, &sim->shave},
        {"chainprob",       Double, FALSE, &sim->chainprob},
        {"switchprob",      Double, FALSE, &sim->switchprob},
        {"temper",          Double, FALSE, &sim->temper},
        {"paraltemper",     Double, FALSE, &sim->paraltemper},
        {"transmx",         Double, FALSE, &transmx},
        {"rotmx",           Double, FALSE, &rotmx},
        {"chainmmx",        Double, FALSE, &chainmmx},
        {"chainrmx",        Double, FALSE, &chainrmx},
        {"last",            Int,    FALSE, NULL}
    };
    while (options[++num_options].var != NULL)
        ;

    /*--- 1. Read in values ---*/
    size_t line_size = (STRLEN + 1) * sizeof(char);
    line = (char *) malloc(line_size);

    infile = fopen(filename, "r");
    if (infile == NULL) {
        fprintf(stderr, "\nERROR: Could not open options file.\n\n");
        exit(1);
    }

    while (getline(&line, &line_size, infile) != -1) {
        // strip comments
        strip_comment(line);
        trim(line);
        if (strlen(line) == 0) {
            continue;
        }
        // tokenize
        tokLine = line;
        id = strtok(tokLine, "=");
        if (id == NULL) {
            fprintf(stderr, "error parsing Configuration line (%s)", line);
            free(line);
            exit(1);
        }
        trim(id);
        tokLine = NULL;
        value = strtok(tokLine, "=");
        if (value == NULL) {   /* test for NULL before trimming, not after */
            fprintf(stderr, "error parsing Configuration line (%s)", line);
            free(line);
            exit(1);
        }
        trim(value);
        //printf("id: %s; value: %s\n", id, value);
        for (i = 0; i < num_options; i++) {
            if (strcmp(id, options[i].id) == 0) {
                if (options[i].type == Int2) {
                    readii(value, *((int (*)[2]) options[i].var));
                    options[i].set = TRUE;
                    break;
                }
                if (options[i].type == Int) {
                    *((int *) options[i].var) = readi(value);
                    options[i].set = TRUE;
                    break;
                }
                else if (options[i].type == Long) {
                    *((long *) options[i].var) = readl(value);
                    options[i].set = TRUE;
                    break;
                }
                else if (options[i].type == Double) {
                    *((double *) options[i].var) = readd(value);
                    options[i].set = TRUE;
                    break;
                }
                else {
                    fprintf(stderr, "Could not determine type of %s!\n", id);
                    free(line);
                    exit(1);
                }
            }
        }
        if (i == num_options) {
            fprintf(stderr, "Unknown identifier %s!\nWill proceed.\n", id);
        }
    }
    fclose(infile);
    free(line);

    /* Check whether all options have been read in */
    for (i = 0; i < num_options; i++) {
        if (!options[i].set) {
            fprintf(stderr, "option '%s' is not set!\n", options[i].id);
            exit(1);
        }
    }

    /*--- 2. Summarize results on standard output ---*/
    /* Density of close-packed spherocylinders */
    // rho_cp = 2.0/(sqrt(2.0) + *length * sqrt(3.0));

    printf(" Pressure coupling type: %d\n", sim->ptype);
    printf(" Pressure: %.8lf\n", sim->press);
    printf(" Replica exchange pressure: %.8lf\n", sim->paralpress);
    printf(" Average volume change attempts per sweep: %.8lf\n", sim->shave);
    printf(" Equilibration sweeps: %ld\n", sim->nequil);
    printf(" Sweeps between step size adjustments: %ld\n", sim->adjust);
    printf(" Production sweeps: %ld\n", sim->nsweeps);
    printf(" Sweeps between statistics samples: %ld\n", sim->paramfrq);
    printf(" Sweeps between statistics reports: %ld\n", sim->report);
    printf(" Average chain move attempts per sweep: %.8lf\n", sim->chainprob);
    printf(" Initial maximum displacement: %.8lf\n", transmx);
    printf(" Initial maximum angular change (degrees): %.8lf\n", rotmx);
    printf(" Initial maximum box edge change: %.8lf\n", sim->edge.mx);
    printf(" Initial maximum chain displacement: %.8lf\n", chainmmx);
    printf(" Initial maximum chain angular change (degrees): %.8lf\n", chainrmx);
    printf(" Temperature in kT/e: %.8lf\n", sim->temper);
    printf(" Parallel tempering temperature in kT/e: %.8lf\n", sim->paraltemper);
    printf(" Sweeps between replica exchange: %ld\n", sim->nrepchange);
    printf(" Wang-Landau method: %d %d\n", sim->wlm[0], sim->wlm[1]);
    printf(" Calculate the Wang-Landau method for atom type: %d\n", sim->wl.wlmtype);
    printf(" Average type switch attempts per sweep: %.8lf\n", sim->switchprob);
    printf(" Number of Sweeps per pairlist update: %d\n", sim->pairlist_update);
    printf(" Random number seed: %ld\n", seed);
    printf(" Number of sweeps per writing out cluster info: %ld\n", sim->write_cluster);
    if (sim->movie > 0) {
        printf(" Sweeps between movie frames: %ld\n", sim->movie);
    } else {
        printf(" No movie\n");
    }
    printf("\n");

    if (sim->pairlist_update) {
printf(" A pairlist will be generated every %d steps. This is a greedy" " algorithm; make sure you don't have big chains etc.!\n", sim->pairlist_update); } /*--- 3. Validity checks ---*/ if (rotmx < 0.0 || rotmx > 180) { fprintf (stderr, "ERROR: Maximum orientation change must be in range 0 to 180.\n\n"); exit (1); } if (chainrmx < 0.0 || chainrmx > 180) { fprintf (stderr, "ERROR: Maximum orientation change for chains must be in range 0 to 180.\n\n"); exit (1); } if ( (sim->ptype <0) || (sim->ptype>3) ) { fprintf (stderr, "ERROR: Unknown pressure coupling %d. Program only knows: 0 - anisotropic coupling, \ 1 - isotropic coupling, 2 - isotropic in xy z=const, 3 - isotropic xy V=const.\n\n",sim->ptype); exit (1); } if ( (sim->wlm[0] <0) || (sim->wlm[0] > 7) || (sim->wlm[1] <0) || (sim->wlm[1] > 7) ) { fprintf (stderr, "ERROR: Unknown Wang-Landau method %d %d. Program only knows: 0 - none, \ 1 - z-direction od 1st particle, 2 - pore in membrane, 3 - zorientation of 0th particle,\ 4 - distance of fist two particles, 5 - pore around z-axis above CM,\ 6 - pore around z-axis above 0th particle, 7 - number of particles in contact \n\n",sim->wlm[0],sim->wlm[1]); exit (1); } if ( (sim->wlm[0] == 0) && (sim->wlm[1] > 0) ) { fprintf (stderr, "ERROR: Wang-Landau method has to be set for first order parameter and then for second order parameter\n\n"); exit (1); } if ( (sim->wlm[0] == 2) || (sim->wlm[0] == 5) || (sim->wlm[0] == 6) ) { if(sim->wl.wlmtype < 1){ fprintf (stderr, "ERROR: Atom type for the Wang-Landau Method (%d) was false defined.\n\n",sim->wl.wlmtype); exit (1); } if ( (sim->wlm[1] == 2) || (sim->wlm[1] == 5) || (sim->wlm[1] == 6) ) { fprintf (stderr, "ERROR: Simulaneous use of two pore order parameters has not been implemented yet.\n\n"); exit (1); } } /* we store maximum rotation as half angle - useful for quaterions*/ angle = rotmx / 180.0 * PIH *0.5; rotmx = cos((rotmx)/180.0*PIH); chain_angle = chainrmx / 180.0 * PIH; chainrmx = cos((chainrmx)/180.0*PIH); sim->edge.mx *= 2.0; /* The full range is -maxl to +maxl, i.e. spanning 2*maxl */ transmx *= 2.0; /* The full range is -maxr to +maxr, i.e. spanning 2*maxr */ chainmmx *= 2.0; /* The full range is -maxr to +maxr, i.e. spanning 2*maxr */ for (i=0;i<MAXT;i++) { sim->trans[i].mx = transmx; sim->rot[i].mx = rotmx; sim->rot[i].angle = angle; } for (i=0;i<MAXMT;i++) { sim->chainm[i].mx = chainmmx; sim->chainr[i].mx = chainrmx; sim->chainr[i].angle = chain_angle; } //parallel tempering #ifdef MPI if ( (sim->temper != sim->paraltemper) && (sim->mpinprocs <2) ) { printf("ERROR: Paralllel tempering at single core does not work.\n\n"); exit(1); } sim->dtemp = (sim->paraltemper - sim->temper )/(sim->mpinprocs-1); sim->temper += sim->dtemp * sim->mpirank; if ( (sim->press != sim->paralpress) && (sim->mpinprocs <2) ) { printf("ERROR: Pressure replica exchange at single core does not work.\n\n"); exit(1); } sim->dpress = (sim->paralpress - sim->press )/(sim->mpinprocs-1); sim->press += sim->dpress * sim->mpirank; seed += sim->mpirank; sim->mpiexch.mx = sim->dtemp; sim->mpiexch.angle = sim->dpress; #endif } /*..............................................................................*/ /* Used by read_options to read a long integer with error checking. 
/* Used by read_options to read a long integer with error checking.
   NOT USED ANYMORE */
long read_long(FILE *infile, char *error)
{
    char *gotline;
    char line[500];
    int fields;
    long value;

    gotline = fgets(line, sizeof(line), infile);
    fields = sscanf(line, "%ld", &value);
    if (gotline == NULL || fields != 1) {
        fprintf(stdout, "\nERROR reading %s from options file.\n\n", error);
        exit(1);
    }
    return value;
}

/* Used by read_options to read an integer with error checking.
   NOT USED ANYMORE */
int read_int(FILE *infile, char *error)
{
    char *gotline;
    char line[500];
    int fields;
    int value;

    gotline = fgets(line, sizeof(line), infile);
    fields = sscanf(line, "%d", &value);
    if (gotline == NULL || fields != 1) {
        fprintf(stdout, "\nERROR reading %s from options file.\n\n", error);
        exit(1);
    }
    return value;
}

/*..............................................................................*/

/* Used by read_options to read a double-precision value with error checking.
   NOT USED ANYMORE */
double read_double(FILE *infile, char *error)
{
    char *gotline;
    char line[500];
    int fields;
    double value;

    gotline = fgets(line, sizeof(line), infile);
    fields = sscanf(line, "%le", &value);
    if (gotline == NULL || fields != 1) {
        fprintf(stdout, "\nERROR reading %s from options file.\n\n", error);
        exit(1);
    }
    return value;
}

/*..............................................................................*/
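/* [Editor's illustration, not part of the original source] init_config() in the
   CONFIG INITIALIZATION section below stores particle positions as fractions of
   the box: each coordinate is wrapped into the primary image and then divided by
   the box length. Assuming anint(x) rounds to the nearest integer (as its use
   throughout this file suggests), wrapping a scaled coordinate is just
   x -= anint(x), which maps it into [-0.5, 0.5). Standalone sketch: */
static double example_wrap_fraction(double pos, double boxlen)
{
    double s = pos / boxlen;        /* scale to box-length units */
    return s - floor(s + 0.5);      /* nearest-integer wrap into [-0.5, 0.5) */
}
/* e.g. example_wrap_fraction(12.3, 10.0) == 0.23; the unscaled position is
   later recovered as fraction * boxlen. */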
/****************************************************************************
 * CONFIG INITIALIZATION
 *****************************************************************************/

/* Reads in the initial configuration from the file "config.init". Each line
   contains the three components of the position vector, three components of
   the direction vector and three components of the patch direction for a
   spherocylinder. The direction vector is normalised after being read in.
   The configuration is checked for particle overlaps. */
void init_config(struct topo * topo, struct conf * conf, struct sim * sim, char filename[30])
{
    int err, fields, tmp_type;
    long i, j, current, first;
    FILE * infile;
    char * line, line2[STRLEN];
    size_t line_size = (STRLEN + 1) * sizeof(char);
    line = (char *) malloc(line_size);
    struct particles chorig[MAXCHL];

    int overlap(struct particles, struct particles, struct vector, struct ia_param [MAXT][MAXT]);
    void normalise(struct vector *);
    void ortogonalise(struct vector *, struct vector);
    void usepbc(struct vector *, struct vector);
    double anint(double);
    void strip_comment(char *);
    void trim(char *);
    void aftercommand(char *, char *, char);

    double maxlength = 0;
    for (i = 0; i < MAXT; i++) {
        if (maxlength < topo->ia_params[i][i].len[0])
            maxlength = topo->ia_params[i][i].len[0];
    }

    infile = fopen(filename, "r");
    if (infile == NULL) {
        fprintf(stderr, "\nERROR: Could not open config.init file.\n\n");
        exit(1);
    }

    if (getline(&line, &line_size, infile) == -1) {
        fprintf(stderr, "ERROR: Could not read box size.\n\n");
        exit(1);
    }
    strip_comment(line);
    trim(line);
    if (sscanf(line, "%le %le %le", &(conf->box.x), &(conf->box.y), &(conf->box.z)) != 3) {
        if (getline(&line, &line_size, infile) == -1) {
            fprintf(stderr, "ERROR: Could not read box size.\n\n");
            exit(1);
        }
        aftercommand(line2, line, BOXSEP);
        strip_comment(line2);
        trim(line2);
        if (sscanf(line2, "%le %le %le", &(conf->box.x), &(conf->box.y), &(conf->box.z)) != 3) {
            fprintf(stderr, "ERROR: Could not read box size.\n\n");
            exit(1);
        }
    }
    if (conf->box.x < maxlength * 2.0 + 2.0) {
        printf("WARNING: x box length is less than two spherocylinders long.\n\n");
    }
    if (conf->box.y < maxlength * 2.0 + 2.0) {
        printf("WARNING: y box length is less than two spherocylinders long.\n\n");
    }
    if (conf->box.z < maxlength * 2.0 + 2.0) {
        printf("WARNING: z box length is less than two spherocylinders long.\n\n");
    }

    DEBUG_INIT("Position of the particle");
    for (i = 0; i < topo->npart; i++) {
        if (getline(&line, &line_size, infile) == -1) {
            break;
        }
        strip_comment(line);
        trim(line);
        fields = sscanf(line, "%le %le %le %le %le %le %le %le %le %d",
                        &conf->particle[i].pos.x, &conf->particle[i].pos.y, &conf->particle[i].pos.z,
                        &conf->particle[i].dir.x, &conf->particle[i].dir.y, &conf->particle[i].dir.z,
                        &conf->particle[i].patchdir[0].x, &conf->particle[i].patchdir[0].y, &conf->particle[i].patchdir[0].z,
                        &conf->particle[i].switched);
        conf->particle[i].patchdir[1].x = conf->particle[i].patchdir[1].y = conf->particle[i].patchdir[1].z = 0;
        conf->particle[i].chdir[0].x = conf->particle[i].chdir[0].y = conf->particle[i].chdir[0].z = 0;
        conf->particle[i].chdir[1].x = conf->particle[i].chdir[1].y = conf->particle[i].chdir[1].z = 0;
        DEBUG_INIT("Line: %s\nNumber of Fields: %d", line, fields);
        if (fields == 9) {
            conf->particle[i].switched = 0;
            fprintf(stdout, "WARNING: Particle %ld is assumed to be not switched!\n", i+1);
            fields++;
        }
        if (fields != 10) {
            fprintf(stderr, "ERROR: Could not read coordinates for particle %ld.\n \
Did you specify box size at the beginning?\n\n", i+1);
            free(line);
            exit(1);
        }
        /* Scale position vector to the unit cube */
        usepbc(&conf->particle[i].pos, conf->box);
        conf->particle[i].pos.x /= conf->box.x;
        conf->particle[i].pos.y /= conf->box.y;
        conf->particle[i].pos.z /= conf->box.z;

        if ((topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0] < SP)
            && (DOT(conf->particle[i].dir, conf->particle[i].dir) < ZEROTOL)) {
            //DEBUG_INIT("Geotype = %d < %d", conf->particle[i].geotype,SP);
            fprintf(stderr, "ERROR: Null direction vector supplied for particle %ld.\n\n", i+1);
free(line); exit (1); } else { normalise(&conf->particle[i].dir); } if ((topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0]<SP)&&( DOT(conf->particle[i].patchdir[0], conf->particle[i].patchdir[0]) < ZEROTOL )) { fprintf (stderr, "ERROR: Null patch vector supplied for particle %ld.\n\n", i+1); free(line); exit (1); } else { ortogonalise(&conf->particle[i].patchdir[0],conf->particle[i].dir); normalise(&conf->particle[i].patchdir[0]); } // Switch the type if(conf->particle[i].switched){ if(conf->particle[i].switchtype == 0){ fprintf(stderr, "ERROR: Particle %ld switched even though it has no switchtype", i); free(line); exit(1); } tmp_type = conf->particle[i].type; conf->particle[i].type = conf->particle[i].switchtype; conf->particle[i].switchtype = tmp_type; } DEBUG_INIT("%ld:\t%lf\t%lf\t%lf", i, conf->particle[i].pos.x, conf->particle[i].pos.y, conf->particle[i].pos.z); } free(line); /*Make chains WHOLE*/ for (i=0;i<topo->chainnum;i++){ j=0; current = topo->chainlist[i][0]; first = current; chorig[0].pos = conf->particle[first].pos; while (current >=0 ) { /*shift the chain particle by first one*/ conf->particle[current].pos.x -= chorig[0].pos.x; conf->particle[current].pos.y -= chorig[0].pos.y; conf->particle[current].pos.z -= chorig[0].pos.z; /*put it in orig box*/ conf->particle[current].pos.x -= anint(conf->particle[current].pos.x); conf->particle[current].pos.y -= anint(conf->particle[current].pos.y); conf->particle[current].pos.z -= anint(conf->particle[current].pos.z); //printf("ant: %f %f %f\n",conf->particle[current].pos.x,conf->particle[current].pos.y,conf->particle[current].pos.z); /*shot it back*/ conf->particle[current].pos.x += chorig[0].pos.x; conf->particle[current].pos.y += chorig[0].pos.y; conf->particle[current].pos.z += chorig[0].pos.z; //printf("posstart: %f %f %f\n",conf->particle[current].pos.x,conf->particle[current].pos.y,conf->particle[current].pos.z); j++; current = topo->chainlist[i][j]; } } err = 0; //for (i=0; i < topo->npart-1; i++) { // for (j=i+1; j < topo->npart; j++) { // if ( overlap(conf->particle[i], conf->particle[j], conf->box, topo->ia_params) ) { // fprintf (stderr, // "ERROR: Overlap in initial coniguration between particles %ld and %ld.\n", // i+1, j+1); // err = 1; // } // } //} if (err) { printf ("\n"); exit (1); } fclose (infile); fflush (stdout); } /*..............................................................................*/ /**************************************************************************** * TOPOLOGY INITIALIZATION *****************************************************************************/ /* Create lists for chain operations: Connectivity list where it is written for each sc with which sc it is connected. The order is important because spherocylinders have direction First is interacting tail then head. 
Chain list where particles are assigned to chains to which they belong */ void init_top(struct topo * topo, struct conf * conf, struct sim * sim,char filename[30]) { long i,j,k,mol,maxch,maxpart; FILE *infile; char *pline=NULL, *dummy=NULL, *sysnames[MAXN]; char line[STRLEN], keystr[STRLEN], molname[STRLEN]; unsigned size; long *sysmoln /*[MAXN]*/; BOOL exclusions[MAXT][MAXT]; char *fgets2(char *, int , FILE *); void strip_comment (char *); void trim(char *); int continuing(char *); void upstring (char *); int filltypes(char **, struct topo * topo); int fillexter(char **, struct topo * topo); int fillexclusions(char **, BOOL (*exclusions)[MAXT][MAXT]); void beforecommand(char *, char *, char); int fillmol(char *, char *, struct molecule * molecules, struct topo * topo); int fillsystem(char *, char *[MAXN], long **); void initparams(struct topo * topo); void genparampairs(struct topo * topo, BOOL (*exclusions)[MAXT][MAXT]); int topdealoc(char **, char *[MAXN], long **, struct molecule *); struct molecule molecules[MAXMT]; if ((infile = fopen(filename, "r")) == NULL) { fprintf (stderr, "\nTOPOLOGY ERROR: Could not open top.init file.\n\n"); exit (1); } fprintf (stdout, "Initialize chainlist...\n"); fflush(stdout); sysmoln = malloc( sizeof(long)*MAXN); if(sysmoln == NULL){ fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sysmoln"); exit(1); } struct particles tmp_particles[MAXN]; for (i=0;i<MAXN;i++) { if (i < MAXMT) { topo->chainparam[i].bond1eq = -1; topo->chainparam[i].bond1c = -1; topo->chainparam[i].bond2eq = -1; topo->chainparam[i].bond2c = -1; topo->chainparam[i].bonddc = -1; topo->chainparam[i].angle1eq = -1; topo->chainparam[i].angle1c = -1; topo->chainparam[i].angle2eq = -1; topo->chainparam[i].angle2c = -1; molecules[i].name = NULL; molecules[i].type = malloc(sizeof(long)*MAXN); molecules[i].switchtype = malloc(sizeof(long)*MAXN); molecules[i].delta_mu = malloc(sizeof(double)*MAXN); for (j=0;j<MAXN;j++) { molecules[i].type[j] = -1; } } for (j = 0; j < MAXCHL; j++){ topo->chainlist[i][j] = -1; } sysnames[i]=NULL; } for (i=0;i<MAXT;i++) { for (j=0;j<MAXT;j++) { exclusions[i][j]=FALSE; } } topo->exter.exist = FALSE; topo->exter.thickness = 0.0; topo->exter.epsilon = 0.0; topo->exter.attraction = 0.0; topo->exter.sqmaxcut = 0.0; for(i = 0; i < MAXT; i++){ for(j = 0; j < MAXT; j++){ for(k = 0; k < 2; k++){ topo->ia_params[i][j].geotype[k] = 0; } } } fprintf (stdout, "Reading topology...\n"); fflush(stdout); molname[0] = ' '; initparams(topo); pline=malloc((size_t)STRLEN); while (fgets2(line,STRLEN-2,infile) != NULL) { strcpy(pline,line); if (!pline) fprintf (stderr, "\nTOPOLOGY ERROR: Empty line in topology.\n\n"); /* build one long line from several fragments */ while (continuing(line) && (fgets2(line,STRLEN-1,infile) != NULL)) { size=strlen(pline)+strlen(line)+1; free(pline); pline=malloc((size_t)size); strcat(pline,line); } /* skip trailing and leading spaces and comment text */ strip_comment (pline); trim (pline); /* if there is something left... 
*/ if ((int)strlen(pline) > 0) { // get the [COMMAND] key if (pline[0] == OPENKEY) { pline[0] = ' '; beforecommand(keystr,pline,CLOSEKEY); upstring (keystr); } else { //DEBUG fprintf (stdout, "Topology read type:%s, %s \n",keystr,pline); if (!strcmp(keystr,"TYPES")) { fflush(stdout); if (!filltypes(&pline, topo)) { DEBUG_INIT("Something went wrong with filltypes"); fprintf (stderr, "\nTOPOLOGY ERROR: in reading types\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } DEBUG_INIT("back in init_top"); } else{ if (!strcmp(keystr,"MOLECULES")){ DEBUG_INIT("Let's go to the molecules"); if (molname[0] == ' ') { beforecommand(molname,pline,SEPARATOR); i=0; while (molecules[i].name != NULL) i++; DEBUG_INIT("in the middle of getting to fillmol"); molecules[i].name = malloc(strlen(molname)+1); strcpy(molecules[i].name, molname); fprintf (stdout, "Topology read for molecule: %s \n",molname); } if (!fillmol(molname, pline, molecules, topo)) { fprintf (stderr, "\nTOPOLOGY ERROR: in reading molecules\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } if ((dummy = strchr (pline,CLOSEMOL)) != NULL) molname[0] = ' '; } else { if (!strcmp(keystr,"SYSTEM")) { if (!fillsystem(pline,sysnames,&sysmoln)) { fprintf (stderr, "\nTOPOLOGY ERROR: in reading system\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } } else { if (!strcmp(keystr,"EXTER")) { fflush(stdout); if (!fillexter(&pline, topo)) { DEBUG_INIT("Something went wrong with external potential"); fprintf (stderr, "\nTOPOLOGY ERROR: in reading external potential\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } } else { if (!strcmp(keystr,"EXCLUDE")) { fflush(stdout); if (!fillexclusions(&pline,&exclusions)) { DEBUG_INIT("Something went wrong with exclusions potential"); fprintf (stderr, "\nTOPOLOGY ERROR: in reading exclusions\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } } else { fprintf (stderr, "\nTOPOLOGY ERROR: invalid keyword:%s.\n\n", keystr); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } } } } } } } } /*we have sucessfully read topology*/ if (pline !=NULL) free(pline); pline=NULL; fclose (infile); fflush (stdout); /*fill ia_params combinations*/ fprintf (stdout, "\nTopology succesfully read. 
Generating pair interactions...\n"); genparampairs(topo,&exclusions); double maxlength = 0; for(i = 0; i < MAXT; i++){ if(maxlength < topo->ia_params[i][i].len[0]) maxlength = topo->ia_params[i][i].len[0]; } topo->sqmaxcut += maxlength+2; topo->sqmaxcut *= 1.1; topo->maxcut = topo->sqmaxcut; topo->sqmaxcut = topo->sqmaxcut*topo->sqmaxcut; topo->exter.sqmaxcut += maxlength; topo->exter.sqmaxcut *= topo->exter.sqmaxcut*1.1; /*TODO fill chain list and maxch, park particle type*/ fprintf (stdout, "Generating chainlist...\n"); maxch=0; maxpart=0; i=0; while (sysnames[i]!=NULL) { mol=0; while (strcmp(molecules[mol].name,sysnames[i])) { mol++; if (molecules[mol].name == NULL) { fprintf (stderr, "TOPOLOGY ERROR: molecules %s is not defined.\n\n",sysnames[i]); topdealoc(&pline,sysnames,&sysmoln, molecules); exit(1); } } for (j=0;j<sysmoln[i];j++) { //DEBUG fprintf (stdout, "molnames %s sysname %s sysnum %ld \n",molnames[mol],sysnames[i],sysmoln[i]); k=0; while (molecules[mol].type[k] != -1) { tmp_particles[maxpart].type = molecules[mol].type[k]; tmp_particles[maxpart].switchtype = molecules[mol].switchtype[k]; tmp_particles[maxpart].delta_mu = molecules[mol].delta_mu[k]; tmp_particles[maxpart].chaint = mol; tmp_particles[maxpart].chainn = maxch; if (k > MAXCHL) { fprintf (stderr, "TOPOLOGY ERROR: more particles in chan (%ld) than allowed(%d).\n",k,MAXCHL); fprintf (stderr, "Change MAXCHL in source and recompile the program. \n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit(1); } if (molecules[mol].type[1] != -1) { topo->chainlist[maxch][k] = maxpart; } k++; maxpart++; if (maxpart > MAXN) { fprintf (stderr, "TOPOLOGY ERROR: more particles(%ld) than allowed(%d).\n",maxpart,MAXN); fprintf (stderr, "Change MAXN in source and recompile the program. \n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit(1); } } if (molecules[mol].type[1] != -1) { maxch++; } } i++; } topo->npart = maxpart; /* write the particles from the temporary to the "permanent" conf */ conf->particle = malloc(sizeof(struct particles) * topo->npart); if(conf->particle == NULL){ fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for conf->particle"); exit(1); } for(i = 0; i < topo->npart; i++){ conf->particle[i].type = tmp_particles[i].type; conf->particle[i].switchtype = tmp_particles[i].switchtype; conf->particle[i].delta_mu = tmp_particles[i].delta_mu; conf->particle[i].chaint = tmp_particles[i].chaint; conf->particle[i].chainn = tmp_particles[i].chainn; } /* Initialize the clusterlist */ sim->clusterlist = malloc(sizeof(long) * topo->npart); if(sim->clusterlist == NULL){ fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sim->clusterlist!"); exit(1); } sim->clustersenergy = malloc(sizeof(double) * topo->npart); if(sim->clustersenergy== NULL){ fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sim->clustersenergy!"); exit(1); } sim->clusters = NULL; /* get all the particles with switch type */ long switchlist[topo->npart]; long n_switch_part = 0; for(i = 0; i < topo->npart; i++){ if(conf->particle[i].type != conf->particle[i].switchtype){ switchlist[n_switch_part] = i; n_switch_part++; } } topo->n_switch_part = n_switch_part; if (n_switch_part == 0 && sim->switchprob > 0){ fprintf(stderr, "TOPOLOGY WARNING: No switchable particles found, but probability for a switch is not zero!\n"); sim->switchprob = 0; fprintf(stderr, "TOPOLOGY WARNING: We changed Switch Probability to zero in this run!\n"); } topo->switchlist=NULL; if (n_switch_part > 0){ topo->switchlist = 
malloc(sizeof(long) * n_switch_part); for(i = 0; i < n_switch_part; i++){ topo->switchlist[i] = switchlist[i]; //DEBUG //printf("%ld is in switchlist\n", switchlist[i]); } } j = 0; while (topo->chainlist[j][0] >= 0) { j++; } topo->chainnum = j; if (topo->chainnum != maxch) { fprintf (stderr, "TOPOLOGY ERROR: Maximum number of chains(%ld) does not agree with number of chains (%ld)\n\n",maxch,topo->chainnum); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } k=0; /*clear connectivity and then fill it from chain list*/ fprintf (stdout, "Generating connectivity...\n"); for (i=0; i<MAXN; i++) { topo->conlist[i][0] = -1; topo->conlist[i][1] = -1; topo->conlist[i][2] = -1; topo->conlist[i][3] = -1; } conf->sysvolume = 0; for (i=0; i<maxpart; i++) { for (j=0; j<MAXCHL; j++) { if (topo->chainlist[i][j] >= 0) { k = topo->chainlist[i][j]; if ((j+1 < MAXCHL)&&(topo->chainlist[i][j+1] >= 0)) topo->conlist[k][1] = topo->chainlist[i][j+1]; /*if there is a next particle fill it to head bond*/ if (j > 0) topo->conlist[k][0] = topo->chainlist[i][j-1]; /*if this is not first particle fill tail bond*/ if ((j+2 < MAXCHL)&& (topo->chainlist[i][j+2] >= 0)) topo->conlist[k][3] = topo->chainlist[i][j+2]; /*if there is a second next particle fill it second neighbour*/ if (j > 1) topo->conlist[k][2] = topo->chainlist[i][j-2]; /*if this is not second or first particle fill second tail bond*/ } } conf->sysvolume += topo->ia_params[conf->particle[i].type][conf->particle[i].type].volume; } /*DEBUG for (i=0; i<MAXN; i++) { for (j=0; j<MAXCHL; j++) { fprintf (stderr, " %d",chainlist[i][j]); } fprintf (stderr, " \n"); } for (i=0; i<MAXN; i++) { printf (" %ld %ld %ld %ld\n",conlist[i][0],conlist[i][1],conlist[i][2],conlist[i][3]); } */ // Mark particles as not switched for(i = 0; i < maxpart; i++){ conf->particle[i].switched = 0; } topdealoc(&pline,sysnames,&sysmoln, molecules); DEBUG_INIT("Finished with reading the topology"); /* Parallel tempering check */ #ifdef MPI // probability to switch replicas = exp ( -0.5 * dT*dT * N / (1 + dT) ) printf("Probability to switch replicas is roughly: %f\n",exp(-0.5 * maxpart * sim->dtemp * sim->dtemp / (1.0 + sim->dtemp)) ); #endif } /*..........................................................................*/ /*dealocting memory for init_top*/ int topdealoc(char **pline,char *sysnames[MAXN], long **sysmoln, struct molecule * molecules) { long i; if ((*pline) != NULL) free((*pline)); (*pline)=NULL; if ((*sysmoln) != NULL) free((*sysmoln)); (*sysmoln)=NULL; for (i=0;i<MAXN;i++) { if (i < MAXMT) { free(molecules[i].name); free(molecules[i].type); free(molecules[i].switchtype); free(molecules[i].delta_mu); } if ((sysnames[i]) != NULL) free(sysnames[i]); sysnames[i]=NULL; } return 0; } /* initiate vectors of a single particle*/ void int_partvec(long target, struct ia_param * ia_parami, struct conf * conf ) { struct quat quatrot; struct quat quat_create(struct vector, double, double); void vec_rotate(struct vector *, struct quat); void normalise(struct vector *); void ortogonalise(struct vector *,struct vector); if ( (ia_parami->geotype[0] == SCA) || (ia_parami->geotype[0] == SCN) ){ /*SCA and SCN are isotropic... 
nothing to initialize*/ return; } normalise (&conf->particle[target].dir); ortogonalise(&conf->particle[target].patchdir[0],conf->particle[target].dir); /*calculate patch sides*/ if ( (ia_parami->geotype[0] == PSC) || (ia_parami->geotype[0] == CPSC) || (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) ){ /* rotate patch vector by half size of patch*/ conf->particle[target].patchsides[0] = conf->particle[target].patchdir[0]; quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[0], ia_parami->psinhalfi[0]); vec_rotate(&(conf->particle[target].patchsides[0]),quatrot); /*second side*/ conf->particle[target].patchsides[1] = conf->particle[target].patchdir[0]; quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[0], -1.0*ia_parami->psinhalfi[0]); vec_rotate(&(conf->particle[target].patchsides[1]),quatrot); } /*calculate second patchdir*/ if ( (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) || (ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC)){ conf->particle[target].patchdir[1] = conf->particle[target].patchdir[0]; quatrot=quat_create(conf->particle[target].dir, ia_parami->csecpatchrot[0], ia_parami->ssecpatchrot[0]); vec_rotate(&(conf->particle[target].patchdir[1]),quatrot); ortogonalise(&conf->particle[target].patchdir[1],conf->particle[target].dir); } /*calculate second patch sides*/ if ( (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) ){ /* rotate patch vector by half size of patch*/ conf->particle[target].patchsides[2] = conf->particle[target].patchdir[1]; quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[2], ia_parami->psinhalfi[2]); vec_rotate(&(conf->particle[target].patchsides[2]),quatrot); /*second side*/ conf->particle[target].patchsides[3] = conf->particle[target].patchdir[1]; quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[2], -1.0*ia_parami->psinhalfi[2]); vec_rotate(&(conf->particle[target].patchsides[3]),quatrot); } /*calculate chdir vector*/ if ( (ia_parami->geotype[0] == CHPSC) || (ia_parami->geotype[0] == CHCPSC) || (ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC)){ conf->particle[target].chdir[0] = conf->particle[target].dir; quatrot = quat_create(conf->particle[target].patchdir[0], ia_parami->chiral_cos[0], ia_parami->chiral_sin[0]); vec_rotate(&(conf->particle[target].chdir[0]), quatrot); /* rotate patch vector by half size of patch*/ conf->particle[target].patchsides[0] = conf->particle[target].patchdir[0]; quatrot=quat_create(conf->particle[target].chdir[0], ia_parami->pcoshalfi[0], ia_parami->psinhalfi[0]); vec_rotate(&(conf->particle[target].patchsides[0]),quatrot); /*second side*/ conf->particle[target].patchsides[1] = conf->particle[target].patchdir[0]; quatrot=quat_create(conf->particle[target].chdir[0], ia_parami->pcoshalfi[0], -1.0*ia_parami->psinhalfi[0]); vec_rotate(&(conf->particle[target].patchsides[1]),quatrot); } /*calculate chdir vector for seond patch*/ if ( (ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC) ){ conf->particle[target].chdir[1] = conf->particle[target].dir; quatrot = quat_create(conf->particle[target].patchdir[1], ia_parami->chiral_cos[0], ia_parami->chiral_sin[0]); vec_rotate(&(conf->particle[target].chdir[1]), quatrot); /* rotate patch vector by half size of patch to get sides*/ conf->particle[target].patchsides[2] = conf->particle[target].patchdir[1]; quatrot=quat_create(conf->particle[target].chdir[1], ia_parami->pcoshalfi[2], 
ia_parami->psinhalfi[2]); vec_rotate(&(conf->particle[target].patchsides[2]),quatrot); /*second side*/ conf->particle[target].patchsides[3] = conf->particle[target].patchdir[1]; quatrot=quat_create(conf->particle[target].chdir[1], ia_parami->pcoshalfi[2], -1.0*ia_parami->psinhalfi[2]); vec_rotate(&(conf->particle[target].patchsides[3]),quatrot); } } /* calculate vectors on particles for speedup*/ void partvecinit(struct topo * topo, struct sim * sim, struct conf * conf ) { long i; void int_partvec(long target, struct ia_param *, struct conf * conf ); for(i = 0; i < topo->npart; i++){ if ( topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0] < SP) int_partvec(i,&(topo->ia_params[conf->particle[i].type][conf->particle[i].type]),conf); } } /*generate interations pairs*/ void genparampairs(struct topo * topo, BOOL (*exclusions)[MAXT][MAXT]) { int i,j,k; int a[2]; int len; double length = 0; // The length of a PSC, currently only one is allow, ie implemented for (i=0;i<MAXT;i++) { for (j=0;j<MAXT;j++) { if (i!=j) { if((topo->ia_params[j][j].geotype[0] != 0) && (topo->ia_params[i][i].geotype[0] != 0)){ a[0] = i; a[1] = j; for(k = 0; k < 2; k++){ topo->ia_params[i][j].geotype[k] = topo->ia_params[a[k]][a[k]].geotype[0]; topo->ia_params[i][j].len[k] = topo->ia_params[a[k]][a[k]].len[0]; if (topo->ia_params[a[k]][a[k]].len[0] > 0){ if (length == 0){ length = topo->ia_params[a[k]][a[k]].len[0]; } else if (length > 0){ if (length != topo->ia_params[a[k]][a[k]].len[0]){ fprintf(stderr, "Error: "); fprintf(stderr, "Different lengths for spherocylinders have not been implemented yet!\n"); fprintf(stderr, "\tCheck the length of type %d!\n", a[k]); exit(1); } } } topo->ia_params[i][j].half_len[k] = topo->ia_params[a[k]][a[k]].half_len[0]; /* Handle angles only, when geotype is a patchs sphero cylinder */ if(topo->ia_params[i][j].geotype[k] >= PSC && topo->ia_params[i][j].geotype[k] < SP){ topo->ia_params[i][j].pangl[k] = topo->ia_params[a[k]][a[k]].pangl[0]; topo->ia_params[i][j].panglsw[k] = topo->ia_params[a[k]][a[k]].panglsw[0]; topo->ia_params[i][j].pcangl[k] = cos(topo->ia_params[i][j].pangl[k]/2.0/180*PI); topo->ia_params[i][j].pcanglsw[k] = cos((topo->ia_params[i][j].pangl[k]/2.0+topo->ia_params[i][j].panglsw[k])/180*PI); topo->ia_params[i][j].pcoshalfi[k] = cos((topo->ia_params[i][j].pangl[k]/2.0+topo->ia_params[i][j].panglsw[k])/2.0/180*PI); topo->ia_params[i][j].psinhalfi[k] = sqrt(1.0 - topo->ia_params[i][j].pcoshalfi[k] * topo->ia_params[i][j].pcoshalfi[k]); } /* Only when the PSC is chiral */ if( (topo->ia_params[i][j].geotype[k] == CHCPSC) || (topo->ia_params[i][j].geotype[k] == CHPSC) \ || (topo->ia_params[i][j].geotype[k] == TCHCPSC) || (topo->ia_params[i][j].geotype[k] == TCHPSC) ){ topo->ia_params[i][j].chiral_cos[k] = topo->ia_params[a[k]][a[k]].chiral_cos[0]; topo->ia_params[i][j].chiral_sin[k] = topo->ia_params[a[k]][a[k]].chiral_sin[0]; } /* Information of two patches */ if( (topo->ia_params[i][j].geotype[k] == TCPSC) || (topo->ia_params[i][j].geotype[k] == TPSC) \ || (topo->ia_params[i][j].geotype[k] == TCHCPSC) || (topo->ia_params[i][j].geotype[k] == TCHPSC) ){ topo->ia_params[i][j].csecpatchrot[k] = topo->ia_params[a[k]][a[k]].csecpatchrot[0]; topo->ia_params[i][j].ssecpatchrot[k] = topo->ia_params[a[k]][a[k]].ssecpatchrot[0]; topo->ia_params[i][j].pangl[k+2] = topo->ia_params[a[k]][a[k]].pangl[2]; topo->ia_params[i][j].panglsw[k+2] = topo->ia_params[a[k]][a[k]].panglsw[2]; topo->ia_params[i][j].pcangl[k+2] = cos(topo->ia_params[i][j].pangl[k+2]/2.0/180*PI); 
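// (Added worked example with hypothetical numbers, for clarity: for a patch
// angle pangl = 90 and a switch range panglsw = 5, the constants precomputed
// in this block come out as pcangl = cos(45 deg) ~ 0.7071, pcanglsw =
// cos(50 deg) ~ 0.6428, pcoshalfi = cos(25 deg) ~ 0.9063 and
// psinhalfi = sqrt(1 - 0.9063^2) ~ 0.4226.)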
topo->ia_params[i][j].pcanglsw[k+2] = cos((topo->ia_params[i][j].pangl[k+2]/2.0+topo->ia_params[i][j].panglsw[k+2])/180*PI); topo->ia_params[i][j].pcoshalfi[k+2] = cos((topo->ia_params[i][j].pangl[k+2]/2.0+topo->ia_params[i][j].panglsw[k+2])/2.0/180*PI); topo->ia_params[i][j].psinhalfi[k+2] = sqrt(1.0 - topo->ia_params[i][j].pcoshalfi[k+2] * topo->ia_params[i][j].pcoshalfi[k+2]); } } len = strlen(topo->ia_params[i][i].name); strncpy(topo->ia_params[i][j].name, topo->ia_params[i][i].name, len + 1); len = strlen(topo->ia_params[i][i].other_name); strncpy(topo->ia_params[i][j].other_name, topo->ia_params[i][i].other_name, len + 1); topo->ia_params[i][j].sigma = AVER(topo->ia_params[i][i].sigma,topo->ia_params[j][j].sigma); topo->ia_params[i][j].epsilon = sqrt(topo->ia_params[i][i].epsilon * topo->ia_params[j][j].epsilon); topo->ia_params[i][j].pswitch = AVER(topo->ia_params[i][i].pswitch,topo->ia_params[j][j].pswitch); topo->ia_params[i][j].rcutwca = (topo->ia_params[i][j].sigma)*pow(2.0,1.0/6.0); // Averaging of the flat part of attraction topo->ia_params[i][j].pdis = AVER(topo->ia_params[i][i].pdis - topo->ia_params[i][i].rcutwca, \ topo->ia_params[j][j].pdis - topo->ia_params[j][j].rcutwca) + topo->ia_params[i][j].rcutwca; topo->ia_params[i][j].rcut = topo->ia_params[i][j].pswitch+topo->ia_params[i][j].pdis; // if not non-attractive == if attractive if (!((topo->ia_params[i][j].geotype[0] % 10 == 0) || (topo->ia_params[i][j].geotype[1] % 10 == 0))){ if (topo->ia_params[i][j].rcutwca > topo->ia_params[i][j].rcut){ fprintf(stderr, "Error: Repulsive cutoff is larger than the attractive cutoff!\n"); fprintf(stderr, " between %d and %d: %lf > %lf\n", i, j, topo->ia_params[i][j].rcutwca, topo->ia_params[i][j].rcut); } } if ( topo->ia_params[i][j].rcutwca > topo->sqmaxcut ) topo->sqmaxcut = topo->ia_params[i][j].rcutwca; if ( topo->ia_params[i][j].rcut > topo->sqmaxcut ) topo->sqmaxcut = topo->ia_params[i][j].rcut; } } } /*filling interaction with external potential*/ if( (topo->exter.exist) && (topo->ia_params[i][i].geotype[0] != 0)){ /*use everything like for given particles except distance and attraction, which is generated as for other interactions*/ topo->exter.interactions[i] = topo->ia_params[i][i]; topo->exter.interactions[i].sigma = AVER(topo->ia_params[i][i].sigma, topo->exter.thickness); topo->exter.interactions[i].rcutwca = (topo->exter.interactions[i].sigma)*pow(2.0,1.0/6.0); topo->exter.interactions[i].epsilon = sqrt(topo->ia_params[i][i].epsilon * topo->exter.epsilon); topo->exter.interactions[i].pswitch = AVER(topo->ia_params[i][i].pswitch, topo->exter.attraction); topo->exter.interactions[i].pdis = AVER(topo->ia_params[i][i].pdis - topo->ia_params[i][i].rcutwca, 0.0) + topo->exter.interactions[i].rcutwca; topo->exter.interactions[i].rcut = topo->exter.interactions[i].pswitch + topo->exter.interactions[i].pdis; if (topo->exter.interactions[i].rcut > topo->exter.sqmaxcut ) topo->exter.sqmaxcut = topo->exter.interactions[i].rcut; } } for (i=0;i<MAXT;i++) { for (j=0;j<MAXT;j++) { if ( (*exclusions)[i][j] ) topo->ia_params[i][j].epsilon = 0.0; } } } /*initialize parameters for interactions*/ void initparams(struct topo * topo) { int i,j,k; for (i=0;i<MAXT;i++) { for (j=0;j<MAXT;j++) { for(k = 0; k < 2; k++){ topo->ia_params[i][j].geotype[k] = 0; topo->ia_params[i][j].len[k] = 0.0; topo->ia_params[i][j].half_len[k] = 0.0; topo->ia_params[i][j].chiral_cos[k] = 0.0; topo->ia_params[i][j].chiral_sin[k] = 0.0; topo->ia_params[i][j].csecpatchrot[k] = 0.0; 
topo->ia_params[i][j].ssecpatchrot[k] = 0.0; } for(k = 2; k < 4; k++){ topo->ia_params[i][j].pangl[k] = 0.0; topo->ia_params[i][j].panglsw[k] = 0.0; topo->ia_params[i][j].pcangl[k] = 0.0; topo->ia_params[i][j].pcanglsw[k] = 0.0; topo->ia_params[i][j].pcoshalfi[k] = 0.0; topo->ia_params[i][j].psinhalfi[k] = 0.0; } topo->ia_params[i][j].sigma = 0.0; topo->ia_params[i][j].epsilon = 0.0; topo->ia_params[i][j].rcutwca = 0.0; topo->ia_params[i][j].pdis = 0.0; topo->ia_params[i][j].pswitch = 0.0; topo->ia_params[i][j].rcut = 0.0; topo->ia_params[i][j].volume = 0.0; topo->ia_params[i][j].pvolscale = 0.0; } } topo->sqmaxcut = 0; } /*...........................................................................*/ /*filling the system parameters*/ int fillsystem(char *pline, char *sysnames[MAXN], long **sysmoln) { int i,fields; char zz[STRLEN]; void trim (char *); trim(pline); if (!pline) { fprintf (stderr, "TOPOLOGY ERROR: obtained empty line in fill system.\n\n"); return 0; } i=0; while (sysnames[i]!=NULL) i++; fields = sscanf(pline, "%s %ld", zz, &(*sysmoln)[i]); if (fields != 2) { fprintf (stderr, "TOPOLOGY ERROR: failed reading system from (%s).\n\n", pline); return 0; } sysnames[i]=malloc(strlen(zz)+1); strcpy(sysnames[i],zz); if ((*sysmoln)[i] < 1) { fprintf (stderr, "TOPOLOGY ERROR: cannot have %ld molecules.\n\n", (*sysmoln)[i]); return 0; } fprintf (stdout, "system: %s %ld\n",sysnames[i],(*sysmoln)[i]); return 1; } /*filling the parameters for molecules*/ int fillmol(char *molname, char *pline, struct molecule * molecules, struct topo * topo) { DEBUG_INIT("fillmol has just been called!"); char str[STRLEN],str2[STRLEN],molcommand[STRLEN],molparams[STRLEN]; int i,j,fields; double bondk,bonddist; void trim (char *); void upstring(char *); void beforecommand(char *, char *, char); void aftercommand(char *, char *, char); beforecommand(str2, pline, CLOSEMOL); aftercommand(str, str2, OPENMOL); trim(str); if (strlen(str) == 0) return 1; beforecommand(molcommand,str,SEPARATOR); aftercommand(molparams,str,SEPARATOR); trim(molcommand); trim(molparams); upstring (molcommand); DEBUG_INIT("molcommand: %s", molcommand); DEBUG_INIT("molparams: %s", molparams); i=0; while (strcmp(molecules[i].name, molname)) i++; j=0; while (molecules[i].type[j] != -1) j++; if (!strcmp(molcommand,"PARTICLES")) { fprintf (stdout, "particle %d: \t", j + 1); fields = sscanf(molparams,"%ld %ld %lf",molecules[i].type + j, molecules[i].switchtype + j, molecules[i].delta_mu + j); fprintf (stdout, "%ld ",molecules[i].type[j]); if (fields == 1){ (molecules[i].switchtype[j]) = (molecules[i].type[j]); (molecules[i].delta_mu[j]) = 0; fields = 3; } else{ fprintf(stdout, "(with switchtype: %ld and delta_mu: %lf)", molecules[i].switchtype[j], molecules[i].delta_mu[j]); } if (fields != 3) { fprintf (stderr, "TOPOLOGY ERROR: could not read a particle.\n\n"); return 0; } fflush(stdout); if (molecules[i].type[j] < 0) { fprintf (stderr, "TOPOLOGY ERROR: particles include negative type.\n\n"); return 0; } if (molecules[i].type[j] >= MAXT) { fprintf (stderr, "TOPOLOGY ERROR: particles include type out of range 0-%ld.\n\n",(long)MAXT-1); return 0; } fprintf (stdout, "\n"); return 1; } if (!strcmp(molcommand,"BOND1")) { fields = sscanf(molparams, "%le %le ", &bondk, &bonddist); if (fields < 2) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bond1, should be 2.\n\n"); return 0; } if (bonddist < 0) { fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist); return 0; } topo->chainparam[i].bond1c = 
bondk; topo->chainparam[i].bond1eq = bonddist; fprintf (stdout, "bond1: %f %f \n",topo->chainparam[i].bond1c,topo->chainparam[i].bond1eq); return 1; } if (!strcmp(molcommand,"BOND2")) { fields = sscanf(molparams, "%le %le ", &bondk, &bonddist); if (fields < 2) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bond2, should be 2.\n\n"); return 0; } if (bonddist < 0) { fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist); return 0; } topo->chainparam[i].bond2c = bondk; topo->chainparam[i].bond2eq = bonddist; fprintf (stdout, "bond2: %f %f \n",topo->chainparam[i].bond2c,topo->chainparam[i].bond2eq); return 1; } if (!strcmp(molcommand,"BONDD")) { fields = sscanf(molparams, "%le %le ", &bondk, &bonddist); if (fields < 2) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bondd, should be 2.\n\n"); return 0; } if (bonddist < 0) { fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist); return 0; } topo->chainparam[i].bonddc = bondk; topo->chainparam[i].bonddeq = bonddist; fprintf (stdout, "bondd: %f %f \n",topo->chainparam[i].bonddc,topo->chainparam[i].bonddeq); return 1; } if (!strcmp(molcommand,"ANGLE1")) { fields = sscanf(molparams, "%le %le ", &bondk, &bonddist); if (fields < 2) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for angle1, should be 2.\n\n"); return 0; } if (bonddist < 0) { fprintf (stderr, "TOPOLOGY ERROR: equilibrium angle cannot be negative: %f \n\n",bonddist); return 0; } topo->chainparam[i].angle1c = bondk; topo->chainparam[i].angle1eq = bonddist/180.0*PI; fprintf (stdout, "angle1: %f %f \n",topo->chainparam[i].angle1c,topo->chainparam[i].angle1eq); return 1; } if (!strcmp(molcommand,"ANGLE2")) { fields = sscanf(molparams, "%le %le ", &bondk, &bonddist); if (fields < 2) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for angle2, should be 2.\n\n"); return 0; } if (bonddist < 0) { fprintf (stderr, "TOPOLOGY ERROR: equilibrium angle cannot be negative: %f \n\n",bonddist); return 0; } topo->chainparam[i].angle2c = bondk; topo->chainparam[i].angle2eq = bonddist/180.0*PI; fprintf (stdout, "angle2: %f %f \n",topo->chainparam[i].angle2c,topo->chainparam[i].angle2eq); return 1; } fprintf (stderr, "TOPOLOGY ERROR: unknown parameter: %s.\n\n",molcommand); return 0; } /* Converts the geometrical type string into a number */ int convert_geotype(char * geotype){ if (strcmp(geotype, "CPSC") == 0) return CPSC; if (strcmp(geotype, "CHCPSC") == 0) return CHCPSC; if (strcmp(geotype, "SCA") == 0) return SCA; if (strcmp(geotype, "PSC") == 0) return PSC; if (strcmp(geotype, "CHPSC") == 0) return CHPSC; if (strcmp(geotype, "TCPSC") == 0) return TCPSC; if (strcmp(geotype, "TCHCPSC") == 0) return TCHCPSC; if (strcmp(geotype, "TPSC") == 0) return TPSC; if (strcmp(geotype, "TCHPSC") == 0) return TCHPSC; if (strcmp(geotype, "SPN") == 0) return SPN; if (strcmp(geotype, "SPA") == 0) return SPA; return 0; } /*filling the parameters of external potentail - wall. 
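(Illustrative note, not from the original source: the parameters are read in the order thickness, epsilon, attraction distance, so an EXTER entry carrying the hypothetical values "1.0 1.0 3.0" would define a wall of thickness 1.0 with epsilon 1.0 and an attraction range of 3.0; giving fewer numbers simply leaves the later parameters unset.)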
Returns 1 on success.*/ int fillexter(char **pline, struct topo * topo) { int fields; double param[3]; /* 0: thickness * 1: epsilon * 2: attraction */ char typestr[STRLEN], paramstr[STRLEN]; void trim (char *); void beforecommand(char *, char *, char); void aftercommand(char *, char *, char); beforecommand(typestr, *pline, SEPARATOR); aftercommand(paramstr, *pline, SEPARATOR); fields = sscanf(paramstr, "%le %le %le", &param[0], &param[1], &param[2]); if (fields >3) { fprintf (stderr, "TOPOLOGY ERROR: too many parameters for external potential. We have \ thickness, epsilon, and attraction distance so far.\n\n"); return 0; } if (fields >0) { topo->exter.exist = TRUE; topo->exter.thickness = param[0]; fprintf(stdout, "External potential with thickness: %le ",topo->exter.thickness); if (fields >1) { topo->exter.epsilon = param[1]; fprintf(stdout, "epsilon: %le ",topo->exter.epsilon); if (fields >2) { topo->exter.attraction = param[2]; fprintf(stdout, "and range of attraction: %le ",topo->exter.attraction); } } } else{ topo->exter.exist = FALSE; fprintf(stdout, "No external potential "); } fprintf(stdout, " \n"); DEBUG_INIT("Finished filling external potential"); return 1; } /*filling pairs for which we exclude attractive interactions. Returns 1 on success.*/ int fillexclusions(char **pline, BOOL (*exclusions)[MAXT][MAXT]) { long num1,num2; char *pline1, *pline2; void trim (char *); num1 = strtol(*pline, &pline2, 10); trim(pline2); if ((int)strlen(pline2) > 0) { num2 = strtol(pline2, &pline1, 10); trim(pline1); (*exclusions)[num1][num2]=TRUE; (*exclusions)[num2][num1]=TRUE; fprintf(stderr, "Exclusions %ld %ld \n", num1, num2); } else { fprintf(stderr, "Error in reading topology exclusions, probably there is not an even number of types \n"); return 0; } while ((int)strlen(pline1) > 0) { num1 = strtol(pline1, &pline2, 10); trim(pline2); if ((int)strlen(pline2) > 0) { num2 = strtol(pline2, &pline1, 10); trim(pline1); (*exclusions)[num1][num2]=TRUE; (*exclusions)[num2][num1]=TRUE; fprintf(stderr, "Exclusions %ld %ld \n", num1, num2); } else { fprintf(stderr, "Error in reading topology exclusions, probably there is not an even number of types \n"); return 0; } } return 1; } /*filling the parameters for types from given strings. 
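(Illustrative note, not from the original source: for a PSC geotype a TYPES line might read "Prot 1 PSC 1.0 1.2 1.5 1.0 80.0 5.0 3.0", i.e. name, type number, geotype, then epsilon, sigma, attraction distance, attraction switch, patch angle, patch switch and length; all values here are hypothetical.)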
Returns 1 on success.*/ int filltypes(char **pline, struct topo * topo) { int type; int geotype_i; int fields; char name[SMSTR]; char geotype[SMSTR]; double param[11]; /* 0: epsilon * 1: sigma * 2: attraction dist * 3: attraction switch * 4: patch angle * 5: patch switch * 6: length * 7(optional): second patch rotation * 8(optional): second patch angle * 9(optional): second patch angle switch * +1: chirality */ char typestr[STRLEN], paramstr[STRLEN]; void trim (char *); void beforecommand(char *, char *, char); void aftercommand(char *, char *, char); beforecommand(typestr, *pline, SEPARATOR); aftercommand(paramstr, *pline, SEPARATOR); fields = sscanf(paramstr, "%s %d %s %le %le %le %le %le %le %le %le %le %le %le", name, &type, geotype, &param[0], &param[1], &param[2], &param[3], &param[4], &param[5], &param[6], &param[7], &param[8], &param[9], &param[10]); fields -= 5; // number of parameter fields => I am too lazy to adjust everywhere below the numbers //DEBUG fprintf (stdout, "Topology read geotype: %ld with parameters fields %d, str:%s and %s in pline %s\n",geotype,fields,geotypestr,paramstr,pline); geotype_i = convert_geotype(geotype); if(!geotype_i){ fprintf(stderr, "TOPOLOGY ERROR: Unknown GEOTYPE: %s!", geotype); return 0; } DEBUG_INIT("geotype_i: %d; fields = %d", geotype_i, fields); if (( (geotype_i == SCN) || (geotype_i == SPN) ) && (fields != 0)) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 1.\n\n", geotype); return 0; } if (( (geotype_i == SCA) || (geotype_i == SPA)) && (fields != 2)) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 3.\n\n", geotype); return 0; } if (( (geotype_i == PSC) || (geotype_i == CPSC) ) && (fields != 5)) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 5.\n\n", geotype); return 0; } if (( (geotype_i == CHPSC) || (geotype_i == CHCPSC) )&& ( fields != 6)) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 6.\n\n", geotype); return 0; } if (( (geotype_i == TPSC) || (geotype_i == TCPSC) ) && (fields != 8)) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 8.\n\n", geotype); return 0; } if (( (geotype_i == TCHPSC) || (geotype_i == TCHCPSC) )&& ( fields != 9)) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 9.\n\n", geotype); return 0; } if ((geotype_i < 0) || (geotype_i > (MAXT + 10))) { fprintf (stderr, "TOPOLOGY ERROR: geotype (%s) is out of range: 0 - %d.\n\n", geotype, MAXT + 10); return 0; } strcpy(topo->ia_params[type][type].name, name); strcpy(topo->ia_params[type][type].other_name, name); topo->ia_params[type][type].geotype[0] = geotype_i; topo->ia_params[type][type].geotype[1] = geotype_i; topo->ia_params[type][type].epsilon = param[0]; topo->ia_params[type][type].sigma = param[1]; topo->ia_params[type][type].rcutwca = (topo->ia_params[type][type].sigma)*pow(2.0,1.0/6.0); fprintf(stdout, "Topology read of %d: %s (geotype: %s, %d) with parameters %lf %lf", type, name, geotype, geotype_i, topo->ia_params[type][type].epsilon, topo->ia_params[type][type].sigma); if (fields > 0) { topo->ia_params[type][type].pdis = param[2]; topo->ia_params[type][type].pswitch = param[3]; topo->ia_params[type][type].rcut = topo->ia_params[type][type].pswitch+topo->ia_params[type][type].pdis; fprintf(stdout, " %f %f",topo->ia_params[type][type].pdis,topo->ia_params[type][type].pswitch); } if (fields > 2) { int i; for(i = 0; i < 
2; i++){ topo->ia_params[type][type].len[i] = param[6]; topo->ia_params[type][type].half_len[i] = param[6] / 2; topo->ia_params[type][type].pangl[i] = param[4]; topo->ia_params[type][type].panglsw[i] = param[5]; topo->ia_params[type][type].pcangl[i] = cos(param[4]/2.0/180*PI); // C1 topo->ia_params[type][type].pcanglsw[i] = cos((param[4]/2.0+param[5])/180*PI); // C2 //topo->ia_params[type][type].pcangl[i] = topo->ia_params[type][type].pcangl[i]; //topo->ia_params[type][type].pcanglsw[i] = topo->ia_params[type][type].pcanglsw[i]; topo->ia_params[type][type].pcoshalfi[i] = cos((param[4]/2.0+param[5])/2.0/180*PI); topo->ia_params[type][type].psinhalfi[i] = sqrt(1.0 - topo->ia_params[type][type].pcoshalfi[i] * topo->ia_params[type][type].pcoshalfi[i]); } fprintf(stdout, " %f %f", topo->ia_params[type][type].pangl[0], topo->ia_params[type][type].panglsw[0]); } if(fields == 6){ int i; for(i = 0; i < 2; i++){ topo->ia_params[type][type].chiral_cos[i] = cos(param[7] / 360 * PI); topo->ia_params[type][type].chiral_sin[i] = sqrt(1 - topo->ia_params[type][type].chiral_cos[i] * topo->ia_params[type][type].chiral_cos[i]); fprintf(stdout, " %f ", param[7]); } } if ((fields == 8)||(fields == 9)) { int i; for(i = 0; i < 2; i++){ topo->ia_params[type][type].csecpatchrot[i] = cos(param[7] / 360 * PI); topo->ia_params[type][type].ssecpatchrot[i] = sqrt(1 - topo->ia_params[type][type].csecpatchrot[i] * topo->ia_params[type][type].csecpatchrot[i]); //fprintf(stdout, " %f %f", topo->ia_params[type][type].csecpatchrot[0], topo->ia_params[type][type].ssecpatchrot[0]); topo->ia_params[type][type].pangl[i+2] = param[8]; topo->ia_params[type][type].panglsw[i+2] = param[9]; topo->ia_params[type][type].pcangl[i+2] = cos(param[8]/2.0/180*PI); // C1 topo->ia_params[type][type].pcanglsw[i+2] = cos((param[8]/2.0+param[9])/180*PI); // C2 //topo->ia_params[type][type].pcangl[i] = topo->ia_params[type][type].pcangl[i]; //topo->ia_params[type][type].pcanglsw[i] = topo->ia_params[type][type].pcanglsw[i]; topo->ia_params[type][type].pcoshalfi[i+2] = cos((param[8]/2.0+param[9])/2.0/180*PI); topo->ia_params[type][type].psinhalfi[i+2] = sqrt(1.0 - topo->ia_params[type][type].pcoshalfi[i+2] * topo->ia_params[type][type].pcoshalfi[i+2]); } fprintf(stdout, " %f %f %f", param[7], topo->ia_params[type][type].pangl[2], topo->ia_params[type][type].panglsw[2]); } if(fields == 9){ int i; for(i = 0; i < 2; i++){ topo->ia_params[type][type].chiral_cos[i] = cos(param[10] / 360 * PI); topo->ia_params[type][type].chiral_sin[i] = sqrt(1 - topo->ia_params[type][type].chiral_cos[i] * topo->ia_params[type][type].chiral_cos[i]); fprintf(stdout, " %f ", param[9]); } } // Volume if (geotype_i < SP) topo->ia_params[type][type].volume = 4.0/3.0*PI*pow((topo->ia_params[type][type].sigma)/2.0,3.0) + PI/2.0*topo->ia_params[type][type].len[0]*pow((topo->ia_params[type][type].sigma)/2.0,2.0) ; else topo->ia_params[type][type].volume = 4.0/3.0*PI*pow((topo->ia_params[type][type].sigma)/2.0,3.0); if ( topo->ia_params[type][type].rcutwca > topo->sqmaxcut ) topo->sqmaxcut = topo->ia_params[type][type].rcutwca; if ( topo->ia_params[type][type].rcut > topo->sqmaxcut ) topo->sqmaxcut = topo->ia_params[type][type].rcut; fprintf(stdout, " \n"); DEBUG_INIT("Finished filltypes"); return 1; } /************************************************ * String Manipulation stuff for parsing files ************************************************/ /* return string that goes before comand character*/ void beforecommand(char *str,char *pline,char commandc) { char *dummy; void 
trim(char *); strcpy(str,pline); if ((dummy = strchr (str,commandc)) != NULL) (*dummy) = 0; trim (str); } /* return string that goes after command character */ void aftercommand(char *str, char *pline,char commandc) { char *dummy; int i; void trim(char *); strcpy(str,pline); if ((dummy = strchr (str,commandc)) != NULL) { i=0; while( (*dummy) != str[i]) { str[i] = ' '; i++; } str[i] = ' '; } trim (str); } /* reads a string from stream of max length n */ char *fgets2(char *line, int n, FILE *stream) { char *c; if (fgets(line,n,stream)==NULL) { return NULL; } if ((c=strchr(line,'\n'))!=NULL) *c=0; return line; } /* remove comments */ void strip_comment (char *line) { char *c; if (!line) return; /* search for a comment mark and replace it by a zero */ if ((c = strchr(line,COMMENTSIGN)) != NULL) (*c) = 0; } /*test if there is still something left in string*/ int continuing(char *s) { int sl; void rtrim (char *str); rtrim(s); sl = strlen(s); if ((sl > 0) && (s[sl-1] == CONTINUE)) { s[sl-1] = 0; return 1; /*true*/ } else return 0; /*false*/ } /*make string uppercase*/ void upstring (char *str) { int i; for (i=0; (i < (int)strlen(str)); i++) str[i] = toupper(str[i]); } /*trim string from left*/ void ltrim (char *str) { char *tr; int c; if (!str) return; tr = strdup (str); c = 0; while ((tr[c] == ' ') || (tr[c] == '\n') || (tr[c] == '\t')) c++; strcpy (str,tr+c); free (tr); } /*trim string from right*/ void rtrim (char *str) { int nul; if (!str) return; nul = strlen(str)-1; while ((nul > 0) && ((str[nul] == ' ') || (str[nul] == '\t') || (str[nul] == '\n')) ) { str[nul] = '\0'; nul--; } } /*trim string from left and right*/ void trim (char *str) { void ltrim (char *str); void rtrim (char *str); ltrim (str); rtrim (str); } /** * Dumps a configuration to the supplied file handle. 
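* (Note added for clarity: each output line below holds the box-folded x y z position scaled by the box dimensions, the direction vector, the first patch direction and the integer "switched" flag of one particle.)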
*/ void draw(FILE *outfile, /*struct vector box, long npart, struct particles *particle,*/ struct conf * conf, struct topo * topo) { long i; double anint(double); //fprintf (outfile, "%15.8le %15.8le %15.8le\n", box.x, box.y, box.z); for (i = 0; i < topo->npart; i++) { fprintf (outfile, "%15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %d\n", conf->box.x * ((conf->particle[i].pos.x) - anint(conf->particle[i].pos.x)), conf->box.y * ((conf->particle[i].pos.y) - anint(conf->particle[i].pos.y)), conf->box.z * ((conf->particle[i].pos.z) - anint(conf->particle[i].pos.z)), conf->particle[i].dir.x, conf->particle[i].dir.y, conf->particle[i].dir.z, conf->particle[i].patchdir[0].x, conf->particle[i].patchdir[0].y, conf->particle[i].patchdir[0].z, conf->particle[i].switched); } } /*............................................................................*/ /****************************************************************************/ /* Pairlist stuf */ /****************************************************************************/ /** * Initializes the pairlist and allocates memory */ void init_pairlist(struct topo * topo, struct sim * sim){ printf("\nAllocating memory for pairlist...\n"); sim->pairlist = xmalloc(sizeof(struct pairs) * topo->npart); // Highest guess: Every particle interacts with the others // TODO: Make it more sophisticated long i; for(i = 0; i < topo->npart; i++){ sim->pairlist[i].pairs = malloc(sizeof(long) * topo->npart); sim->pairlist[i].num_pairs = 0; } } /*............................................................................*/ /** * Cleans up: deallocates the memory for the pairlist */ int dealloc_pairlist(struct topo * topo, struct sim * sim){ long i; if(sim->pairlist != NULL){ for(i = 0; i < topo->npart; i++){ if(sim->pairlist[i].pairs != NULL){ free(sim->pairlist[i].pairs); } } free(sim->pairlist); } return 0; } /*............................................................................*/ /** * Generates a pairlist with a very basic alogrithm */ void gen_simple_pairlist(struct topo * topo, struct sim * sim, struct conf * conf){ struct vector r_cm; double r_cm2; double max_dist; // Set the pairlist to zero //DEBUG_INIT("Gen Pairlist") long i, j; for(i = 0; i < topo->npart; i++){ //DEBUG_INIT("%ld", i); sim->pairlist[i].num_pairs = 0; } long nj = topo->npart; long ni = nj - 1; for(i = 0; i < ni; i++){ for(j = i + 1; j < nj; j++){ r_cm.x = conf->particle[i].pos.x - conf->particle[j].pos.x; r_cm.y = conf->particle[i].pos.y - conf->particle[j].pos.y; r_cm.z = conf->particle[i].pos.z - conf->particle[j].pos.z; if ( r_cm.x < 0 ) r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x-0.5) ) ); else r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x+0.5) ) ); if ( r_cm.y < 0 ) r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y-0.5) ) ); else r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y+0.5) ) ); if ( r_cm.z < 0 ) r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z-0.5) ) ); else r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z+0.5) ) ); r_cm2 = DOT(r_cm,r_cm); max_dist = AVER(sim->trans[conf->particle[i].type].mx, \ sim->trans[conf->particle[j].type].mx); max_dist *= (1 + sim->pairlist_update) * 2; max_dist += topo->maxcut; max_dist *= max_dist; /* squared */ if (r_cm2 <= max_dist){ sim->pairlist[i].pairs[sim->pairlist[i].num_pairs++] = j; sim->pairlist[j].pairs[sim->pairlist[j].num_pairs++] = i; } } } ////Check for too many pairs //for(i = 0; i < topo->npart; i++){ // //if (sim->pairlist.list[i].num_pairs 
>= topo->npart) // if (sim->pairlist[i].num_pairs >= topo->npart){ // fprintf(stderr, "ERROR: Too many pairs for particle %ld!!!\n", i); // exit(1); // } //} } /*.............................................................................*/ /** * Interface for the generation of the pairlist. Define other pairlist * algorithms above. */ void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf){ gen_simple_pairlist(topo, sim, conf); } /*.............................................................................*/ /** * Print out the pairlist */ void print_pairlist(FILE * stream, struct sim * sim, struct topo * topo){ long i, j; for (i = 0; i < topo->npart; i++){ fprintf(stream, "%ld (%ld):", i, sim->pairlist[i].num_pairs); for(j = 0; j < sim->pairlist[i].num_pairs; j++){ fprintf(stream, " %ld", sim->pairlist[i].pairs[j]); } fprintf(stream, "\n"); } } /*..........................................................................*/ /****************************************************************************/ /* Cluster statistics stuff */ /****************************************************************************/ /** * determines whether two particles are in the same cluster */ int same_cluster(struct topo * topo, struct conf * conf, long fst, long snd, double (* intfce[MAXT][MAXT])(struct interacts *) ){ /*if two particles are bonded they belong to the same cluster*/ if ( ((topo->chainparam[conf->particle[fst].chaint]).bond1c >= 0) || ((topo->chainparam[conf->particle[fst].chaint]).bonddc >= 0) ){ if ( (snd == topo->conlist[fst][1]) || (snd == topo->conlist[fst][0]) ) { return TRUE; } } if ( ((topo->chainparam[conf->particle[snd].chaint]).bond1c >= 0) || ((topo->chainparam[conf->particle[snd].chaint]).bonddc >= 0) ){ if ( (fst == topo->conlist[snd][1]) || (fst == topo->conlist[snd][0]) ) { return TRUE; } } /*cluster is made of particles closer than some distance*/ /* struct vector image(struct vector r1, struct vector r2, struct vector box); struct vector r_cm = image(conf->particle[fst].pos, conf->particle[snd].pos, conf->box); double dist2 = DOT(r_cm, r_cm); * TODO: Make it much more efficient => define cluster_dist!!! * if(dist2 > topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].sigma * topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].sigma*4.0){ return FALSE; } else { return TRUE; }*/ /*cluster is made of attractively interacting particles*/ double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf); if(paire(fst, snd, intfce, topo, conf) > -0.10 ){ return FALSE; } else { return TRUE; } } /*............................................................................*/ /** * generate the clusterlist */ int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)){ int change = TRUE; /* does it still change? 
*/ //long neighbour; long i, j, fst, snd, tmp, minnumber, maxnumber; int same_cluster(struct topo * topo, struct conf * conf, long fst, long snd, double (* intfce[MAXT][MAXT])(struct interacts *)); // Set clusterindex to the corresponding index for( i = 0; i < topo->npart; i++){ sim->clusterlist[i] = i; } // Start determining the cluster while(change){ change = FALSE; for(i = 0; i < topo->npart; i++){ /*If nore pairlist go over all pairs*/ maxnumber = topo->npart; minnumber = i ; if (sim->pairlist_update) { maxnumber = sim->pairlist[i].num_pairs; minnumber=0; } /* Go over pairs to see if they are in the cluster */ for(j = minnumber; j < maxnumber; j++){ fst = i; snd = j; if (sim->pairlist_update) { snd = sim->pairlist[i].pairs[j]; } /*do cluster analysis only for spherocylinders*/ if ( (topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].geotype[0] < SP) && \ (topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].geotype[1] < SP) ) { /* if they are close to each other */ if(same_cluster(topo, conf, fst, snd, intfce)){ if(fst > snd){ tmp = snd; snd = fst; fst = tmp; } if(sim->clusterlist[fst] < sim->clusterlist[snd]){ sim->clusterlist[snd] = sim->clusterlist[fst]; change = TRUE; break; /* => will eventually start the i loop from new */ } if(sim->clusterlist[snd] < sim->clusterlist[fst]){ sim->clusterlist[fst] = sim->clusterlist[snd]; change = TRUE; break; /* => will eventually start the i loop from new */ } } } } if(change){ break; } } } return 0; } /*............................................................................*/ /** * sort the clusterlist */ int sort_clusterlist(struct topo * topo, struct sim * sim){ long cluster_indices[topo->npart]; /* holds the different cluster indices. (currently too much memory) */ long num_cluster = 0; /* number of clusters, temporary needed */ long i, j; /* how many clusters are there? 
*/ long max_index = -1; for(i = 0; i < topo->npart; i++){ if(max_index < sim->clusterlist[i]){ max_index = sim->clusterlist[i]; cluster_indices[num_cluster++] = max_index; } } /* free the memory from the old clusters */ if(sim->clusters){ for(i = 0; i < sim->num_cluster; i++){ if(sim->clusters[i].particles){ free(sim->clusters[i].particles); } } free(sim->clusters); } /* Allocate memory for the clusters */ sim->clusters = xmalloc(sizeof(struct cluster) * num_cluster); for(i = 0; i < num_cluster; i++){ /* allocate maximal space for all the clusters */ sim->clusters[i].particles = xmalloc(sizeof(long) * topo->npart); sim->clusters[i].npart = 0; } /* fill in the particles belonging to one cluster */ for(i = 0; i < num_cluster; i++){ for(j = 0; j < topo->npart; j++){ if(sim->clusterlist[j] == cluster_indices[i]){ sim->clusters[i].particles[sim->clusters[i].npart++] = j; } } } sim->num_cluster = num_cluster; /* Find the biggest size */ sim->max_clust = 0; for(i = 0; i < num_cluster; i++){ if(sim->clusters[i].npart > sim->max_clust){ sim->max_clust = sim->clusters[i].npart; } } /* Set the statistics to zero */ sim->clusterstat = xmalloc(sizeof(long) * sim->max_clust); for(i = 0; i < sim->max_clust; i++){ sim->clusterstat[i] = 0; } /* Do the statistics */ for(i = 0; i < num_cluster; i++){ sim->clusterstat[sim->clusters[i].npart - 1]++; } return 0; } /*............................................................................*/ /** * calculate energies of clusters * */ int calc_clusterenergies(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)){ long i,j,k; double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf); for(i = 0; i < sim->num_cluster; i++){ sim->clustersenergy[i]=0.0; for(j = 0; j < sim->clusters[i].npart; j++){ for(k = j+1; k < sim->clusters[i].npart; k++){ sim->clustersenergy[i]+= paire(sim->clusters[i].particles[j], sim->clusters[i].particles[k], intfce, topo, conf); } } } return 0; } /*............................................................................*/ /** * print the clusterlist * */ int print_clusterlist(FILE * stream, BOOL decor, struct topo * topo, struct sim * sim, struct conf * conf){ long i; if(decor){ fprintf(stream, "\n" "-----------------------------------------------------\n" " The Cluster List\n" " (Index starts with 1)\n" "-----------------------------------------------------\n"); } for(i = 0; i < topo->npart; i++){ fprintf(stream,"%3ld %3ld %8.4lf %8.4lf %8.4lf", i + 1, sim->clusterlist[i] + 1, conf->particle[i].pos.x, conf->particle[i].pos.y, conf->particle[i].pos.z); fprintf(stream,"\n"); } if(decor){ fprintf(stream,"-----------------------------------------------------\n"); } fflush(stream); return 0; } /*............................................................................*/ /** * print the clusters * */ int print_clusters(FILE * stream, BOOL decor, struct sim * sim){ long i, j; if(decor){ fprintf(stream, "\n" "-----------------------------------------------------\n" " The Clusters\n" " (Index starts with 1)\n" "-----------------------------------------------------\n"); } for(i = 0; i < sim->num_cluster; i++){ fprintf(stream, "%3ld(%f):", i + 1,sim->clustersenergy[i]); for(j = 0; j < sim->clusters[i].npart; j++){ fprintf(stream, "%5ld", sim->clusters[i].particles[j] + 1); } fprintf(stream, "\n"); } if(decor){ fprintf(stream,"---------------------------------------------------\n"); } fflush(stream); return 0; } 
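/* Aside -- an added sketch, not part of the original code: the iterative relabelling in gen_clusterlist above converges only after repeated sweeps, which is O(npart^2) per sweep in the worst case. A standard alternative is a union-find (disjoint-set) structure; the helpers below, with the hypothetical names uf_find and uf_union, keep the smallest particle index as the cluster root, matching the convention that sim->clusterlist[i] holds the lowest index of the cluster. */
static long uf_find(long *parent, long i) {
    while (parent[i] != i) {
        parent[i] = parent[parent[i]]; /* path halving keeps the trees flat */
        i = parent[i];
    }
    return i;
}
static void uf_union(long *parent, long a, long b) {
    a = uf_find(parent, a);
    b = uf_find(parent, b);
    if (a < b) parent[b] = a;      /* smaller index wins, as in gen_clusterlist */
    else if (b < a) parent[a] = b;
}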
/*............................................................................*/ /** * print a statistics for the clusters */ int print_clusterstat(FILE * stream, BOOL decor, struct sim * sim){ long i; if(decor){ fprintf(stream, "\n" "-----------------------------------------------------\n" " Cluster Distribution\n" "-----------------------------------------------------\n"); } for(i = 0; i < sim->max_clust; i++){ fprintf(stream, "%5ld\t%5ld\n", i + 1, sim->clusterstat[i]); } if(decor){ fprintf(stream, "--------------------------------------------------\n"); } fflush(stream); return 0; } /*............................................................................*/ /** * Alternative way of printing the cluster statistics: everything is on * one line. First monomers, then dimers etc. */ int print_clstat_oneline(FILE * stream, long sweep, struct sim * sim){ long i; fprintf(stream, "%ld: ", sweep); for(i = 0; i < sim->max_clust; i++){ fprintf(stream, "%5ld\t", sim->clusterstat[i]); } fprintf(stream, "\n"); fflush(stream); return 0; } /** * write out all the cluster stat in files, if file name is given */ int write_cluster(FILE * cl_stat, FILE * cl, FILE * cl_list, BOOL decor, long sweep, struct sim * sim, struct topo * topo, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)){ int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); int sort_clusterlist(struct topo * topo, struct sim * sim); int print_clusters(FILE * stream, BOOL decor, struct sim * sim); int calc_clusterenergies(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); gen_clusterlist(topo, sim, conf, intfce); sort_clusterlist(topo, sim); calc_clusterenergies(topo, sim, conf, intfce); if(cl_stat){ if(decor == FALSE){ // if no decor, this means usually into a file. Hence print info // about number of line per frame fprintf(cl_stat, "Sweep: %ld | Maximal size: %ld\n", sweep, sim->max_clust); } print_clusterstat(cl_stat, decor, sim); /* print_clstat_oneline(cl_stat, sweep, sim); */ } if(cl){ if(decor == FALSE){ fprintf(cl, "Sweep: %ld | Number of clusters: %ld\n", sweep, sim->num_cluster); } print_clusters(cl, decor, sim); } if(cl_list){ if(decor == FALSE){ fprintf(cl_list, "Sweep: %ld | Number of particles: %ld\n", sweep, topo->npart); } print_clusterlist(cl, decor, topo, sim, conf); } return 0; } /*............................................................................*/ /****************************************************************************/ /* Wang-Landau stuf */ /****************************************************************************/ /* Initiate Wang-Landau calculation. */ int wlinit(struct wls *wl, char filename[30]) { long i,length,fields=0; double field[5]; FILE *infile; char line[STRLEN]; int wlend(struct wls *); void trim(char *); void strip_comment(char *); infile = fopen(filename, "r"); if (infile == NULL) { fprintf (stderr, "\nERROR: Could not open %s file.\n\n",filename); return 1; } length=0; while (fgets2(line,STRLEN-2,infile) != NULL) { strip_comment (line); trim (line); /* if there is something left... 
*/ if ((int)strlen(line) > 0) { length++; } } length--; /*there is alpha at the first line*/ (*wl).weights = malloc( sizeof(double) * length ); (*wl).hist = malloc( sizeof(long) * length ); (*wl).length[1] = 0; (*wl).dorder[1] = 0; fseek(infile,0,SEEK_SET); i=0; while (fgets2(line,STRLEN-2,infile) != NULL) { strip_comment (line); trim (line); /* if there is something left... */ if ((int)strlen(line) > 0) { if (i == 0) { if (sscanf(line, "%le",&(*wl).alpha)!= 1) { fprintf (stderr, "ERROR: Could not read alpha at the begining.\n\n"); wlend(wl); return 1; } else i++; } else { fields = sscanf(line, "%le %le %le %le",&field[0],&field[1],&field[2],&field[3]); if ( fields == 3 ) { if (i==1) (*wl).minorder[0] = field[0]; (*wl).weights[i-1] = field[1]; (*wl).hist[i-1] = field[2]; (*wl).length[0]++; i++; } else if (fields == 4 ) { if (i==1) { (*wl).minorder[0] = field[0]; (*wl).minorder[1] = field[1]; } if ( (*wl).minorder[1] == field[1] ) (*wl).length[0]++; (*wl).weights[i-1] = field[2]; (*wl).hist[i-1] = field[3]; i++; } else { fprintf (stderr, "ERROR: Could not read order parameter at line %ld.\n\n", i); wlend(wl); return 1; } } } } if (fields == 4 ) { (*wl).length[1] = length / (*wl).length[0]; (*wl).dorder[1] = (field[1] - (*wl).minorder[1])/((*wl).length[1]-1); } (*wl).dorder[0] = (field[0] - (*wl).minorder[0])/((*wl).length[0]-1); if ( ( (i-1) != (*wl).length[0] ) && (fields==3) ) { fprintf (stderr, "ERROR: In reading order parameters length %ld does not fit number of lines %ld.\n\n", (*wl).length[0],i-1); wlend(wl); return 1; } if ( ( (i-1) != (*wl).length[0]*(*wl).length[1] ) && (fields==4) ) { fprintf (stderr, "ERROR: In reading order parameters lengths %ld %ld does not fit number of lines %ld.\n\n", (*wl).length[0],(*wl).length[1],i-1); wlend(wl); return 1; } /*DEBUG*/ printf("Wang-Landau method init:\n"); printf("alpha: %f\n",(*wl).alpha); /*int j=0; if ((*wl).length[1] == 0) { for (i=0; i<(*wl).length[0]; i++) { printf ("%15.8le %15.8le %ld \n",(*wl).minorder[0] + i * (*wl).dorder[0], (*wl).weights[i], (*wl).hist[i]); } } else { for (j=0; j<(*wl).length[1]; j++) { for (i=0; i<(*wl).length[0]; i++) { printf ("%15.8le %15.8le %15.8le %ld \n",(*wl).minorder[0] + i * (*wl).dorder[0], (*wl).minorder[1]+j*(*wl).dorder[1], (*wl).weights[i+(*wl).length[0]*j], (*wl).hist[i+(*wl).length[0]*j]); } printf (" \n"); } }*/ fclose(infile); fflush(stdout); /**/ return 0; } int wlwrite(struct wls *wl, char filename[30]) { long i,j; FILE *outfile; outfile = fopen(filename, "w"); if (outfile == NULL) { fprintf (stderr, "\nERROR: Could not open %s file.\n\n",filename); return 1; } fprintf (outfile, "%15.8le \n",(*wl).alpha); if ((*wl).length[1] == 0) { for (i=0; i<(*wl).length[0]; i++) { fprintf (outfile, "%15.8le %15.8le %ld \n",(*wl).minorder[0] + i * (*wl).dorder[0], (*wl).weights[i], (*wl).hist[i]); } } else { for (j=0; j<(*wl).length[1]; j++) { for (i=0; i<(*wl).length[0]; i++) { fprintf (outfile, "%15.8le %15.8le %15.8le %ld \n",(*wl).minorder[0] + i * (*wl).dorder[0], (*wl).minorder[1]+j*(*wl).dorder[1], (*wl).weights[i+(*wl).length[0]*j], (*wl).hist[i+(*wl).length[0]*j]); } fprintf (outfile, " \n"); } } fflush(outfile); fclose(outfile); return 0; } int wlend(struct wls *wl) { free((*wl).weights); free((*wl).hist); return 0; } void wlreject(struct sim *sim, long oldlength) { int mesh_cpy(struct meshs *, struct meshs *); int longarray_cpy (long **target, long **source,long,long); if ( sim->wlm[0] > 0 ) { sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha; 
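// (Added note: this is the usual Wang-Landau update written on a logarithmic
// scale -- each visit to an order-parameter bin lowers its weight by the
// modification factor alpha, w(q) <- w(q) - alpha, which gradually biases the
// walk away from already well-sampled bins; the histogram incremented next
// records the visits.)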
sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++; if ( (sim->wlm[0] == 2) || (sim->wlm[1] == 2) ) mesh_cpy(&sim->wl.mesh,&sim->wl.origmesh); if ( (sim->wlm[0] == 5) || (sim->wlm[1] == 5)||(sim->wlm[0] == 6) || (sim->wlm[1] == 6) ) { longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,oldlength); sim->wl.radiusholemax = oldlength; } sim->wl.partincontact = sim->wl.partincontactold; } } void wlaccept(int wlm,struct wls *wl) { int i; if ( wlm > 0 ) { for (i=0;i<2;i++) (*wl).currorder[i] = (*wl).neworder[i]; (*wl).weights[ (*wl).currorder[0] + (*wl).currorder[1] * (*wl).length[0]] -= (*wl).alpha; (*wl).hist[ (*wl).currorder[0] + (*wl).currorder[1] * (*wl).length[0]]++; } } /*..............................................................................*/ /*........................NEMATIC ORDER.........................................*/ /*..............................................................................*/ /* Calculates the instantaneous value of the nematic order parameter for the specified configuration. The nematic director is determined by diagonalisation of the tensor order parameter Q (see Allen & Tildesley p305). The order parameter is the corresponding eigenvalue. However, it is equivalent to take minus two times the middle eigenvalue (see Eppenga & Frenkel, Mol Phys vol. 52, p.1303-1334 [1984]), which is more reliable when comparing with the isotropic phase. This is the approach taken in this implementation. Routines from Numerical Recipes are used to perform the diagonalisation. Note that these routines expect an n*n matrix to be stored in elements [1...n][1...n], rather than [0...n-1][0...n-1], so the arrays must be declared with one more element in each dimension. */ double nematic(long npart, struct particles *p) { double q[4][4] = {{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}}; double d[4], e[4]; long i; void tred2(double [4][4], double [4], double [4]); void tqli(double [4], double [4]); for (i=0; i<npart; i++) { q[1][1] += p[i].dir.x * p[i].dir.x; q[1][2] += p[i].dir.x * p[i].dir.y; q[1][3] += p[i].dir.x * p[i].dir.z; q[2][1] += p[i].dir.y * p[i].dir.x; q[2][2] += p[i].dir.y * p[i].dir.y; q[2][3] += p[i].dir.y * p[i].dir.z; q[3][1] += p[i].dir.z * p[i].dir.x; q[3][2] += p[i].dir.z * p[i].dir.y; q[3][3] += p[i].dir.z * p[i].dir.z; } q[1][1] = (q[1][1] * 3.0 / npart - 1.0) / 2.0; q[1][2] = (q[1][2] * 3.0 / npart ) / 2.0; q[1][3] = (q[1][3] * 3.0 / npart ) / 2.0; q[2][1] = (q[2][1] * 3.0 / npart ) / 2.0; q[2][2] = (q[2][2] * 3.0 / npart - 1.0) / 2.0; q[2][3] = (q[2][3] * 3.0 / npart ) / 2.0; q[3][1] = (q[3][1] * 3.0 / npart ) / 2.0; q[3][2] = (q[3][2] * 3.0 / npart ) / 2.0; q[3][3] = (q[3][3] * 3.0 / npart - 1.0) / 2.0; tred2 (q, d, e); tqli (d, e); /* Sort eigenvalues */ if (d[1] > d[2]) { d[0]=d[1]; d[1]=d[2]; d[2]=d[0]; } if (d[2] > d[3]) { d[0]=d[2]; d[2]=d[3]; d[3]=d[0]; } if (d[1] > d[2]) { d[0]=d[1]; d[1]=d[2]; d[2]=d[0]; } return -2.0*d[2]; } /*..............................................................................*/ /* Returns the coefficient of the Fourier series term with period boxlength/n in the z direction. The coefficients of the sine and cosine terms are added in quadrature and returned, making the result independent of phase shifts in the z direction. A significantly non-zero value indicates layering of the particles in the z direction with periodicity boxlength/n. 
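(Added note: equivalently, the returned value is the modulus of the Fourier coefficient tau_n = (1/N) sum_j exp(2 pi i n z_j), where z_j is the z coordinate in box units, since omega = 8n atan(1) = 2 pi n.)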
*/ double smectic(long npart, struct particles *p, long n) { double a, b; double omega = 8.0*n*atan(1.0); long i; a = b = 0.0; for (i=0; i<npart; i++) { a += cos(omega * p[i].pos.z); b += sin(omega * p[i].pos.z); } a /= (double)npart; b /= (double)npart; return sqrt(a*a + b*b); } /*..............................................................................*/ /*........................Z ORDER PARAMETER.....................................*/ long z_order(struct wls *wl, struct conf * conf,int wli) { // printf("%f %ld\n",particle[0].pos.z * box.z,lround(particle[0].pos.z * box.z / wl.dorder[wli] - wl.minorder[wli])); /* Because older C compilators do not know lround we can use ceil as well return lround(particle[0].pos.z * box.z / wl.dorder[wli] - wl.minorder[wli]);*/ /* printf("%f ",conf->particle[0].pos.z ); printf("%f ",conf->syscm.z); printf("%f ",conf->box.z); printf("%f ", wl->minorder[wli]); printf("%f \n", wl->dorder[wli] );*/ return (long) ceil( ((conf->particle[0].pos.z - conf->syscm.z) * conf->box.z- wl->minorder[wli]) / wl->dorder[wli] ); } /*..............................................................................*/ /*........................2 particles distance.....................................*/ long twopartdist(struct wls *wl, struct conf * conf, int wli) { struct vector r_cm; r_cm.x = conf->particle[0].pos.x - conf->particle[1].pos.x; r_cm.y = conf->particle[0].pos.y - conf->particle[1].pos.y; r_cm.z = conf->particle[0].pos.z - conf->particle[1].pos.z; if ( r_cm.x < 0 ) r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x-0.5) ) ); else r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x+0.5) ) ); if ( r_cm.y < 0 ) r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y-0.5) ) ); else r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y+0.5) ) ); if ( r_cm.z < 0 ) r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z-0.5) ) ); else r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z+0.5) ) ); return (long) ceil( (( sqrt(r_cm.x*r_cm.x + r_cm.y*r_cm.y) ) - wl->minorder[wli]) / wl->dorder[wli] ); } /*..............................................................................*/ /*........................alignment ORDER PARAMETER.....................................*/ double alignment_order(struct conf * conf, struct topo * topo) { double sumdot=0; long i,j; struct vector r_cm; struct vector image(struct vector, struct vector, struct vector); for (i = 0; i < topo->npart - 1; i++) { for (j = i + 1; j < topo->npart; j++) { r_cm = image(conf->particle[i].pos, conf->particle[j].pos, conf->box); if ( DOT(r_cm,r_cm) < 1.5*1.5 ) { sumdot+= DOT(conf->particle[i].dir,conf->particle[j].dir); } } } return sumdot; } /*..............................................................................*/ /*........................HOLE IN MESH-MEMBRANE ORDER PARAM.....................*/ /* return change in order parameter when one particle moves*/ long meshorder_moveone(struct vector oldpos, struct vector newpos, struct meshs *mesh, long npart, long target, struct conf * conf, struct sim * sim, int wli) { int change; int nx,ny,ox,oy; /* position in mesh */ double resid; void mesh_fill(struct meshs *, long , struct particles *, struct sim * sim); int mesh_findholes(struct meshs *); int mesh_addpart(double, double, int **, int [2]); int mesh_removepart(double, double, int **, int [2]); if ( conf->particle[target].type != sim->wl.wlmtype ) return sim->wl.currorder[wli]; nx = (int) (INBOX(newpos.x,resid) * (*mesh).dim[0]); ny = (int) (INBOX(newpos.y,resid) * 
/*........................HOLE IN MESH-MEMBRANE ORDER PARAM.....................*/
/* return change in order parameter when one particle moves*/
long meshorder_moveone(struct vector oldpos, struct vector newpos, struct meshs *mesh, long npart, long target, struct conf * conf, struct sim * sim, int wli) { int change; int nx,ny,ox,oy; /* position in mesh */ double resid; void mesh_fill(struct meshs *, long , struct particles *, struct sim * sim); int mesh_findholes(struct meshs *); int mesh_addpart(double, double, int **, int [2]); int mesh_removepart(double, double, int **, int [2]); if ( conf->particle[target].type != sim->wl.wlmtype ) return sim->wl.currorder[wli]; nx = (int) (INBOX(newpos.x,resid) * (*mesh).dim[0]); ny = (int) (INBOX(newpos.y,resid) * (*mesh).dim[1]); ox = (int) (INBOX(oldpos.x,resid) * (*mesh).dim[0]); oy = (int) (INBOX(oldpos.y,resid) * (*mesh).dim[1]); if ( (nx == ox) && (ny == oy) ) return sim->wl.currorder[wli]; /* particle stayed in the same mesh bin*/ change = mesh_addpart(newpos.x,newpos.y,&(*mesh).data,(*mesh).dim); if (change) { change = mesh_removepart(oldpos.x,oldpos.y,&(*mesh).data,(*mesh).dim); } if ( !change ) { /* fill the mesh with particles*/ mesh_fill(mesh,npart,conf->particle, sim); return (long) (mesh_findholes(mesh) - sim->wl.minorder[wli]); } return sim->wl.currorder[wli]; }
/* return change in order parameter when chain moves*/
long meshorder_movechain(long chain[MAXN], struct meshs *mesh, long npart, struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL], int wli) { long i,current; int change; void mesh_fill(struct meshs *, long , struct particles *, struct sim * sim); int mesh_findholes(struct meshs *); int mesh_addpart(double, double, int **, int [2]); int mesh_removepart(double, double, int **, int [2]); change= 1; i = 0; current = chain[0]; while ( (current >=0 ) && (change) ) { if ( conf->particle[current].type == sim->wl.wlmtype ) change = mesh_addpart(conf->particle[current].pos.x, conf->particle[current].pos.y, &(*mesh).data, (*mesh).dim); i++; current = chain[i]; } i = 0; current = chain[0]; while ( (current >=0 ) && (change) ) { if ( conf->particle[current].type == sim->wl.wlmtype ) change = mesh_removepart(chorig[i].pos.x, chorig[i].pos.y, &(*mesh).data, (*mesh).dim); i++; current = chain[i]; } if ( !change ) { /* fill the mesh with particles*/ mesh_fill(mesh,npart,conf->particle, sim); return (long) (mesh_findholes(mesh) - sim->wl.minorder[wli]); } return sim->wl.currorder[wli]; }
/* filling the mesh */
void mesh_fill(struct meshs *mesh, long npart, struct particles *particle, struct sim * sim) { long i; int mesh_addpart(double posx, double posy, int **mesh, int dim[2]); for ( i=0; i<((*mesh).dim[0] * (*mesh).dim[1]); i++) { (*mesh).data[i] = 0; } for (i=0; i<npart; i++) { /*calculate position of particle on mesh and add it to all bins where it belongs */ if (particle[i].type == sim->wl.wlmtype) mesh_addpart(particle[i].pos.x,particle[i].pos.y, &(*mesh).data, (*mesh).dim); } }
/* add particle on coordinates posx posy to mesh; return 0 if it was placed on an empty spot*/
int mesh_addpart(double posx, double posy, int **mesh, int dim[2]) { int i, square[9], onhole; double resid; void mesh_square(int , int , int [2], int (*)[9]); onhole = 1; mesh_square( (int) (INBOX(posx,resid) * dim[0]), (int) (INBOX(posy,resid) * dim[1]) , dim, &square); for(i=0;i<9;i++) { if ( (square[i] >= dim[0]*dim[1])||(square[i] <0) ) { printf ("Error: trying to write to %d\n",square[i]); printf ("%d %d and %d\n", (int) (INBOX(posx,resid) * dim[0]), (int) (INBOX(posy,resid) * dim[1]),i ); fflush(stdout); } if ( ((*mesh)[ square[i] ]) >= 0 ) onhole = 0; (*mesh)[ square[i] ]--; } return onhole; }
/* remove particle on coordinates posx posy from mesh and return 0 if there is an empty spot now*/
int mesh_removepart(double posx, double posy, int **mesh, int dim[2]) { int i, square[9]; double resid; void mesh_square(int , int , int [2], int (*)[9]); mesh_square((int) (INBOX(posx,resid) * dim[0]), (int) (INBOX(posy,resid) * dim[1]) , dim, &square); for(i=0;i<9;i++) {
//DEBUG if (square[i] >= dim[0]*dim[1]) printf ("Error: trying to write to %d\n",square[i]);
(*mesh)[ square[i] ]++; if ( ((*mesh)[ square[i] ]) == 0 ) return 0; } return 1; }
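/* Illustrative usage sketch (an addition, not part of the original code): the mesh is a coarse occupancy grid over the xy plane, and mesh_findholes() runs a cluster algorithm over the unoccupied cells. A hypothetical call sequence for measuring the largest hole in a membrane, assuming conf and sim are already set up: */
static int largest_membrane_hole(struct conf *conf, struct sim *sim, long npart, double meshsize)
{
    struct meshs m;
    int maxhole;
    int mesh_init(struct meshs *, double, long, struct conf *, struct sim *);
    int mesh_end(struct meshs *);
    m.data = NULL;   /* mesh_init() frees non-NULL buffers, so start clean */
    m.tmp = NULL;
    maxhole = mesh_init(&m, meshsize, npart, conf, sim);  /* fills the grid and finds holes */
    mesh_end(&m);                                         /* release the grid again */
    return maxhole;
}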
void mesh_square(int x, int y, int dim[2], int (*square)[9]) { int a,b; b=y; (*square)[0] = x + dim[0]*b; a = x-1; if ( a<0 ) a = dim[0]-1; (*square)[1] = a + dim[0]*b; a = x+1; if ( a==dim[0] ) a = 0; (*square)[2] = a + dim[0]*b; b = y-1; if ( b<0 ) b = dim[1]-1; (*square)[3] = x + dim[0]*b; a = x-1; if ( a<0 ) a = dim[0]-1; (*square)[4] = a + dim[0]*b; a = x+1; if ( a==dim[0] ) a = 0; (*square)[5] = a + dim[0]*b; b = y+1; if ( b==dim[1] ) b = 0; (*square)[6] = x + dim[0]*b; a = x-1; if ( a<0 ) a = dim[0]-1; (*square)[7] = a + dim[0]*b; a = x+1; if ( a==dim[0] ) a = 0; (*square)[8] = a + dim[0]*b; }
void mesh_neighbors(int pos, int dim[2], int neighbors[4]) { int x,y,a; x = pos % dim[0]; y = pos / dim[0]; a = x-1; if ( a<0 ) a = dim[0]-1; neighbors[0] = a + dim[0]*y; a = x+1; if ( a==dim[0] ) a = 0; neighbors[1] = a + dim[0]*y; a = y-1; if ( a<0 ) a = dim[1]-1; neighbors[2] = x + dim[0]*a; a = y+1; if ( a==dim[1] ) a = 0; neighbors[3] = x + dim[0]*a; }
/* labels the clusters of free mesh points (holes) and returns the size of the largest one */
int mesh_findholes(struct meshs *mesh) { int i,j, k, n, size, li, maxsize; int neighbors[4]; void mesh_neighbors(int, int [2], int [4]); n=0; maxsize = 0; for (i=0;i<((*mesh).dim[0] * (*mesh).dim[1]);i++) { (*mesh).tmp[i] = 0; if ( (*mesh).data[i] > 0 ) (*mesh).data[i] = 0; } i=0;
// go through all mesh points
while ( i < ((*mesh).dim[0] * (*mesh).dim[1]) ) {
// test if mesh point is occupied
if ( (*mesh).data[i] != 0 ) { i++; } else {
// mesh point is free, create a new cluster
n++; (*mesh).data[i] = n;
// start new cluster, put mesh point as first element, and set list pointer on first element
//DEBUG if (n >= mesh.dim[0]*mesh.dim[1]) printf ("Error: trying to write to sizes position %d\n",n);
size = 1; (*mesh).tmp[0] = i; li = 0;
// go through all elements of the cluster
while ( li < size ) {
//go through all neighbors
j = (*mesh).tmp[li]; mesh_neighbors(j, (*mesh).dim, neighbors); for ( k=0; k<4; k++ ) {
// test if status is free and append it to the cluster
if ( (*mesh).data[ neighbors[k] ] == 0 ) { (*mesh).data[ neighbors[k] ] = n;
// append mesh point as element in the list
(*mesh).tmp[size] = neighbors[k]; size++; } if ( (*mesh).data[ neighbors[k] ] > 0 && (*mesh).data[ neighbors[k] ]<n ) { fprintf(stderr,"Error: Mesh cluster out of range, probably looping infinitely through pbc."); fflush(stderr); } } li++; } if (size > maxsize) maxsize = size; } } return maxsize; }
int mesh_init(struct meshs *mesh, double meshsize, long npart, struct conf * conf, struct sim * sim) { // int i;
int maxsize,length; void mesh_fill(struct meshs *, long , struct particles *, struct sim * sim); int mesh_findholes(struct meshs *); (*mesh).dim[0] = (int)(conf->box.x/meshsize); (*mesh).dim[1] = (int)(conf->box.y/meshsize); if ( (*mesh).data != NULL ) free((*mesh).data); if ( (*mesh).tmp != NULL ) free((*mesh).tmp); length = (*mesh).dim[0] * (*mesh).dim[1]; (*mesh).data = malloc( sizeof(int)* (length)); (*mesh).tmp = malloc( sizeof(int)* (length+1));
/* fill the mesh with particles*/
mesh_fill(mesh, npart,conf->particle, sim);
/* perform hole cluster algorithm */
maxsize = mesh_findholes(mesh);
/*DEBUG printf("maxsize: %d\n",maxsize); printf("mesh:\n"); for (i=0;i<mesh.dim[0]*mesh.dim[1];i++) { printf("%d ",mesh.data[i]); if ( ((i+1) % mesh.dim[0]) == 0) printf("\n"); }*/
return maxsize; }
void mesh_print (struct meshs *mesh) { int i; int mesh_findholes(struct meshs *); printf("mesh:\n"); for (i=0;i<(*mesh).dim[0] * (*mesh).dim[1];i++) { printf("%d ",(*mesh).data[i]); if ( ((i+1) % (*mesh).dim[0]) == 0) printf("\n"); } printf("hole %d:\n",
mesh_findholes(mesh) ); printf("\n"); } int mesh_cpy (struct meshs *target, struct meshs *source) { if ( (*target).data != NULL) { if ( ((*target).dim[0] == (*source).dim[0]) && ((*target).dim[1] == (*source).dim[1]) ) { memcpy((*target).data,(*source).data, sizeof(int)* ((*target).dim[0] * (*target).dim[1]) ); return 0; } else { free ((*target).data); if ( (*source).dim[0] * (*source).dim[1] > (*target).dim[0] * (*target).dim[1] ) { if ((*target).tmp != NULL ) free ((*target).tmp); (*target).tmp = malloc( sizeof(int)* ((*source).dim[0] * (*source).dim[1] + 1)); } } } (*target).dim[0] = (*source).dim[0]; (*target).dim[1] = (*source).dim[1]; (*target).data = malloc( sizeof(int)* ((*target).dim[0] * (*target).dim[1])); if ((*target).tmp == NULL ) (*target).tmp = malloc( sizeof(int)* ((*source).dim[0] * (*source).dim[1] + 1)); memcpy((*target).data,(*source).data, sizeof(int)* ((*target).dim[0] * (*target).dim[1]) ); return 0; } int mesh_end(struct meshs *mesh) { /* free allocated memory */ if ( (*mesh).data!= NULL ) free((*mesh).data); if ( (*mesh).tmp!= NULL ) free((*mesh).tmp); return 0; } /*..............................................................................*/ /*........................RADIUS HOLE IN CENTER MEMBRANE ORDER PARAM............*/ /*return current bin of free radius*/ long radiushole_order(struct sim * sim) { long i; for (i=0;i<sim->wl.radiusholemax-3;i++){ if ((sim->wl.radiushole[i] >0 ) && (sim->wl.radiushole[i+1] >0 ) && (sim->wl.radiushole[i+2] >0 ) && (sim->wl.radiushole[i+3] >0 )) return i-1; } return -100; } /*return order of given radius */ long radiushole_position(double radius, struct sim * sim, int wli) { return (long) ceil( ( radius - sim->wl.minorder[wli]) / sim->wl.dorder[wli] ); } /* return change in order parameter when one particle moves*/ long radiusholeorder_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target,int wli, struct vector *position) { long nr,or; /* position in radiushole */ double rx,ry,z; BOOL oz,nz; long radiushole_position(double radius, struct sim * sim,int); long radiushole_order(struct sim *sim); double anint(double); void radiushole_print (long *radiushole, long length); if ( conf->particle[target].type != sim->wl.wlmtype ) return sim->wl.currorder[wli]; z=conf->particle[target].pos.z - position->z; /*if above position*/ if (z-anint(z) < 0) nz = FALSE; else nz=TRUE; z=oldpos->z - position->z; /*if above position*/ if (z-anint(z) < 0) oz = FALSE; else oz=TRUE; if ( !(nz) && !(oz) ) return sim->wl.currorder[wli]; rx = conf->box.x * (conf->particle[target].pos.x - anint(conf->particle[target].pos.x)); ry = conf->box.y * (conf->particle[target].pos.y - anint(conf->particle[target].pos.y)); nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli); if (nr < 0) return -100; /*particle move over radius bins*/ if (nz) { sim->wl.radiushole[nr]++; } if (oz) { rx = conf->box.x * (oldpos->x - anint(oldpos->x)); ry = conf->box.y * (oldpos->y - anint(oldpos->y)); or = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli); sim->wl.radiushole[or]--; if ( sim->wl.radiushole[or] < 0 ) { printf ("Error(single particle move): trying to make number of beads in radiuspore smaller than 0 at position %ld\n",or); radiushole_print(sim->wl.radiushole,sim->wl.radiusholemax); fflush(stdout); } if (sim->wl.radiushole[or] ==0) return radiushole_order(sim); } if ( (nz) && (sim->wl.radiushole[nr] ==1) ) { return radiushole_order(sim); } return sim->wl.currorder[wli]; } /* return change in order parameter when chain moves*/ long 
radiusholeorder_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli, struct vector *position) { long i,current,nr; double rx,ry,z; BOOL change=FALSE; long radiushole_position(double radius, struct sim * sim,int); long radiushole_order(struct sim *sim); double anint(double); void radiushole_print (long *radiushole, long length); i = 0; rx=0; current = chain[0]; while (current >=0 ) { if ( conf->particle[current].type == sim->wl.wlmtype ) { z=conf->particle[current].pos.z - position->z; /*if above system CM*/ if (z-anint(z) > 0) { rx = conf->box.x * (conf->particle[current].pos.x - anint(conf->particle[current].pos.x)); ry = conf->box.y * (conf->particle[current].pos.y - anint(conf->particle[current].pos.y)); nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli); if (nr < 0) return -100; sim->wl.radiushole[nr]++; if ( sim->wl.radiushole[nr] == 1 ) change = TRUE; } } i++; current = chain[i]; } i = 0; current = chain[0]; while (current >=0 ) { if ( conf->particle[current].type == sim->wl.wlmtype ) { z=chorig[i].pos.z - position->z; /*if above system CM*/ if (z-anint(z) > 0) { rx = conf->box.x * (chorig[i].pos.x - anint(chorig[i].pos.x)); ry = conf->box.y * (chorig[i].pos.y - anint(chorig[i].pos.y)); nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli); sim->wl.radiushole[nr]--; if ( sim->wl.radiushole[nr] < 0 ) { printf ("Error (chainmove): trying to make number of beads in radiuspore smaller than 0 at position %ld\n",nr); radiushole_print(sim->wl.radiushole,sim->wl.radiusholemax); fflush(stdout); } if ( sim->wl.radiushole[nr] == 0 ) change = TRUE; } } i++; current = chain[i]; } if ( change ) { return radiushole_order(sim); } return sim->wl.currorder[wli]; } /* filling the radiushole above vec*/ long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli, struct vector *position) { long i,nr,radiusholemax; double rx,ry,z; long radiushole_position(double radius, struct sim * sim,int); long radiushole_order(struct sim *sim); double anint(double); radiusholemax = radiushole_position(sqrt(conf->box.x*conf->box.x+conf->box.y*conf->box.y),sim,wli); if ( radiusholemax > sim->wl.radiusholemax ) { if (sim->wl.radiushole != NULL) free(sim->wl.radiushole); sim->wl.radiushole = malloc( sizeof(long)* (radiusholemax)); sim->wl.radiusholemax = radiusholemax; } for (i=0;i<radiusholemax;i++) { sim->wl.radiushole[i] = 0; } for (i=0; i< topo->npart; i++) { /*calculate position of particle from z axis, and add it in array */ if ( conf->particle[i].type == sim->wl.wlmtype ) { z=conf->particle[i].pos.z - (*position).z; /*if above position*/ if (z-anint(z) > 0) { rx = conf->box.x * (conf->particle[i].pos.x - anint(conf->particle[i].pos.x)); ry = conf->box.y * (conf->particle[i].pos.y - anint(conf->particle[i].pos.y)); nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli); if (nr < 0) return -100; sim->wl.radiushole[nr]++; } } } return radiushole_order(sim); } void radiushole_print (long *radiushole, long length) { long i; printf("radiushole:\n"); for (i=0;i<length;i++) { printf("%ld ",radiushole[i]); } printf("\n"); } int longarray_cpy (long **target, long **source, long targetlength, long sourcelength) { /*if ( (*target) != NULL) { if ( targetlength == sourcelength ) { memcpy((*target),(*source), sizeof(long)*(sourcelength)); return 0; } else { free(*target); } }*/ if ( (*target) != NULL) (*target) = (long*) realloc((*target), sizeof(long)*(sourcelength)); else (*target) = malloc( sizeof(long)*(sourcelength)); memcpy((*target),(*source), 
sizeof(long)*(sourcelength)); return 0; }
/*..............................................................................*/
/* ............................... particles in contact ..................... */
/*return order for particles in contact */
long contparticles_order(struct sim * sim, int wli) { return (long) ceil( ( sim->wl.partincontact - sim->wl.minorder[wli]) / sim->wl.dorder[wli] ); }
/*returns whether the particle is in contact*/
BOOL particleinncontact (struct vector *vec, struct conf *conf) { double x,y,z; double anint(double); x = vec->x - conf->particle[0].pos.x; y = vec->y - conf->particle[0].pos.y; z = vec->z - conf->particle[0].pos.z; x = conf->box.x * (x - anint(x)); y = conf->box.y * (y - anint(y)); z = conf->box.z * (z - anint(z)); if ( x*x + y*y + z*z < WL_CONTACTS) { return TRUE; } else { return FALSE; } }
/* return change in number of particles in contact when one particle moves*/
long contparticles_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target,int wli) { long contparticles_order(struct sim * sim, int wli); BOOL particleinncontact (struct vector *vec, struct conf *conf); if ( conf->particle[target].type != sim->wl.wlmtype ) return sim->wl.currorder[wli]; if ( particleinncontact (&(conf->particle[target].pos),conf) ) sim->wl.partincontact++; if ( particleinncontact (oldpos,conf) ) sim->wl.partincontact--; return contparticles_order(sim,wli); }
/* return change in order parameter when chain moves*/
long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli) { long i,current; long contparticles_order(struct sim * sim, int wli); BOOL particleinncontact (struct vector *vec, struct conf *conf); i = 0; current = chain[0]; while (current >=0 ) { if ( conf->particle[current].type == sim->wl.wlmtype ) { if ( particleinncontact (&(conf->particle[current].pos),conf) ) sim->wl.partincontact++; } i++; current = chain[i]; } i = 0; current = chain[0]; while (current >=0 ) { if ( conf->particle[current].type == sim->wl.wlmtype ) { if ( particleinncontact (&(chorig[i].pos),conf) ) sim->wl.partincontact--; } i++; current = chain[i]; } return contparticles_order(sim,wli); }
/* count all particles that are in contact */
long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli) { long i; long contparticles_order(struct sim * sim, int wli); BOOL particleinncontact (struct vector *vec, struct conf *conf); sim->wl.partincontact = 0; for (i=1; i< topo->npart; i++) { /*calculate position of particle and add it if in contact */ if ( conf->particle[i].type == sim->wl.wlmtype ) { if ( particleinncontact (&(conf->particle[i].pos),conf) ) sim->wl.partincontact++; } } return contparticles_order(sim,wli); }
/*..............................................................................*/
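/* Illustrative usage sketch (an addition, not part of the original code): particleinncontact() is the basic predicate (minimum-image distance from particle 0 below sqrt(WL_CONTACTS)); contparticles_all() recomputes the full count from scratch and returns its Wang-Landau bin, while the moveone/movechain variants above update the count incrementally after a move. A hypothetical full recount: */
static long recount_contacts(struct topo *topo, struct conf *conf, struct sim *sim, int wli)
{
    long contparticles_all(struct topo *, struct conf *, struct sim *, int);
    /* resets sim->wl.partincontact and returns the corresponding bin */
    return contparticles_all(topo, conf, sim, wli);
}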
/*........................GEOMETRIC STUFF.......................................*/
/*..............................................................................*/
/*..............................................................................*/
/* Finds the closest distance between two line segments and returns it as a vector. Gets the orientations and lengths of the line segments and the vector connecting their centres of mass (from vec1 to vec2). */
// Copyright 2001, softSurfer (www.softsurfer.com)
// This code may be freely used and modified for any purpose
// providing that this copyright notice is included with it.
// SoftSurfer makes no warranty for this code, and cannot be held
// liable for any real or imagined damage resulting from its use.
// Users of this code must verify correctness for their application.
struct vector mindist_segments(struct vector dir1, double halfl1, struct vector dir2, double halfl2, struct vector r_cm) { struct vector u,v,w,vec; double a,b,c,d,e,D,sc,sN,sD,tc,tN,tD; struct vector vec_scale(struct vector, double);
u = vec_scale(dir1,2.0*halfl1); //S1.P1 - S1.P0;
v = vec_scale(dir2,2.0*halfl2); //S2.P1 - S2.P0;
w.x = dir2.x*halfl2 - dir1.x*halfl1 - r_cm.x; w.y = dir2.y*halfl2 - dir1.y*halfl1 - r_cm.y; w.z = dir2.z*halfl2 - dir1.z*halfl1 - r_cm.z; //S1.P0 - S2.P0;
a = DOT(u,u); // always >= 0
b = DOT(u,v); c = DOT(v,v); // always >= 0
d = DOT(u,w); e = DOT(v,w); D = a*c - b*b; // always >= 0
sc = D; sN = D; sD = D; // sc = sN / sD, default sD = D >= 0
tc = D; tN = D; tD = D; // tc = tN / tD, default tD = D >= 0
// compute the line parameters of the two closest points
if (D < 0.00000001) { // the lines are almost parallel
sN = 0.0; // force using point P0 on segment S1
sD = 1.0; // to prevent possible division by 0.0 later
tN = e; tD = c; } else { // get the closest points on the infinite lines
sN = (b*e - c*d); tN = (a*e - b*d); if (sN < 0.0) { // sc < 0 => the s=0 edge is visible
sN = 0.0; tN = e; tD = c; } else if (sN > sD) { // sc > 1 => the s=1 edge is visible
sN = sD; tN = e + b; tD = c; } } if (tN < 0.0) { // tc < 0 => the t=0 edge is visible
tN = 0.0;
// recompute sc for this edge
if (-d < 0.0) sN = 0.0; else if (-d > a) sN = sD; else { sN = -d; sD = a; } } else if (tN > tD) { // tc > 1 => the t=1 edge is visible
tN = tD;
// recompute sc for this edge
if ((-d + b) < 0.0) sN = 0; else if ((-d + b) > a) sN = sD; else { sN = (-d + b); sD = a; } }
// finally do the division to get sc and tc
if (fabs(sN) < 0.00000001) sc = 0.0 ; else sc = sN / sD; if (fabs(tN) < 0.00000001) tc = 0.0 ; else tc = tN / tD;
// get the difference of the two closest points
//Vector = w + (sc * u) - (tc * v); // = S1(sc) - S2(tc)
vec.x = u.x*sc + w.x - v.x*tc; vec.y = u.y*sc + w.y - v.y*tc; vec.z = u.z*sc + w.z - v.z*tc; return vec; }
/*..............................................................................*/
/* Finds the closest distance between a line segment and a point and returns it as a vector (from the point to the closest segment point). Gets the orientation and length of the line segment and the vector connecting the centres of mass (from segment to point). */
struct vector mindist_segmentpoint(struct vector dir1, double length, struct vector r_cm) { struct vector vec; double c,d,halfl; halfl=length*0.5; c = DOT(dir1,r_cm); if (c >= halfl) d = halfl; else { if (c > -halfl) d = c; else d = -halfl; } vec.x = - r_cm.x + dir1.x * d; vec.y = - r_cm.y + dir1.y * d; vec.z = - r_cm.z + dir1.z * d; return vec; }
/*..............................................................................*/
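/* Illustrative self-check (an addition, not part of the original code): two parallel segments of half length 1 along z, with centres separated by (1,0,0), are at distance 1 everywhere, so the returned vector should have unit length. A minimal sketch, assuming the declarations above: */
static void mindist_selfcheck(void)
{
    struct vector dir1, dir2, r_cm, d;
    double vec_size(struct vector);
    dir1.x = 0.0; dir1.y = 0.0; dir1.z = 1.0;   /* both segments along z */
    dir2 = dir1;
    r_cm.x = 1.0; r_cm.y = 0.0; r_cm.z = 0.0;   /* from segment 1 to segment 2 */
    d = mindist_segments(dir1, 1.0, dir2, 1.0, r_cm);
    printf("segment-segment distance: %f (expected 1.0)\n", vec_size(d));
}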
/* Determines whether two particles overlap. Returns 1 if there is an overlap, 0 if not. */
int overlap(struct particles part1, struct particles part2, struct vector box, struct ia_param ia_params[MAXT][MAXT]) { double b, c, d, e, f; /* Coefficients in distance quadratic */
double boundary; /* Half length of central boundary zone of quadratic */
double det; double halfl; /* Half length of cylinder */
double s0, t0; /* det times location of min separation of infinite lines */
double ss, tt; /* Location of min separation of line segments */
struct vector r_cm; /* Vector between centres of mass */
double dist; /* Distance between particles*/
struct vector distvec; /* Distance vector between particles*/
double linemin(double, double); struct vector image(struct vector, struct vector, struct vector); r_cm = image(part1.pos, part2.pos, box); if ((part1.type >= SP) && (part2.type >= SP)) { /*we have two spheres - most common, do nothing*/ dist=sqrt(DOT(r_cm,r_cm)); } else { if ((ia_params[part1.type][part2.type].geotype[0] < SP) && (ia_params[part1.type][part2.type].geotype[1] < SP)) { /*we have two spherocylinders*/ /*finding closest contact between them*/ b = -DOT(part1.dir, part2.dir); d = DOT(part1.dir, r_cm); e = -DOT(part2.dir, r_cm); f = DOT(r_cm, r_cm); det = 1.0 - b*b;
//halfl = length / 2.0;
// Just take the mean
halfl = ia_params[part1.type][part2.type].half_len[0] + ia_params[part1.type][part2.type].half_len[1]; halfl /= 2; boundary = det * halfl;
/* Location of smallest separation of the infinite lines */
s0 = b*e - d; t0 = b*d - e;
/* Location of smallest separation of line segments */
if (s0 >= boundary) { if (t0 >= boundary) { /* Region 2 */ if ( d + halfl + halfl*b < 0.0 ) { ss = halfl; tt = linemin( -ss*b - e, halfl ); } else { tt = halfl; ss = linemin( -tt*b - d, halfl ); } } else if (t0 >= -boundary) { /* Region 1 */ ss = halfl; tt = linemin( -ss*b - e, halfl ); } else { /* Region 8 */ if ( d + halfl - halfl*b < 0.0 ) { ss = halfl; tt = linemin( -ss*b - e, halfl ); } else { tt = -halfl; ss = linemin( -tt*b - d, halfl ); } } } else if (s0 >= -boundary) { if (t0 >= boundary) { /* Region 3 */ tt = halfl; ss = linemin( -tt*b - d, halfl ); } else if (t0 >= -boundary) { /* Region 0 */ ss = s0/det; tt = t0/det; } else { /* Region 7 */ tt = -halfl; ss = linemin( -tt*b - d, halfl ); } } else { if (t0 >= boundary) { /* Region 4 */ if ( d - halfl + halfl*b > 0.0 ) { ss = -halfl; tt = linemin( -ss*b - e, halfl ); } else { tt = halfl; ss = linemin( -tt*b - d, halfl ); } } else if (t0 >= -boundary) { /* Region 5 */ ss = -halfl; tt = linemin( -ss*b - e, halfl ); } else { /* Region 6 */ if ( d - halfl - halfl*b > 0.0 ) { ss = -halfl; tt = linemin( -ss*b - e, halfl ); } else { tt = -halfl; ss = linemin( -tt*b - d, halfl ); } } }
/*ss and tt are the location of min separation of the line segments */
dist=sqrt(f + ss*ss + tt*tt + 2.0*(ss*d + tt*e + ss*tt*b)); } else { if (ia_params[part1.type][part2.type].geotype[0] < SP) { /*We have one spherocylinder - it is the first one*/
//halfl=length/2;/*finding closest vector from spherocylinder to sphere*/
halfl=ia_params[part1.type][part2.type].half_len[0];/*finding closest vector from spherocylinder to sphere*/ c = DOT(part1.dir,r_cm); if (c >= halfl) d = halfl; else { if (c > -halfl) d = c; else d = -halfl; } distvec.x = - r_cm.x + part1.dir.x * d; distvec.y = - r_cm.y + part1.dir.y * d; distvec.z = - r_cm.z + part1.dir.z * d; dist=sqrt(DOT(distvec,distvec)); } else { /*last option: the first one is a sphere, the second one a spherocylinder*/
//halfl=length/2; /*finding closest vector from spherocylinder to sphere*/
halfl=ia_params[part1.type][part2.type].half_len[1];/*finding closest vector from sphyrocylinder to sphere*/ c = DOT(part2.dir,r_cm); if (c >= halfl) d = halfl; else { if (c > -halfl) d = c; else d = -halfl; } distvec.x = r_cm.x - part2.dir.x * d; distvec.y = r_cm.y - part2.dir.y * d; distvec.z = r_cm.z - part2.dir.z * d; dist=sqrt(DOT(distvec,distvec)); } } } /* Overlap exists if smallest separation is less than diameter of cylinder */ if (dist < ia_params[part1.type][part2.type].sigma*0.5 ) { return 1; } else { return 0; } } /*..............................................................................*/ double linemin(double criterion, double halfl) { if (criterion >= halfl) { return halfl; } else if (criterion >= -halfl) { return criterion; } else { return -halfl; } } /*..............................................................................*/ /*........................SOME USEFUL MATH......................................*/ /*..............................................................................*/ /* ran2 from Numerical Recipes. */ #define IM1 2147483563 #define IM2 2147483399 #define AM (1.0/IM1) #define IMM1 (IM1-1) #define IA1 40014 #define IA2 40692 #define IQ1 53668 #define IQ2 52774 #define IR1 12211 #define IR2 3791 #define NTAB 32 #define NDIV (1+IMM1/NTAB) #define EPS 1.2e-7 #define RNMX (1.0-EPS) double ran2(long *idum) { int j; long k; static long idum2=123456789; static long iy=0; static long iv[NTAB]; double temp; if (*idum <= 0) { if (-(*idum) < 1) *idum=1; else *idum = -(*idum); idum2=(*idum); for (j=NTAB+7;j>=0;j--) { k=(*idum)/IQ1; *idum=IA1*(*idum-k*IQ1)-k*IR1; if (*idum < 0) *idum += IM1; if (j < NTAB) iv[j] = *idum; } iy=iv[0]; } k=(*idum)/IQ1; *idum=IA1*(*idum-k*IQ1)-k*IR1; if (*idum < 0) *idum += IM1; k=idum2/IQ2; idum2=IA2*(idum2-k*IQ2)-k*IR2; if (idum2 < 0) idum2 += IM2; j=iy/NDIV; iy=iv[j]-idum2; iv[j] = *idum; if (iy < 1) iy += IMM1; if ((temp=AM*iy) > RNMX) return RNMX; else return temp; } #undef IM1 #undef IM2 #undef AM #undef IMM1 #undef IA1 #undef IA2 #undef IQ1 #undef IQ2 #undef IR1 #undef IR2 #undef NTAB #undef NDIV #undef EPS #undef RNMX /*..............................................................................*/ /* From Numerical Recipes. Simplified to deal specifically with 3*3 matrices (stored as elements [1...3][1...3] or a 4*4 array). */ void tred2(double a[4][4], double d[4], double e[4]) { int l, k, j, i; double scale, hh, h, g, f; for (i=3; i>=2; i--) { l=i-1; h=scale=0.0; if (l > 1) { for (k=1;k<=l;k++) scale += fabs(a[i][k]); if (scale == 0.0) e[i]=a[i][l]; else { for (k=1;k<=l;k++) { a[i][k] /= scale; h += a[i][k]*a[i][k]; } f=a[i][l]; g=(f >= 0.0 ? -sqrt(h) : sqrt(h)); e[i]=scale*g; h -= f*g; a[i][l]=f-g; f=0.0; for (j=1;j<=l;j++) { /* a[j][i]=a[i][j]/h; */ g=0.0; for (k=1;k<=j;k++) g += a[j][k]*a[i][k]; for (k=j+1;k<=l;k++) g += a[k][j]*a[i][k]; e[j]=g/h; f += e[j]*a[i][j]; } hh=f/(h+h); for (j=1;j<=l;j++) { f=a[i][j]; e[j]=g=e[j]-hh*f; for (k=1;k<=j;k++) a[j][k] -= (f*e[k]+g*a[i][k]); } } } else e[i]=a[i][l]; d[i]=h; } /* d[1]=0.0; */ e[1]=0.0; for (i=1; i<=3; i++) { /* l=i-1; if (d[i]) { for (j=1;j<=l;j++) { g=0.0; for (k=1;k<=l;k++) g += a[i][k]*a[k][j]; for (k=1;k<=l;k++) a[k][j] -= g*a[k][i]; } } */ d[i]=a[i][i]; /* a[i][i]=1.0; for (j=1;j<=l;j++) a[j][i]=a[i][j]=0.0; */ } } /*..............................................................................*/ /* From Numerical Recipes. Simplified to deal specifically with 3*3 matrices (stored as elements [1...3][1...3] or a 4*4 array). 
*/ #define NRANSI #define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a)) void tqli(double d[4], double e[4]) { double pythag(double a, double b); int m, l, iter, i; /* int k; */ double s, r, p, g, f, dd, c, b; for (i=2; i<=3; i++) e[i-1] = e[i]; e[3] = 0.0; for (l=1; l<=3; l++) { iter = 0; do { for (m=l; m<=3-1; m++) { dd = fabs(d[m]) + fabs(d[m+1]); if ((double)(fabs(e[m])+dd) == dd) break; } if (m != l) { if (iter++ == 30) { fprintf(stderr, "Too many iterations in tqli\n"); exit (2); } g = (d[l+1] - d[l]) / (2.0*e[l]); r = pythag(g, 1.0); g = d[m] - d[l] + e[l] / (g + SIGN(r,g)); s = c = 1.0; p = 0.0; for (i=m-1; i>=l; i--) { f = s * e[i]; b = c * e[i]; e[i+1] = (r=pythag(f,g)); if (r == 0.0) { d[i+1] -= p; e[m] = 0.0; break; } s = f/r; c = g/r; g = d[i+1] - p; r = (d[i] - g)*s + 2.0*c*b; d[i+1] = g+(p=s*r); g = c*r - b; /* for (k=1; k<=3; k++) { f = z[k][i+1]; z[k][i+1] = s*z[k][i]+c*f; z[k][i] = c*z[k][i]i - s*f; } */ } if (r == 0.0 && i >= l) continue; d[l] -= p; e[l] = g; e[m] = 0.0; } } while (m != l); } } #undef NRANSI /*..............................................................................*/ /* From Numerical Recipes. Used by tqli. */ #define NRANSI static double sqrarg; #define SQR(a) ((sqrarg=(a)) == 0.0 ? 0.0 : sqrarg*sqrarg) double pythag(double a, double b) { double absa, absb; absa = fabs(a); absb = fabs(b); if (absa > absb) return absa*sqrt(1.0+SQR(absb/absa)); else return (absb == 0.0 ? 0.0 : absb*sqrt(1.0+SQR(absa/absb))); } #undef NRANSI /*..............................................................................*/ /* Normalise a vector to have unit length. For speed during heavy use, it is not checked that the supplied vector has non-zero length. */ void normalise(struct vector *u) { double tot; tot = sqrt( DOT(*u,*u) ); if (tot !=0.0) { tot=1/tot; (*u).x *= tot; (*u).y *= tot; (*u).z *= tot; } } /* Returns the vector pointing from the centre of mass of particle 2 to the centre of mass of the closest image of particle 1. */ struct vector image(struct vector r1, struct vector r2, struct vector box) { struct vector r12; double anint(double); r12.x = r1.x - r2.x; r12.y = r1.y - r2.y; r12.z = r1.z - r2.z; r12.x = box.x * (r12.x - anint(r12.x)); r12.y = box.y * (r12.y - anint(r12.y)); r12.z = box.z * (r12.z - anint(r12.z)); return r12; } /* Returns the nearest integer to its argument as a double precision number. e.g. anint(-0.49) = 0.0 and anint(-0.51) = -1.0. Equivalent to the Fortran intrinsic ANINT. */ double anint(double arg) { if (arg < 0) { return (double)( (long)(arg-0.5) ); } else { return (double)( (long)(arg+0.5) ); } } /*..............................................................................*/ /* Returns an evenly distributed random unit vector of unit length. See Allen & Tildesley p349 or Frenkel & Smit p410. 
RANDOM VECTOR ON UNIT SPHERE */ struct vector ranvec(void) { double a, b, xi1, xi2; struct vector unit; double ran2(long *); do { xi1 = 1.0 - 2.0*ran2(&seed); xi2 = 1.0 - 2.0*ran2(&seed); a = xi1*xi1 + xi2*xi2; } while (a > 1.0); b = 2.0 * sqrt(1.0 - a); unit.x = xi1 * b; unit.y = xi2 * b; unit.z = 1.0 - 2.0*a; return unit; } /** * returns a point randomly and evenly distributed inside of a unit sphere */ struct vector ranvecsph(void) { struct vector ranvec; double ran2(long *); do{ ranvec.x = 2 * ran2(&seed) - 1.0; ranvec.y = 2 * ran2(&seed) - 1.0; ranvec.z = 2 * ran2(&seed) - 1.0; } while(ranvec.x*ranvec.x + ranvec.y*ranvec.y + ranvec.z*ranvec.z >= 1); //printf("%lf\t%lf\t%lf\n", ranvec.x,ranvec.y,ranvec.z); return ranvec; } /**** some useful math *******/ struct vector vec_create(double x, double y, double z) { struct vector newvec; newvec.x=x; newvec.y=y; newvec.z=z; return newvec; } struct vector vec_createarr(double a[3]) { struct vector newvec; newvec.x=a[0]; newvec.y=a[1]; newvec.z=a[2]; return newvec; } double vec_dotproduct(struct vector A,struct vector B) { double dp; dp = A.x*B.x + A.y*B.y + A.z*B.z; return dp; } /* vector projection of vector A to direction of B*/ struct vector vec_project(struct vector* A,struct vector* B) { double dp; struct vector pr; dp = A->x*B->x + A->y*B->y + A->z*B->z; pr.x=B->x*dp; pr.y=B->y*dp; pr.z=B->z*dp; return pr; } void ortogonalise(struct vector *A, struct vector B) { double dp; double vec_dotproduct(struct vector A,struct vector B); dp=vec_dotproduct(*A,B); (*A).x -= B.x * dp; (*A).y -= B.y * dp; (*A).z -= B.z * dp; } /* vector projection of vector A perpendicular to direction of B*/ struct vector vec_perpproject(struct vector *A,struct vector *B) { struct vector pp; double dp; struct vector vec_project(struct vector *, struct vector*); dp=DOT((*A),(*B)); pp.x = A->x - B->x*dp; pp.y = A->y - B->y*dp; pp.z = A->z - B->z*dp; // fprintf (stderr, "pp x: %.8f y: %.8f z: %.8f \n",pp.x,pp.y,pp.z); return pp; } /* returns a vector perpendicular to A nothing special about the vector except that it's one of the perpendicular options and is normalized */ struct vector vec_perp(struct vector A) { double ratio,x,y; struct vector somevector; struct vector vec_create(double, double, double); struct vector vec_normalize(struct vector); void normalise(struct vector *); struct vector vec_crossproduct(struct vector, struct vector); x=A.x; y=A.y; if (x == 0) x=1; else { if (y == 0) y=1; else { ratio=y/x; y=x*ratio*2; } } somevector= vec_create(x, y, A.z); normalise(&somevector); return vec_crossproduct(A,somevector); } /* Perform the multiplication of a matrix A and a vector B where A is the first argument and B is the second argument. 
The routine will return A x B. */
struct vector matrix_vec_multiply(double A[3][3],struct vector B) { int i; double vecarr[3]; struct vector AB,RA; struct vector vec_createarr(double[3]); double vec_dotproduct(struct vector,struct vector); for (i=0;i<3;i++) { /* index the row vector from A*/ RA=vec_createarr(A[i]); /* Now find the dot product of this row with B*/ vecarr[i]=vec_dotproduct(RA,B); } AB=vec_createarr(vecarr); return AB; }
/* Distance between two vectors*/
double vec_distance(struct vector vec1,struct vector vec2) { double sum; sum= (vec1.x-vec2.x)*(vec1.x-vec2.x)+(vec1.y-vec2.y)*(vec1.y-vec2.y)+(vec1.z-vec2.z)*(vec1.z-vec2.z); return pow(sum,0.5); }
/* Vector size */
double vec_size(struct vector vec) { double size; size=sqrt(vec.x*vec.x+ vec.y*vec.y+ vec.z*vec.z); return size; }
/* Normalize a vector*/
struct vector vec_normalize(struct vector vec) { double mag; struct vector newvec; double vec_size(struct vector); mag= vec_size (vec); mag=1/mag; newvec.x=vec.x*mag; newvec.y=vec.y*mag; newvec.z=vec.z*mag; return newvec; }
/* Scale a vector */
struct vector vec_scale(struct vector vec, double scale) { vec.x=vec.x*scale; vec.y=vec.y*scale; vec.z=vec.z*scale; return vec; }
/* cross product*/
struct vector vec_crossproduct(struct vector A,struct vector B) { struct vector cp; cp.x=( A.y*B.z - A.z*B.y); cp.y=( -A.x*B.z + A.z*B.x); cp.z=( A.x*B.y - A.y*B.x); return cp; }
/* addition of vectors*/
inline struct vector vec_sum(struct vector A,struct vector B) { struct vector C; C.x=(A.x + B.x); C.y=(A.y + B.y); C.z=(A.z + B.z); return C; }
/* subtraction of vectors*/
inline struct vector vec_sub(struct vector A,struct vector B) { struct vector C; C.x=(A.x - B.x); C.y=(A.y - B.y); C.z=(A.z - B.z); return C; }
/* assign the values of vector B to vector A*/
inline void vec_asign(struct vector *A, struct vector B) { (*A).x=B.x; (*A).y=B.y; (*A).z=B.z; }
/* generate random unit vector*/
struct vector vec_random(void) { struct vector newvec; struct vector ranvec(void); newvec=ranvec(); return newvec; }
/*generate random unit quaternion*/
struct quat quat_random(void) { double cosv, sinv; struct quat newquat; struct vector newaxis; struct vector ranvec(void); /* generate quaternion for rotation*/ newaxis = ranvec(); /*random axis for rotation*/ cosv = cos(PIH * ran2(&seed) ); if (ran2(&seed) <0.5) sinv = sqrt(1.0 - cosv*cosv); else sinv = -sqrt(1.0 - cosv*cosv); newquat.w=cosv; newquat.x=newaxis.x*sinv; newquat.y=newaxis.y*sinv; newquat.z=newaxis.z*sinv; return newquat; }
/* Create a quaternion for rotation around vector "vec"; the function needs the cosine of the half angle (vc) and its sine (vs)*/
struct quat quat_create(struct vector vec, double vc, double vs) { struct quat newquat; newquat.w=vc; newquat.x=vec.x*vs; newquat.y=vec.y*vs; newquat.z=vec.z*vs; return newquat; }
/*rotate vector with quaternion*/
void vec_rotate(struct vector *vec, struct quat quat) { double t2,t3,t4,t5,t6,t7,t8,t9,t10,newx,newy,newz; /* t1 = quat.w * quat.w; */ t2 = quat.w * quat.x; t3 = quat.w * quat.y; t4 = quat.w * quat.z; t5 = -quat.x * quat.x; t6 = quat.x * quat.y; t7 = quat.x * quat.z; t8 = -quat.y * quat.y; t9 = quat.y * quat.z; t10 = -quat.z * quat.z; newx = 2.0 * ( (t8+t10)*(*vec).x + (t6-t4)*(*vec).y + (t3+t7)*(*vec).z ) + (*vec).x; newy = 2.0 * ( (t4+t6)*(*vec).x + (t5+t10)*(*vec).y + (t9-t2)*(*vec).z ) + (*vec).y; newz = 2.0 * ( (t7-t3)*(*vec).x + (t2+t9)*(*vec).y + (t5+t8)*(*vec).z ) + (*vec).z; (*vec).x = newx; (*vec).y = newy; (*vec).z = newz; }
/* rotate spherocylinder by quaternion of random axis
and angle smaller than maxcos(cosine of angle half), we do everything on site for speed */ void psc_rotate(struct particles *psc, double max_angle,int geotype) { double vc, vs, t2, t3, t4, t5, t6, t7, t8, t9, t10; double d1, d2, d3, d4, d5, d6, d7, d8, d9 , newx, newy, newz; int k,m; struct quat newquat; struct vector newaxis; struct vector ranvec(void); /* generate quaternion for rotation*/ newaxis = ranvec(); /*random axes for rotation*/ // maxcos = cos(maxorient/2/180*PI); // vc = maxcos + ran2(&seed)*(1-maxcos); /*cos of angle must be bigger than maxcos and smaller than one*/ vc = cos(max_angle * ran2(&seed) ); if (ran2(&seed) <0.5) vs = sqrt(1.0 - vc*vc); else vs = -sqrt(1.0 - vc*vc); /*randomly choose orientation of direction of rotation clockwise or counterclockwise*/ newquat.w=vc; newquat.x=newaxis.x*vs; newquat.y=newaxis.y*vs; newquat.z=newaxis.z*vs; /* do quaternion rotation*/ t2 = newquat.w * newquat.x; t3 = newquat.w * newquat.y; t4 = newquat.w * newquat.z; t5 = -newquat.x * newquat.x; t6 = newquat.x * newquat.y; t7 = newquat.x * newquat.z; t8 = -newquat.y * newquat.y; t9 = newquat.y * newquat.z; t10 = -newquat.z * newquat.z; d1 = t8 + t10; d2 = t6 - t4; d3 = t3 + t7; d4 = t4 + t6; d5 = t5 + t10; d6 = t9 - t2; d7 = t7 - t3; d8 = t2 + t9; d9 = t5 + t8; /*rotate spherocylinder direction vector*/ newx = 2.0 * ( d1*psc->dir.x + d2*psc->dir.y + d3*psc->dir.z ) + psc->dir.x; newy = 2.0 * ( d4*psc->dir.x + d5*psc->dir.y + d6*psc->dir.z ) + psc->dir.y; newz = 2.0 * ( d7*psc->dir.x + d8*psc->dir.y + d9*psc->dir.z ) + psc->dir.z; psc->dir.x = newx; psc->dir.y = newy; psc->dir.z = newz; m=1; if ( (geotype != SCN) && (geotype != SCA) ) { if ( (geotype == TPSC) || (geotype == TCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) ) m=2; for (k=0;k<m;k++) { /*rotate patch direction vector*/ newx = 2.0 * ( d1*psc->patchdir[k].x + d2*psc->patchdir[k].y + d3*psc->patchdir[k].z ) + psc->patchdir[k].x; newy = 2.0 * ( d4*psc->patchdir[k].x + d5*psc->patchdir[k].y + d6*psc->patchdir[k].z ) + psc->patchdir[k].y; newz = 2.0 * ( d7*psc->patchdir[k].x + d8*psc->patchdir[k].y + d9*psc->patchdir[k].z ) + psc->patchdir[k].z; psc->patchdir[k].x = newx; psc->patchdir[k].y = newy; psc->patchdir[k].z = newz; /*rotate patch sides vectors*/ newx = 2.0 * ( d1*psc->patchsides[0+2*k].x + d2*psc->patchsides[0+2*k].y + d3*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].x; newy = 2.0 * ( d4*psc->patchsides[0+2*k].x + d5*psc->patchsides[0+2*k].y + d6*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].y; newz = 2.0 * ( d7*psc->patchsides[0+2*k].x + d8*psc->patchsides[0+2*k].y + d9*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].z; psc->patchsides[0+2*k].x = newx; psc->patchsides[0+2*k].y = newy; psc->patchsides[0+2*k].z = newz; newx = 2.0 * ( d1*psc->patchsides[1+2*k].x + d2*psc->patchsides[1+2*k].y + d3*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].x; newy = 2.0 * ( d4*psc->patchsides[1+2*k].x + d5*psc->patchsides[1+2*k].y + d6*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].y; newz = 2.0 * ( d7*psc->patchsides[1+2*k].x + d8*psc->patchsides[1+2*k].y + d9*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].z; psc->patchsides[1+2*k].x = newx; psc->patchsides[1+2*k].y = newy; psc->patchsides[1+2*k].z = newz; } } m=1; if ( (geotype == CHPSC) || (geotype == CHCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) ) { if ( (geotype == TCHPSC) || (geotype == TCHCPSC) ) m=2; for (k=0;k<m;k++) { /*rotate chiral direction vector*/ newx = 2.0 * ( d1*psc->chdir[k].x + d2*psc->chdir[k].y + d3*psc->chdir[k].z ) + 
psc->chdir[k].x; newy = 2.0 * ( d4*psc->chdir[k].x + d5*psc->chdir[k].y + d6*psc->chdir[k].z ) + psc->chdir[k].y; newz = 2.0 * ( d7*psc->chdir[k].x + d8*psc->chdir[k].y + d9*psc->chdir[k].z ) + psc->chdir[k].z; psc->chdir[k].x = newx; psc->chdir[k].y = newy; psc->chdir[k].z = newz; } } }
/* computes the centre of mass of the system (volume weighted, using periodic boundary conditions) and stores it in conf->syscm */
void masscenter(long npart, struct ia_param ia_params[MAXT][MAXT], struct conf * conf) { long i; double anint(double); conf->syscm.x = 0; conf->syscm.y = 0; conf->syscm.z = 0; for (i=0; i<npart; i++) { /*using periodic boundary conditions*/ conf->syscm.x += (conf->particle[i].pos.x - anint(conf->particle[i].pos.x) ) * ia_params[conf->particle[i].type][conf->particle[i].type].volume; conf->syscm.y += (conf->particle[i].pos.y - anint(conf->particle[i].pos.y) ) * ia_params[conf->particle[i].type][conf->particle[i].type].volume; conf->syscm.z += (conf->particle[i].pos.z - anint(conf->particle[i].pos.z) ) * ia_params[conf->particle[i].type][conf->particle[i].type].volume; } conf->syscm.x /= conf->sysvolume; conf->syscm.y /= conf->sysvolume; conf->syscm.z /= conf->sysvolume; return; }
/* rotate a cluster of particles by a quaternion with a random axis and a rotation angle limited by max_angle; we do everything in place for speed */
void cluster_rotate(long target, struct vector gc, double max_angle, struct topo * topo, struct conf * conf) { long current,i; double vc,vs; //double quatsize;
struct quat newquat; struct vector newaxis; struct vector ranvec(void); void vec_rotate(struct vector *, struct quat);
// create rotation quaternion
newaxis = ranvec(); /*random axis for rotation*/
// maxcos = cos(maxorient/2/180*PI);
//vc = maxcos + ran2(&seed)*(1-maxcos); /*cos of angle must be bigger than maxcos and smaller than one*/
vc = cos(max_angle * ran2(&seed) ); if (ran2(&seed) <0.5) vs = sqrt(1.0 - vc*vc); else vs = -sqrt(1.0 - vc*vc); /*randomly choose direction of rotation, clockwise or counterclockwise*/
newquat.w=vc; newquat.x=newaxis.x*vs; newquat.y=newaxis.y*vs; newquat.z=newaxis.z*vs;
//quatsize=sqrt(newquat.w*newquat.w+newquat.x*newquat.x+newquat.y*newquat.y+newquat.z*newquat.z);
i=0; current = topo->chainlist[target][0]; while (current >=0 ) {
//shift position to geometrical center
conf->particle[current].pos.x -= gc.x; conf->particle[current].pos.y -= gc.y; conf->particle[current].pos.z -= gc.z;
//scale things by box not to have them distorted
conf->particle[current].pos.x *= conf->box.x; conf->particle[current].pos.y *= conf->box.y; conf->particle[current].pos.z *= conf->box.z;
//do rotation
vec_rotate(&conf->particle[current].pos, newquat); vec_rotate(&conf->particle[current].dir, newquat); vec_rotate(&conf->particle[current].patchdir[0], newquat); vec_rotate(&conf->particle[current].patchdir[1], newquat); vec_rotate(&conf->particle[current].chdir[0], newquat); vec_rotate(&conf->particle[current].chdir[1], newquat); vec_rotate(&conf->particle[current].patchsides[0], newquat); vec_rotate(&conf->particle[current].patchsides[1], newquat); vec_rotate(&conf->particle[current].patchsides[2], newquat); vec_rotate(&conf->particle[current].patchsides[3], newquat);
//scale back
conf->particle[current].pos.x /= conf->box.x; conf->particle[current].pos.y /= conf->box.y; conf->particle[current].pos.z /= conf->box.z;
//shift positions back
conf->particle[current].pos.x += gc.x; conf->particle[current].pos.y += gc.y; conf->particle[current].pos.z += gc.z; i++; current = topo->chainlist[target][i]; } }
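/* Illustrative self-check (an addition, not part of the original code): rotating the unit vector (1,0,0) by 90 degrees about the z axis should give (0,1,0). quat_create() takes the cosine and sine of the HALF angle, i.e. cos(45 deg) = sin(45 deg) = sqrt(2)/2 here. A minimal sketch: */
static void quat_selfcheck(void)
{
    struct vector axis, v;
    struct quat q;
    struct quat quat_create(struct vector, double, double);
    void vec_rotate(struct vector *, struct quat);
    axis.x = 0.0; axis.y = 0.0; axis.z = 1.0;          /* rotation axis: z */
    q = quat_create(axis, sqrt(2.0)/2.0, sqrt(2.0)/2.0);
    v.x = 1.0; v.y = 0.0; v.z = 0.0;
    vec_rotate(&v, q);
    printf("rotated x axis: (%f, %f, %f), expected (0, 1, 0)\n", v.x, v.y, v.z);
}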
/* put the particle back into the original box using periodic boundary conditions. In our system the particle positions are scaled by the box size, so getting them into the original box means wrapping them between 0 and 1 and then scaling back by the box size. */
void origbox(struct vector *pos,struct vector box) { double anint(double); (*pos).x = box.x * ((*pos).x - anint((*pos).x)); (*pos).y = box.y * ((*pos).y - anint((*pos).y)); (*pos).z = box.z * ((*pos).z - anint((*pos).z)); }
/* use of periodic boundary conditions*/
void usepbc(struct vector *pos,struct vector pbc) { do { (*pos).x += pbc.x; } while ((*pos).x < 0.0); do { (*pos).x -= pbc.x; } while ((*pos).x > pbc.x); do { (*pos).y += pbc.y; } while ((*pos).y < 0.0); do { (*pos).y -= pbc.y; } while ((*pos).y > pbc.y); do { (*pos).z += pbc.z; } while ((*pos).z < 0.0); do { (*pos).z -= pbc.z; } while ((*pos).z > pbc.z); }
/*..............................................................................*/
/*.......................TEMPLATE FILES.........................................*/
/*..............................................................................*/
/*
# Template for the "options" file. Options start with an '#'.
# Pressure couplings:
# 0 = anisotropic coupling, 1 = isotropic coupling, 2 = isotropic in xy z=const, 3 = isotropic
# xy and keep Volume constant
# Wang-Landau method: (with constant decrease of bias addition by factor of 2, until less than WL_ALPHATOL)
# 0 = none, 1 = z-direction of 1st particle, 2 = hole in xyplane, 3 = z-orientation of 0th particle
# 4 = distance of first two particles, 5 = pore around z axis and above CM, 6 = pore around z axis and above 0th particle
# 7 = number of particles in contact (within distance sqrt(WL_CONTACTS))
ptype = 1 # Pressure coupling type (0-anisotropic xyz, 1-isotropic xyz, 2 - isotropic in xy z=const, 3 - isotropic in xy and V=const)
press = 1 # Pressure
paralpress = 1 # Parallel pressure for replica exchange
shave = 0 # Average number of volume change attempts per sweep (usually 1)
nequil = 0 # Number of equilibration sweeps
adjust = 0 # Number of equilibration sweeps between step size adjustments
nsweeps = 1000000 # Number of production sweeps
paramfrq = 1000000 # Number of sweeps between order parameter samples
report = 1000000 # Number of sweeps between statistics reports
nrepchange = 1000 # Number of sweeps between replica exchanges
movie = 100000 # Number of sweeps between movie frames (0 = no movie)
chainprob = 0.0 # Probability of chain move attempts per sweep ( 0.25/number of particles in chain)
transmx = 0.212 # Initial maximum displacement
rotmx = 7.5 # Initial maximum orientation change (degrees)
edge_mx = 0.0 # Initial maximum box length change
chainmmx = 0.0 # Initial maximum chain displacement
chainrmx = 0.0 # Initial maximum chain rotation change (degrees)
temper = 1.0 # Temperature in units kT/e
paraltemper = 1.5 # Temperature for parallel tempering in kT/e
wlm = 0 # Wang-Landau method
wlmtype = 0 # For which atomic type (from top.init) should the Wang-Landau method be calculated?
switchprob = 0.0016 # Probability of type switch attempts per sweep
pairlist_update = 8 # Number of sweeps after which the pairlist should be updated
seed = 1 # Random number seed
write_cluster = 10000 # Number of sweeps per writing out cluster info
# End of the file
*/
/* Example of 'Config.init' file, but you must delete the comments...
there are only numbers in the configuration file:
#box 10.0 10.0 10.0
#particles (x,y,z) (direction_x,direction_y,direction_z) (patchdirection_x,patchdirection_y,patchdirection_z) (switched)
*/
/* Template for the topology file 'top.init'.
( "\\" is the symbol for line continuation, "#" is the symbol for a comment, "[" is the starting sign for a keyword, "]" is the ending sign for a keyword )
There are three keywords, types, molecules, and system. They should be given in this order.
TYPES: spherocylinders
SC - purely repulsive spherocylinder with WCA potential on closest distance
SCA - isotropic cos^2 potential acting isotropically, dependent only on the closest distance between spherocylinders.
PSC - Attractive potential is limited to an angular wedge on the spherocylinder. The patch goes all the way through, also making the hemispherical caps on the ends attractive
CPSC - Attractive potential is limited to an angular wedge on the cylindrical part of the spherocylinders. The hemispherical caps on the ends are repulsive spheres
(T)(CH)PSC - T adds a second patch, CH adds chirality
SP - purely repulsive sphere with WCA potential on closest distance
SPA - isotropic cos^2 potential acting isotropically, dependent only on the closest distance between objects
[Types]
# NAME NUMBER GEOTYPE EPSILON SIGMA ATTRACTION_DIST ATTRACTION_SWITCH PATCH_ANGLE PATCH_SWITCH SC_LENGTH (Optional second patch: PATCH_ROTATION PATCH_ANGLE PATCH_SWITCH ) CHIRAL_ANGLE
Prot1 1 PSC 1 1.2 1.346954458 1.0 80.0 5.0 3
Prot2 2 PSC 1 1.2 1.346954458 1.0 170.0 5.0 3
Prot3 3 CHCPSC 1 1.2 1.346954458 1.0 170.0 5.0 3 10
Prot4 4 TCHCPSC 1 1.2 1.346954458 1.0 170.0 5.0 3 90.0 90.0 5.0 10
[Molecules]
# Molecules letter
# bond1 - harmonic bond between nearest neighbours (end points for spherocylinders) (first constant then eq distance)
# bond2 - harmonic bond between second nearest neighbours (their center of mass) (first constant then eq distance)
# bondd - directional harmonic bond between nearest neighbours (end point of the second spherocylinder is attached to the point of bondlength extension of the first spherocylinder) (first constant then eq distance)
# angle1 - angle between two spherocylinders - nearest neighbours (first constant then eq degrees 0-180.0)
# angle2 - angle between two spherocylinder patches - nearest neighbours (first constant then eq degrees 0-180.0)
# particles - types as they go in the chain in the molecule
A: {
#what: TYPE SWITCHTYPE DELTA_MU
particles: 1 2 0.5
particles: 2
}
B: {
particles: 1
particles: 2 1 0.3
}
[System]
A 2
B 2
[EXTER]
# wall interaction
# THICKNESS EPSILON ATTRACTION_SWITCH
5.0 1.0 1.0
[EXCLUDE]
#set pair types for which attraction will be excluded (the reverse pair is automatically added)
1 2
1 3
*/
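/* Illustrative self-check (an addition, not part of the original code): the minimum-image convention implemented by anint()/image(). Two particles at scaled x positions 0.1 and 0.9 in a box of length 10 are only 0.2 box lengths apart through the periodic boundary, so image() should return +2.0 in x (from particle 2 at 0.9 to the closest image of particle 1, at 1.1). A minimal sketch: */
static void pbc_selfcheck(void)
{
    struct vector r1, r2, box, r12;
    struct vector image(struct vector, struct vector, struct vector);
    r1.x = 0.1; r1.y = 0.0; r1.z = 0.0;
    r2.x = 0.9; r2.y = 0.0; r2.z = 0.0;
    box.x = 10.0; box.y = 10.0; box.z = 10.0;
    r12 = image(r1, r2, box);
    printf("minimum image x separation: %f (expected 2.0)\n", r12.x);
}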
and angle smaller than maxcos(cosine of angle half), we do everything on site for speed */ void psc_rotate(struct particles *psc, double max_angle,int geotype) { double vc, vs, t2, t3, t4, t5, t6, t7, t8, t9, t10; double d1, d2, d3, d4, d5, d6, d7, d8, d9 , newx, newy, newz; int k,m; struct quat newquat; struct vector newaxis; struct vector ranvec(void); /* generate quaternion for rotation*/ newaxis = ranvec(); /*random axes for rotation*/ // maxcos = cos(maxorient/2/180*PI); // vc = maxcos + ran2(&seed)*(1-maxcos); /*cos of angle must be bigger than maxcos and smaller than one*/ vc = cos(max_angle * ran2(&seed) ); if (ran2(&seed) <0.5) vs = sqrt(1.0 - vc*vc); else vs = -sqrt(1.0 - vc*vc); /*randomly choose orientation of direction of rotation clockwise or counterclockwise*/ newquat.w=vc; newquat.x=newaxis.x*vs; newquat.y=newaxis.y*vs; newquat.z=newaxis.z*vs; /* do quaternion rotation*/ t2 = newquat.w * newquat.x; t3 = newquat.w * newquat.y; t4 = newquat.w * newquat.z; t5 = -newquat.x * newquat.x; t6 = newquat.x * newquat.y; t7 = newquat.x * newquat.z; t8 = -newquat.y * newquat.y; t9 = newquat.y * newquat.z; t10 = -newquat.z * newquat.z; d1 = t8 + t10; d2 = t6 - t4; d3 = t3 + t7; d4 = t4 + t6; d5 = t5 + t10; d6 = t9 - t2; d7 = t7 - t3; d8 = t2 + t9; d9 = t5 + t8; /*rotate spherocylinder direction vector*/ newx = 2.0 * ( d1*psc->dir.x + d2*psc->dir.y + d3*psc->dir.z ) + psc->dir.x; newy = 2.0 * ( d4*psc->dir.x + d5*psc->dir.y + d6*psc->dir.z ) + psc->dir.y; newz = 2.0 * ( d7*psc->dir.x + d8*psc->dir.y + d9*psc->dir.z ) + psc->dir.z; psc->dir.x = newx; psc->dir.y = newy; psc->dir.z = newz; m=1; if ( (geotype != SCN) && (geotype != SCA) ) { if ( (geotype == TPSC) || (geotype == TCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) ) m=2; for (k=0;k<m;k++) { /*rotate patch direction vector*/ newx = 2.0 * ( d1*psc->patchdir[k].x + d2*psc->patchdir[k].y + d3*psc->patchdir[k].z ) + psc->patchdir[k].x; newy = 2.0 * ( d4*psc->patchdir[k].x + d5*psc->patchdir[k].y + d6*psc->patchdir[k].z ) + psc->patchdir[k].y; newz = 2.0 * ( d7*psc->patchdir[k].x + d8*psc->patchdir[k].y + d9*psc->patchdir[k].z ) + psc->patchdir[k].z; psc->patchdir[k].x = newx; psc->patchdir[k].y = newy; psc->patchdir[k].z = newz; /*rotate patch sides vectors*/ newx = 2.0 * ( d1*psc->patchsides[0+2*k].x + d2*psc->patchsides[0+2*k].y + d3*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].x; newy = 2.0 * ( d4*psc->patchsides[0+2*k].x + d5*psc->patchsides[0+2*k].y + d6*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].y; newz = 2.0 * ( d7*psc->patchsides[0+2*k].x + d8*psc->patchsides[0+2*k].y + d9*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].z; psc->patchsides[0+2*k].x = newx; psc->patchsides[0+2*k].y = newy; psc->patchsides[0+2*k].z = newz; newx = 2.0 * ( d1*psc->patchsides[1+2*k].x + d2*psc->patchsides[1+2*k].y + d3*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].x; newy = 2.0 * ( d4*psc->patchsides[1+2*k].x + d5*psc->patchsides[1+2*k].y + d6*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].y; newz = 2.0 * ( d7*psc->patchsides[1+2*k].x + d8*psc->patchsides[1+2*k].y + d9*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].z; psc->patchsides[1+2*k].x = newx; psc->patchsides[1+2*k].y = newy; psc->patchsides[1+2*k].z = newz; } } m=1; if ( (geotype == CHPSC) || (geotype == CHCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) ) { if ( (geotype == TCHPSC) || (geotype == TCHCPSC) ) m=2; for (k=0;k<m;k++) { /*rotate chiral direction vector*/ newx = 2.0 * ( d1*psc->chdir[k].x + d2*psc->chdir[k].y + d3*psc->chdir[k].z ) + 
psc->chdir[k].x; newy = 2.0 * ( d4*psc->chdir[k].x + d5*psc->chdir[k].y + d6*psc->chdir[k].z ) + psc->chdir[k].y; newz = 2.0 * ( d7*psc->chdir[k].x + d8*psc->chdir[k].y + d9*psc->chdir[k].z ) + psc->chdir[k].z; psc->chdir[k].x = newx; psc->chdir[k].y = newy; psc->chdir[k].z = newz; } } } /*returns a position of center of mass of system*/ void masscenter(long npart, struct ia_param ia_params[MAXT][MAXT], struct conf * conf) { long i; double anint(double); conf->syscm.x = 0; conf->syscm.y = 0; conf->syscm.z = 0; for (i=0; i<npart; i++) { /*using periodic boundary conditions*/ conf->syscm.x += (conf->particle[i].pos.x - anint(conf->particle[i].pos.x) ) * ia_params[conf->particle[i].type][conf->particle[i].type].volume; conf->syscm.y += (conf->particle[i].pos.y - anint(conf->particle[i].pos.y) ) * ia_params[conf->particle[i].type][conf->particle[i].type].volume; conf->syscm.z += (conf->particle[i].pos.z - anint(conf->particle[i].pos.z) ) * ia_params[conf->particle[i].type][conf->particle[i].type].volume; } conf->syscm.x /= conf->sysvolume; conf->syscm.y /= conf->sysvolume; conf->syscm.z /= conf->sysvolume; return; } /* rotate cluster of particles by quaternion of random axis and angle smaller than maxcos(cosine of angle half), we do everything on site for speed */ void cluster_rotate(long target, struct vector gc, double max_angle, struct topo * topo, struct conf * conf) { long current,i; double vc,vs; //double quatsize; struct quat newquat; struct vector newaxis; struct vector ranvec(void); void vec_rotate(struct vector *, struct quat); // create rotation quaternion newaxis = ranvec(); /*random axes for rotation*/ // maxcos = cos(maxorient/2/180*PI); //vc = maxcos + ran2(&seed)*(1-maxcos); /*cos of angle must be bigger than maxcos and smaller than one*/ vc = cos(max_angle * ran2(&seed) ); if (ran2(&seed) <0.5) vs = sqrt(1.0 - vc*vc); else vs = -sqrt(1.0 - vc*vc); /*randomly choose orientation of direction of rotation clockwise or counterclockwise*/ newquat.w=vc; newquat.x=newaxis.x*vs; newquat.y=newaxis.y*vs; newquat.z=newaxis.z*vs; //quatsize=sqrt(newquat.w*newquat.w+newquat.x*newquat.x+newquat.y*newquat.y+newquat.z*newquat.z); //shift position to geometrical center i=0; current = topo->chainlist[target][0]; while (current >=0 ) { //shift position to geometrical center conf->particle[current].pos.x -= gc.x; conf->particle[current].pos.y -= gc.y; conf->particle[current].pos.z -= gc.z; //scale things by box not to have them distorted conf->particle[current].pos.x *= conf->box.x; conf->particle[current].pos.y *= conf->box.y; conf->particle[current].pos.z *= conf->box.z; //do rotation vec_rotate(&conf->particle[current].pos, newquat); vec_rotate(&conf->particle[current].dir, newquat); vec_rotate(&conf->particle[current].patchdir[0], newquat); vec_rotate(&conf->particle[current].patchdir[1], newquat); vec_rotate(&conf->particle[current].chdir[0], newquat); vec_rotate(&conf->particle[current].chdir[1], newquat); vec_rotate(&conf->particle[current].patchsides[0], newquat); vec_rotate(&conf->particle[current].patchsides[1], newquat); vec_rotate(&conf->particle[current].patchsides[2], newquat); vec_rotate(&conf->particle[current].patchsides[3], newquat); //sclae back conf->particle[current].pos.x /= conf->box.x; conf->particle[current].pos.y /= conf->box.y; conf->particle[current].pos.z /= conf->box.z; //shift positions back conf->particle[current].pos.x += gc.x; conf->particle[current].pos.y += gc.y; conf->particle[current].pos.z += gc.z; i++; current = topo->chainlist[target][i]; } } /* put the 
particle in the original box using periodic boundary conditions in our system the particle positions are scaled by box size so to get them into original obx is to get htem between 0 and 1 and then scale this back by size of box*/ void origbox(struct vector *pos,struct vector box) { double anint(double); (*pos).x = box.x * ((*pos).x - anint((*pos).x)); (*pos).y = box.y * ((*pos).y - anint((*pos).y)); (*pos).z = box.z * ((*pos).z - anint((*pos).z)); } /* use of periodic boundary conditions*/ void usepbc(struct vector *pos,struct vector pbc) { do { (*pos).x += pbc.x; } while ((*pos).x < 0.0); do { (*pos).x -= pbc.x; } while ((*pos).x > pbc.x); do { (*pos).y += pbc.y; } while ((*pos).y < 0.0); do { (*pos).y -= pbc.y; } while ((*pos).y > pbc.y); do { (*pos).z += pbc.z; } while ((*pos).z < 0.0); do { (*pos).z -= pbc.z; } while ((*pos).z > pbc.z); } /*..............................................................................*/ /*.......................TEMPLATE FILES.........................................*/ /*..............................................................................*/ /* # Template for the "options" file. Options start with an '#'. # Pressure couplings: # 0 = anisotropic coupling, 1 = isotropic coupling, 2 = isotropic in xy z=const, 3 = isotropic # xy and keep Volume constant # Wang-Landau method: (with constant decrease of bias addition by factor of 2, until less than WL_ALPHATOL) # O = none, 1 = z-direction of 1st paticle, 2 = hole in xyplane, 3 = z-orientation of 0th particle # 4 = distance of first two particles, 5 = pore around z axis and above CM, 6 = pore around z axis and above 0th particle # 7 = number of particles in contact (within distance sqrt(WL_CONTACTS)) ptype = 1 # Pressure coupling type (0-anisotropic xyz, 1-isotropic xyz, 2 - isotropic in xy z=const, 3 - isotropic in xy and V=const) press = 1 # Pressure paralpress = 1 # Parallel pressure for replica exchange shave = 0 # Average number of volume change attempts per sweep (usually 1) nequil = 0 # Number of equilibration sweeps adjust = 0 # Number of equilibration sweeps between step size adjustments nsweeps = 1000000 # Number of production sweeps paramfrq = 1000000 # Number of sweeps between order parameter samples report = 1000000 # Number of sweeps between statistics reports nrepchange = 1000 # Number of sweeps between replica exchanges movie = 100000 # Number of sweeps between movie frames (0 = no movie) chainprob = 0.0 # Probability of chain move attempts per sweep ( 0.25/number of particles in chain) transmx = 0.212 # Initial maximum displacement rotmx = 7.5 # Initial maximum orientation change (degrees) edge_mx = 0.0 # Initial maximum box length change chainmmx = 0.0 # Initial maximum chain displacement chainrmx = 0.0 # Initial maximum chain rotation change (degrees) temper = 1.0 # Temperature in units kT/e paraltemper = 1.5 # Temperature for parallel tempering in kT/e wlm = 0 # Wang-Landau method wlmtype = 0 # For which atomic type (from top.init) should the Wang-Landau method be calculated? switchprob = 0.0016 # Probability of type switch attempts per sweep pairlist_update = 8 # Number of sweeps after which the pairlist should be updated seed = 1 # Random number seed write_cluster = 10000 # Number of sweeps per writing out cluster info # End of the file */ /* Example of 'Config.init' file, but you must delete comments... 
there are only number in configuration file #box 10.0 10.0 10.0 #particles (x,y,z) (direction_x,direction_y, direction_z) (patchdirection_x,patchdirection_y,patchdirection_z) (switched) */ /* Template for the topology file 'top.init'. ( "\\" is symbol for line continue, "#" is symbol for comment, "[" is starting sign for keyword, "]" is ending sign for kyeword ) There are three keywords, types, molecules, and system. They should be given in this order. TYPES: spherocylinders SC - purely repulsive spherocylinder with WCA potential on closest distance SCA - isotropic cos^2 potential is acting isotropicaly dependent only on closest distance between spherocylinders.. PSC - Attractive potential in limited to an angular wedge on spherocylinder. Patch goes all the way through, making also hemispherical caps on end attractive CPSC - Attractive potential in limited to an angular wedge on cylindrical part of spherocylinders. The hemispherical caps on ends are repulsive spheres (T)(CH)PSC - T adds second patch, CH - adds chirality SP - purely repulsive shpere with WCA potential on closest distance SPA - isotropic cos^2 potential is acting isotropicaly dependent only on closest distance between obejcts [Types] # NAME NUMBER GEOTYPE EPSILON SIGMA ATTRACTION_DIST ATTRACTION_SWITCH PATCH_ANGLE PATCH_SWITCH SC_LENGTH (Optional second patch: PATCH_ROTATION PATCH_ANGLE PATCH_SWITCH )CHIRAL_ANGLE Prot1 1 PSC 1 1.2 1.346954458 1.0 80.0 5.0 3 Prot2 2 PSC 1 1.2 1.346954458 1.0 170.0 5.0 3 Prot3 3 CHCPSC 1 1.2 1.346954458 1.0 170.0 5.0 3 10 Prot4 4 TCHCPSC 1 1.2 1.346954458 1.0 170.0 5.0 3 90.0 90.0 5.0 10 [Molecules] # Molecules letter # bond1 - harmonic bond between nearest neighbours (end points for spherocylinders) (first constant then eq distance) # bond2 - harmonic bond between second nearest neighbours (their center of mass) (first constant then eq distance) # bondd - directional harmonic bond between nearest neighbours (end point of the second spherocylinder is attached to the point of bondlength extension of the first spherocylinder) (first constant then eq distance) # angle1 - angle between two spherocylinders -nearest neighbours (first constant then eq degrees 0-180.0) # angle2 - angle between two spherocylinder patches -nearest neighbours (first constant then eq degrees 0-180.0) # particles - types as they go in chain in molecule A: { #what: TYPE SWITCHTYPE DELTA_MU particles: 1 2 0.5 particles: 2 } B: { particles: 1 particles: 2 1 0.3 } [System] A 2 B 2 [EXTER] # wall interaction # THICKNESS EPSILON ATTRACTION_SWITCH 5.0 1.0 1.0 [EXCLUDE] #set pair types for which attraction will be excluded (reversepair is automaticaly added) 1 2 1 3 */
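/* Illustrative example (not part of the original program; seg_clamp and the demo
   values are assumed names for this sketch only): the core of the segment-distance
   routines below, mindist_segmentpoint() and the sphere-spherocylinder branch of
   overlap(), is projecting the point onto the segment axis and clamping the
   projection to [-halfl, +halfl], exactly as linemin() does. */
#include <math.h>
#include <stdio.h>

static double seg_clamp(double c, double halfl) {
    /* clamp the axial projection c to the segment of half length halfl */
    if (c >= halfl) return halfl;
    if (c > -halfl) return c;
    return -halfl;
}

int main(void) {
    /* segment along z with half length 1.5; point at (1, 0, 4) */
    double dir[3] = {0.0, 0.0, 1.0}, p[3] = {1.0, 0.0, 4.0};
    double halfl = 1.5;
    double c = dir[0]*p[0] + dir[1]*p[1] + dir[2]*p[2];  /* projection onto the axis */
    double d = seg_clamp(c, halfl);                      /* closest axial parameter */
    double dx = p[0] - dir[0]*d, dy = p[1] - dir[1]*d, dz = p[2] - dir[2]*d;
    printf("closest axis point at t=%g, distance=%g\n", d, sqrt(dx*dx + dy*dy + dz*dz));
    /* expected: t=1.5, distance = sqrt(1 + 2.5*2.5) ~= 2.6926 */
    return 0;
}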
/* Find the closest distance between a line segment and a point and return it as a vector
   (from the point to the closest segment point). The function gets the orientation and
   length of the line segment and the vector connecting the centres of mass
   (from segment to point). */
struct vector mindist_segmentpoint(struct vector dir1, double length, struct vector r_cm)
{
    struct vector vec;
    double c, d, halfl;

    halfl = length * 0.5;
    c = DOT(dir1, r_cm);
    if (c >= halfl) d = halfl;
    else {
        if (c > -halfl) d = c;
        else d = -halfl;
    }
    vec.x = -r_cm.x + dir1.x * d;
    vec.y = -r_cm.y + dir1.y * d;
    vec.z = -r_cm.z + dir1.z * d;

    return vec;
}

/*..............................................................................*/

/* Determines whether two particles overlap. Returns 1 if there is an overlap, 0 if not. */
int overlap(struct particles part1, struct particles part2, struct vector box,
            struct ia_param ia_params[MAXT][MAXT])
{
    double b, c, d, e, f;   /* Coefficients in distance quadratic */
    double boundary;        /* Half length of central boundary zone of quadratic */
    double det;
    double halfl;           /* Half length of cylinder */
    double s0, t0;          /* det times location of min separation of infinite lines */
    double ss, tt;          /* Location of min separation of line segments */
    struct vector r_cm;     /* Vector between centres of mass */
    double dist;            /* Distance between particles */
    struct vector distvec;  /* Distance vector between particles */
    double linemin(double, double);
    struct vector image(struct vector, struct vector, struct vector);

    r_cm = image(part1.pos, part2.pos, box);
    if ((part1.type >= SP) && (part2.type >= SP)) {
        /* we have two spheres - most common, do nothing */
        dist = sqrt(DOT(r_cm, r_cm));
    } else {
        if ((ia_params[part1.type][part2.type].geotype[0] < SP) &&
            (ia_params[part1.type][part2.type].geotype[1] < SP)) {
            /* we have two spherocylinders; finding the closest contact between them */
            b = -DOT(part1.dir, part2.dir);
            d = DOT(part1.dir, r_cm);
            e = -DOT(part2.dir, r_cm);
            f = DOT(r_cm, r_cm);
            det = 1.0 - b*b;
            //halfl = length / 2.0;
            // Just take the mean of the two half lengths
            halfl = (ia_params[part1.type][part2.type].half_len[0] +
                     ia_params[part1.type][part2.type].half_len[1]) / 2.0;
            boundary = det * halfl;
            /* Location of smallest separation of the infinite lines */
            s0 = b*e - d;
            t0 = b*d - e;
            /* Location of smallest separation of line segments */
            if (s0 >= boundary) {
                if (t0 >= boundary) { /* Region 2 */
                    if ( d + halfl + halfl*b < 0.0 ) { ss = halfl; tt = linemin( -ss*b - e, halfl ); }
                    else { tt = halfl; ss = linemin( -tt*b - d, halfl ); }
                } else if (t0 >= -boundary) { /* Region 1 */
                    ss = halfl; tt = linemin( -ss*b - e, halfl );
                } else { /* Region 8 */
                    if ( d + halfl - halfl*b < 0.0 ) { ss = halfl; tt = linemin( -ss*b - e, halfl ); }
                    else { tt = -halfl; ss = linemin( -tt*b - d, halfl ); }
                }
            } else if (s0 >= -boundary) {
                if (t0 >= boundary) { /* Region 3 */
                    tt = halfl; ss = linemin( -tt*b - d, halfl );
                } else if (t0 >= -boundary) { /* Region 0 */
                    ss = s0/det; tt = t0/det;
                } else { /* Region 7 */
                    tt = -halfl; ss = linemin( -tt*b - d, halfl );
                }
            } else {
                if (t0 >= boundary) { /* Region 4 */
                    if ( d - halfl + halfl*b > 0.0 ) { ss = -halfl; tt = linemin( -ss*b - e, halfl ); }
                    else { tt = halfl; ss = linemin( -tt*b - d, halfl ); }
                } else if (t0 >= -boundary) { /* Region 5 */
                    ss = -halfl; tt = linemin( -ss*b - e, halfl );
                } else { /* Region 6 */
                    if ( d - halfl - halfl*b > 0.0 ) { ss = -halfl; tt = linemin( -ss*b - e, halfl ); }
                    else { tt = -halfl; ss = linemin( -tt*b - d, halfl ); }
                }
            }
            /* ss and tt are the locations of the minimum separation of the line segments */
            dist = sqrt(f + ss*ss + tt*tt + 2.0*(ss*d + tt*e + ss*tt*b));
        } else {
            if (ia_params[part1.type][part2.type].geotype[0] < SP) {
                /* We have one spherocylinder - it is the first one */
                //halfl=length/2;
                /* finding the closest vector from the spherocylinder to the sphere */
                halfl = ia_params[part1.type][part2.type].half_len[0];
                c = DOT(part1.dir, r_cm);
                if (c >= halfl) d = halfl;
                else {
                    if (c > -halfl) d = c;
                    else d = -halfl;
                }
                distvec.x = -r_cm.x + part1.dir.x * d;
                distvec.y = -r_cm.y + part1.dir.y * d;
                distvec.z = -r_cm.z + part1.dir.z * d;
                dist = sqrt(DOT(distvec, distvec));
            } else {
                /* last option: the first one is a sphere, the second one a spherocylinder */
                //halfl=length/2;
                /* finding the closest vector from the spherocylinder to the sphere */
                halfl = ia_params[part1.type][part2.type].half_len[1];
                c = DOT(part2.dir, r_cm);
                if (c >= halfl) d = halfl;
                else {
                    if (c > -halfl) d = c;
                    else d = -halfl;
                }
                distvec.x = r_cm.x - part2.dir.x * d;
                distvec.y = r_cm.y - part2.dir.y * d;
                distvec.z = r_cm.z - part2.dir.z * d;
                dist = sqrt(DOT(distvec, distvec));
            }
        }
    }

    /* Overlap exists if smallest separation is less than diameter of cylinder */
    if (dist < ia_params[part1.type][part2.type].sigma*0.5 ) {
        return 1;
    } else {
        return 0;
    }
}

/*..............................................................................*/

double linemin(double criterion, double halfl)
{
    if (criterion >= halfl) {
        return halfl;
    } else if (criterion >= -halfl) {
        return criterion;
    } else {
        return -halfl;
    }
}

/*..............................................................................*/
/*........................SOME USEFUL MATH......................................*/
/*..............................................................................*/

/* ran2 from Numerical Recipes. */
#define IM1 2147483563
#define IM2 2147483399
#define AM (1.0/IM1)
#define IMM1 (IM1-1)
#define IA1 40014
#define IA2 40692
#define IQ1 53668
#define IQ2 52774
#define IR1 12211
#define IR2 3791
#define NTAB 32
#define NDIV (1+IMM1/NTAB)
#define EPS 1.2e-7
#define RNMX (1.0-EPS)

double ran2(long *idum)
{
    int j;
    long k;
    static long idum2=123456789;
    static long iy=0;
    static long iv[NTAB];
    double temp;

    if (*idum <= 0) {
        if (-(*idum) < 1) *idum=1;
        else *idum = -(*idum);
        idum2=(*idum);
        for (j=NTAB+7;j>=0;j--) {
            k=(*idum)/IQ1;
            *idum=IA1*(*idum-k*IQ1)-k*IR1;
            if (*idum < 0) *idum += IM1;
            if (j < NTAB) iv[j] = *idum;
        }
        iy=iv[0];
    }
    k=(*idum)/IQ1;
    *idum=IA1*(*idum-k*IQ1)-k*IR1;
    if (*idum < 0) *idum += IM1;
    k=idum2/IQ2;
    idum2=IA2*(idum2-k*IQ2)-k*IR2;
    if (idum2 < 0) idum2 += IM2;
    j=iy/NDIV;
    iy=iv[j]-idum2;
    iv[j] = *idum;
    if (iy < 1) iy += IMM1;
    if ((temp=AM*iy) > RNMX) return RNMX;
    else return temp;
}

#undef IM1
#undef IM2
#undef AM
#undef IMM1
#undef IA1
#undef IA2
#undef IQ1
#undef IQ2
#undef IR1
#undef IR2
#undef NTAB
#undef NDIV
#undef EPS
#undef RNMX

/*..............................................................................*/

/* From Numerical Recipes. Simplified to deal specifically with 3*3 matrices
   (stored as elements [1...3][1...3] of a 4*4 array). */
void tred2(double a[4][4], double d[4], double e[4])
{
    int l, k, j, i;
    double scale, hh, h, g, f;

    for (i=3; i>=2; i--) {
        l=i-1;
        h=scale=0.0;
        if (l > 1) {
            for (k=1;k<=l;k++) scale += fabs(a[i][k]);
            if (scale == 0.0) e[i]=a[i][l];
            else {
                for (k=1;k<=l;k++) {
                    a[i][k] /= scale;
                    h += a[i][k]*a[i][k];
                }
                f=a[i][l];
                g=(f >= 0.0 ?
-sqrt(h) : sqrt(h)); e[i]=scale*g; h -= f*g; a[i][l]=f-g; f=0.0; for (j=1;j<=l;j++) { /* a[j][i]=a[i][j]/h; */ g=0.0; for (k=1;k<=j;k++) g += a[j][k]*a[i][k]; for (k=j+1;k<=l;k++) g += a[k][j]*a[i][k]; e[j]=g/h; f += e[j]*a[i][j]; } hh=f/(h+h); for (j=1;j<=l;j++) { f=a[i][j]; e[j]=g=e[j]-hh*f; for (k=1;k<=j;k++) a[j][k] -= (f*e[k]+g*a[i][k]); } } } else e[i]=a[i][l]; d[i]=h; } /* d[1]=0.0; */ e[1]=0.0; for (i=1; i<=3; i++) { /* l=i-1; if (d[i]) { for (j=1;j<=l;j++) { g=0.0; for (k=1;k<=l;k++) g += a[i][k]*a[k][j]; for (k=1;k<=l;k++) a[k][j] -= g*a[k][i]; } } */ d[i]=a[i][i]; /* a[i][i]=1.0; for (j=1;j<=l;j++) a[j][i]=a[i][j]=0.0; */ } } /*..............................................................................*/ /* From Numerical Recipes. Simplified to deal specifically with 3*3 matrices (stored as elements [1...3][1...3] or a 4*4 array). */ #define NRANSI #define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a)) void tqli(double d[4], double e[4]) { double pythag(double a, double b); int m, l, iter, i; /* int k; */ double s, r, p, g, f, dd, c, b; for (i=2; i<=3; i++) e[i-1] = e[i]; e[3] = 0.0; for (l=1; l<=3; l++) { iter = 0; do { for (m=l; m<=3-1; m++) { dd = fabs(d[m]) + fabs(d[m+1]); if ((double)(fabs(e[m])+dd) == dd) break; } if (m != l) { if (iter++ == 30) { fprintf(stderr, "Too many iterations in tqli\n"); exit (2); } g = (d[l+1] - d[l]) / (2.0*e[l]); r = pythag(g, 1.0); g = d[m] - d[l] + e[l] / (g + SIGN(r,g)); s = c = 1.0; p = 0.0; for (i=m-1; i>=l; i--) { f = s * e[i]; b = c * e[i]; e[i+1] = (r=pythag(f,g)); if (r == 0.0) { d[i+1] -= p; e[m] = 0.0; break; } s = f/r; c = g/r; g = d[i+1] - p; r = (d[i] - g)*s + 2.0*c*b; d[i+1] = g+(p=s*r); g = c*r - b; /* for (k=1; k<=3; k++) { f = z[k][i+1]; z[k][i+1] = s*z[k][i]+c*f; z[k][i] = c*z[k][i]i - s*f; } */ } if (r == 0.0 && i >= l) continue; d[l] -= p; e[l] = g; e[m] = 0.0; } } while (m != l); } } #undef NRANSI /*..............................................................................*/ /* From Numerical Recipes. Used by tqli. */ #define NRANSI static double sqrarg; #define SQR(a) ((sqrarg=(a)) == 0.0 ? 0.0 : sqrarg*sqrarg) double pythag(double a, double b) { double absa, absb; absa = fabs(a); absb = fabs(b); if (absa > absb) return absa*sqrt(1.0+SQR(absb/absa)); else return (absb == 0.0 ? 0.0 : absb*sqrt(1.0+SQR(absa/absb))); } #undef NRANSI /*..............................................................................*/ /* Normalise a vector to have unit length. For speed during heavy use, it is not checked that the supplied vector has non-zero length. */ void normalise(struct vector *u) { double tot; tot = sqrt( DOT(*u,*u) ); if (tot !=0.0) { tot=1/tot; (*u).x *= tot; (*u).y *= tot; (*u).z *= tot; } } /* Returns the vector pointing from the centre of mass of particle 2 to the centre of mass of the closest image of particle 1. */ struct vector image(struct vector r1, struct vector r2, struct vector box) { struct vector r12; double anint(double); r12.x = r1.x - r2.x; r12.y = r1.y - r2.y; r12.z = r1.z - r2.z; r12.x = box.x * (r12.x - anint(r12.x)); r12.y = box.y * (r12.y - anint(r12.y)); r12.z = box.z * (r12.z - anint(r12.z)); return r12; } /* Returns the nearest integer to its argument as a double precision number. e.g. anint(-0.49) = 0.0 and anint(-0.51) = -1.0. Equivalent to the Fortran intrinsic ANINT. 
*/ double anint(double arg) { if (arg < 0) { return (double)( (long)(arg-0.5) ); } else { return (double)( (long)(arg+0.5) ); } } /*..............................................................................*/ /* Returns an evenly distributed random unit vector of unit length. See Allen & Tildesley p349 or Frenkel & Smit p410. RANDOM VECTOR ON UNIT SPHERE */ struct vector ranvec(void) { double a, b, xi1, xi2; struct vector unit; double ran2(long *); do { xi1 = 1.0 - 2.0*ran2(&seed); xi2 = 1.0 - 2.0*ran2(&seed); a = xi1*xi1 + xi2*xi2; } while (a > 1.0); b = 2.0 * sqrt(1.0 - a); unit.x = xi1 * b; unit.y = xi2 * b; unit.z = 1.0 - 2.0*a; return unit; } /** * returns a point randomly and evenly distributed inside of a unit sphere */ struct vector ranvecsph(void) { struct vector ranvec; double ran2(long *); do{ ranvec.x = 2 * ran2(&seed) - 1.0; ranvec.y = 2 * ran2(&seed) - 1.0; ranvec.z = 2 * ran2(&seed) - 1.0; } while(ranvec.x*ranvec.x + ranvec.y*ranvec.y + ranvec.z*ranvec.z >= 1); //printf("%lf\t%lf\t%lf\n", ranvec.x,ranvec.y,ranvec.z); return ranvec; } /**** some useful math *******/ struct vector vec_create(double x, double y, double z) { struct vector newvec; newvec.x=x; newvec.y=y; newvec.z=z; return newvec; } struct vector vec_createarr(double a[3]) { struct vector newvec; newvec.x=a[0]; newvec.y=a[1]; newvec.z=a[2]; return newvec; } double vec_dotproduct(struct vector A,struct vector B) { double dp; dp = A.x*B.x + A.y*B.y + A.z*B.z; return dp; } /* vector projection of vector A to direction of B*/ struct vector vec_project(struct vector* A,struct vector* B) { double dp; struct vector pr; dp = A->x*B->x + A->y*B->y + A->z*B->z; pr.x=B->x*dp; pr.y=B->y*dp; pr.z=B->z*dp; return pr; } void ortogonalise(struct vector *A, struct vector B) { double dp; double vec_dotproduct(struct vector A,struct vector B); dp=vec_dotproduct(*A,B); (*A).x -= B.x * dp; (*A).y -= B.y * dp; (*A).z -= B.z * dp; } /* vector projection of vector A perpendicular to direction of B*/ struct vector vec_perpproject(struct vector *A,struct vector *B) { struct vector pp; double dp; struct vector vec_project(struct vector *, struct vector*); dp=DOT((*A),(*B)); pp.x = A->x - B->x*dp; pp.y = A->y - B->y*dp; pp.z = A->z - B->z*dp; // fprintf (stderr, "pp x: %.8f y: %.8f z: %.8f \n",pp.x,pp.y,pp.z); return pp; } /* returns a vector perpendicular to A nothing special about the vector except that it's one of the perpendicular options and is normalized */ struct vector vec_perp(struct vector A) { double ratio,x,y; struct vector somevector; struct vector vec_create(double, double, double); struct vector vec_normalize(struct vector); void normalise(struct vector *); struct vector vec_crossproduct(struct vector, struct vector); x=A.x; y=A.y; if (x == 0) x=1; else { if (y == 0) y=1; else { ratio=y/x; y=x*ratio*2; } } somevector= vec_create(x, y, A.z); normalise(&somevector); return vec_crossproduct(A,somevector); } /* Perform the multiplication of a matrix A and a vector B where A is the first argument and B is the second argument. 
The routine will return AxB*/ struct vector matrix_vec_multiply(double A[3][3],struct vector B) { int i; double vecarr[3]; struct vector AB,RA; struct vector vec_createarr(double[3]); double vec_dotproduct(struct vector,struct vector); for (i=0;i<3;i++) { /* index the row vector from A*/ RA=vec_createarr(A[i]); /* Now find the dot product of this row with B*/ vecarr[i]=vec_dotproduct(RA,B); } AB=vec_createarr(vecarr); return AB; } /* Distance between two vectors*/ double vec_distance(struct vector vec1,struct vector vec2) { double sum; sum= (vec1.x-vec2.x)*(vec1.x-vec2.x)+(vec1.y-vec2.y)*(vec1.y-vec2.y)+(vec1.z-vec2.z)*(vec1.z-vec2.z); return pow(sum,0.5); } /* Vector size */ double vec_size(struct vector vec) { double size; size=sqrt(vec.x*vec.x+ vec.y*vec.y+ vec.z*vec.z); return size; } /* Normalize a vector*/ struct vector vec_normalize(struct vector vec) { double mag; struct vector newvec; double vec_size(struct vector); mag= vec_size (vec); mag=1/mag; newvec.x=vec.x*mag; newvec.y=vec.y*mag; newvec.z=vec.z*mag; return newvec; } /* Scale a vector */ struct vector vec_scale(struct vector vec, double scale) { vec.x=vec.x*scale; vec.y=vec.y*scale; vec.z=vec.z*scale; return vec; } /* cross_product*/ struct vector vec_crossproduct(struct vector A,struct vector B) { struct vector cp; cp.x=( A.y*B.z - A.z*B.y); cp.y=( -A.x*B.z + A.z*B.x); cp.z=( A.x*B.y - A.y*B.x); return cp; } /* addition of vectors*/ inline struct vector vec_sum(struct vector A,struct vector B) { struct vector C; C.x=(A.x + B.x); C.y=(A.y + B.y); C.z=(A.z + B.z); return C; } /* subtraction of vectors*/ inline struct vector vec_sub(struct vector A,struct vector B) { struct vector C; C.x=(A.x - B.x); C.y=(A.y - B.y); C.z=(A.z - B.z); return C; } /* asign vlues of vector A by values in vector B*/ inline void vec_asign(struct vector *A, struct vector B) { (*A).x=B.x; (*A).y=B.y; (*A).z=B.z; } /* generate random unit vector*/ struct vector vec_random(void) { struct vector newvec; struct vector ranvec(void); newvec=ranvec(); return newvec; } /*generate random unit quaternion*/ struct quat quat_random(void) { double cosv, sinv; struct quat newquat; struct vector newaxis; struct vector ranvec(void); /* generate quaternion for rotation*/ newaxis = ranvec(); /*random axes for rotation*/ cosv = cos(PIH * ran2(&seed) ); if (ran2(&seed) <0.5) sinv = sqrt(1.0 - cosv*cosv); else sinv = -sqrt(1.0 - cosv*cosv); newquat.w=cosv; newquat.x=newaxis.x*sinv; newquat.y=newaxis.y*sinv; newquat.z=newaxis.z*sinv; return newquat; } /* Create quaternion for rotation around vector "vec" of angle in degrees "angle" function need cos of half angle and its sin*/ struct quat quat_create(struct vector vec, double vc, double vs) { struct quat newquat; newquat.w=vc; newquat.x=vec.x*vs; newquat.y=vec.y*vs; newquat.z=vec.z*vs; return newquat; } /*rotate vector with quaternion*/ void vec_rotate(struct vector *vec, struct quat quat) { double t2,t3,t4,t5,t6,t7,t8,t9,t10,newx,newy,newz; /* t1 = quat.w * quat.w; */ t2 = quat.w * quat.x; t3 = quat.w * quat.y; t4 = quat.w * quat.z; t5 = -quat.x * quat.x; t6 = quat.x * quat.y; t7 = quat.x * quat.z; t8 = -quat.y * quat.y; t9 = quat.y * quat.z; t10 = -quat.z * quat.z; newx = 2.0 * ( (t8+t10)*(*vec).x + (t6-t4)*(*vec).y + (t3+t7)*(*vec).z ) + (*vec).x; newy = 2.0 * ( (t4+t6)*(*vec).x + (t5+t10)*(*vec).y + (t9-t2)*(*vec).z ) + (*vec).y; newz = 2.0 * ( (t7-t3)*(*vec).x + (t2+t9)*(*vec).y + (t5+t8)*(*vec).z ) + (*vec).z; (*vec).x = newx; (*vec).y = newy; (*vec).z = newz; } /* rotate spherocylinder by quaternion of random axis 
and angle smaller than maxcos(cosine of angle half), we do everything on site for speed */ void psc_rotate(struct particles *psc, double max_angle,int geotype) { double vc, vs, t2, t3, t4, t5, t6, t7, t8, t9, t10; double d1, d2, d3, d4, d5, d6, d7, d8, d9 , newx, newy, newz; int k,m; struct quat newquat; struct vector newaxis; struct vector ranvec(void); /* generate quaternion for rotation*/ newaxis = ranvec(); /*random axes for rotation*/ // maxcos = cos(maxorient/2/180*PI); // vc = maxcos + ran2(&seed)*(1-maxcos); /*cos of angle must be bigger than maxcos and smaller than one*/ vc = cos(max_angle * ran2(&seed) ); if (ran2(&seed) <0.5) vs = sqrt(1.0 - vc*vc); else vs = -sqrt(1.0 - vc*vc); /*randomly choose orientation of direction of rotation clockwise or counterclockwise*/ newquat.w=vc; newquat.x=newaxis.x*vs; newquat.y=newaxis.y*vs; newquat.z=newaxis.z*vs; /* do quaternion rotation*/ t2 = newquat.w * newquat.x; t3 = newquat.w * newquat.y; t4 = newquat.w * newquat.z; t5 = -newquat.x * newquat.x; t6 = newquat.x * newquat.y; t7 = newquat.x * newquat.z; t8 = -newquat.y * newquat.y; t9 = newquat.y * newquat.z; t10 = -newquat.z * newquat.z; d1 = t8 + t10; d2 = t6 - t4; d3 = t3 + t7; d4 = t4 + t6; d5 = t5 + t10; d6 = t9 - t2; d7 = t7 - t3; d8 = t2 + t9; d9 = t5 + t8; /*rotate spherocylinder direction vector*/ newx = 2.0 * ( d1*psc->dir.x + d2*psc->dir.y + d3*psc->dir.z ) + psc->dir.x; newy = 2.0 * ( d4*psc->dir.x + d5*psc->dir.y + d6*psc->dir.z ) + psc->dir.y; newz = 2.0 * ( d7*psc->dir.x + d8*psc->dir.y + d9*psc->dir.z ) + psc->dir.z; psc->dir.x = newx; psc->dir.y = newy; psc->dir.z = newz; m=1; if ( (geotype != SCN) && (geotype != SCA) ) { if ( (geotype == TPSC) || (geotype == TCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) ) m=2; for (k=0;k<m;k++) { /*rotate patch direction vector*/ newx = 2.0 * ( d1*psc->patchdir[k].x + d2*psc->patchdir[k].y + d3*psc->patchdir[k].z ) + psc->patchdir[k].x; newy = 2.0 * ( d4*psc->patchdir[k].x + d5*psc->patchdir[k].y + d6*psc->patchdir[k].z ) + psc->patchdir[k].y; newz = 2.0 * ( d7*psc->patchdir[k].x + d8*psc->patchdir[k].y + d9*psc->patchdir[k].z ) + psc->patchdir[k].z; psc->patchdir[k].x = newx; psc->patchdir[k].y = newy; psc->patchdir[k].z = newz; /*rotate patch sides vectors*/ newx = 2.0 * ( d1*psc->patchsides[0+2*k].x + d2*psc->patchsides[0+2*k].y + d3*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].x; newy = 2.0 * ( d4*psc->patchsides[0+2*k].x + d5*psc->patchsides[0+2*k].y + d6*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].y; newz = 2.0 * ( d7*psc->patchsides[0+2*k].x + d8*psc->patchsides[0+2*k].y + d9*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].z; psc->patchsides[0+2*k].x = newx; psc->patchsides[0+2*k].y = newy; psc->patchsides[0+2*k].z = newz; newx = 2.0 * ( d1*psc->patchsides[1+2*k].x + d2*psc->patchsides[1+2*k].y + d3*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].x; newy = 2.0 * ( d4*psc->patchsides[1+2*k].x + d5*psc->patchsides[1+2*k].y + d6*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].y; newz = 2.0 * ( d7*psc->patchsides[1+2*k].x + d8*psc->patchsides[1+2*k].y + d9*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].z; psc->patchsides[1+2*k].x = newx; psc->patchsides[1+2*k].y = newy; psc->patchsides[1+2*k].z = newz; } } m=1; if ( (geotype == CHPSC) || (geotype == CHCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) ) { if ( (geotype == TCHPSC) || (geotype == TCHCPSC) ) m=2; for (k=0;k<m;k++) { /*rotate chiral direction vector*/ newx = 2.0 * ( d1*psc->chdir[k].x + d2*psc->chdir[k].y + d3*psc->chdir[k].z ) + 
psc->chdir[k].x;
            newy = 2.0 * ( d4*psc->chdir[k].x + d5*psc->chdir[k].y + d6*psc->chdir[k].z ) + psc->chdir[k].y;
            newz = 2.0 * ( d7*psc->chdir[k].x + d8*psc->chdir[k].y + d9*psc->chdir[k].z ) + psc->chdir[k].z;
            psc->chdir[k].x = newx;
            psc->chdir[k].y = newy;
            psc->chdir[k].z = newz;
        }
    }
}

/* returns the position of the centre of mass of the system */
void masscenter(long npart, struct ia_param ia_params[MAXT][MAXT], struct conf * conf)
{
    long i;
    double anint(double);

    conf->syscm.x = 0;
    conf->syscm.y = 0;
    conf->syscm.z = 0;
    for (i=0; i<npart; i++) {
        /* using periodic boundary conditions */
        conf->syscm.x += (conf->particle[i].pos.x - anint(conf->particle[i].pos.x) ) *
            ia_params[conf->particle[i].type][conf->particle[i].type].volume;
        conf->syscm.y += (conf->particle[i].pos.y - anint(conf->particle[i].pos.y) ) *
            ia_params[conf->particle[i].type][conf->particle[i].type].volume;
        conf->syscm.z += (conf->particle[i].pos.z - anint(conf->particle[i].pos.z) ) *
            ia_params[conf->particle[i].type][conf->particle[i].type].volume;
    }
    conf->syscm.x /= conf->sysvolume;
    conf->syscm.y /= conf->sysvolume;
    conf->syscm.z /= conf->sysvolume;
    return;
}

/* rotate a cluster of particles by a quaternion with a random axis and an angle
   smaller than max_angle (vc is the cosine of the half angle); we do everything
   in place for speed */
void cluster_rotate(long target, struct vector gc, double max_angle, struct topo * topo, struct conf * conf)
{
    long current, i;
    double vc, vs;
    //double quatsize;
    struct quat newquat;
    struct vector newaxis;
    struct vector ranvec(void);
    void vec_rotate(struct vector *, struct quat);

    // create rotation quaternion
    newaxis = ranvec(); /* random axis for rotation */
    // maxcos = cos(maxorient/2/180*PI);
    //vc = maxcos + ran2(&seed)*(1-maxcos); /* cos of angle must be bigger than maxcos and smaller than one */
    vc = cos(max_angle * ran2(&seed) );
    if (ran2(&seed) < 0.5) vs = sqrt(1.0 - vc*vc);
    else vs = -sqrt(1.0 - vc*vc); /* randomly choose direction of rotation, clockwise or counterclockwise */
    newquat.w = vc;
    newquat.x = newaxis.x*vs;
    newquat.y = newaxis.y*vs;
    newquat.z = newaxis.z*vs;
    //quatsize=sqrt(newquat.w*newquat.w+newquat.x*newquat.x+newquat.y*newquat.y+newquat.z*newquat.z);

    i = 0;
    current = topo->chainlist[target][0];
    while (current >= 0) {
        //shift position to geometrical center
        conf->particle[current].pos.x -= gc.x;
        conf->particle[current].pos.y -= gc.y;
        conf->particle[current].pos.z -= gc.z;
        //scale things by the box so as not to have them distorted
        conf->particle[current].pos.x *= conf->box.x;
        conf->particle[current].pos.y *= conf->box.y;
        conf->particle[current].pos.z *= conf->box.z;
        //do rotation
        vec_rotate(&conf->particle[current].pos, newquat);
        vec_rotate(&conf->particle[current].dir, newquat);
        vec_rotate(&conf->particle[current].patchdir[0], newquat);
        vec_rotate(&conf->particle[current].patchdir[1], newquat);
        vec_rotate(&conf->particle[current].chdir[0], newquat);
        vec_rotate(&conf->particle[current].chdir[1], newquat);
        vec_rotate(&conf->particle[current].patchsides[0], newquat);
        vec_rotate(&conf->particle[current].patchsides[1], newquat);
        vec_rotate(&conf->particle[current].patchsides[2], newquat);
        vec_rotate(&conf->particle[current].patchsides[3], newquat);
        //scale back
        conf->particle[current].pos.x /= conf->box.x;
        conf->particle[current].pos.y /= conf->box.y;
        conf->particle[current].pos.z /= conf->box.z;
        //shift positions back
        conf->particle[current].pos.x += gc.x;
        conf->particle[current].pos.y += gc.y;
        conf->particle[current].pos.z += gc.z;
        i++;
        current = topo->chainlist[target][i];
    }
}

/* put the particle in the original box using periodic boundary conditions;
   in our system the particle positions are scaled by the box size, so to get them
   into the original box we bring them between 0 and 1 and then scale back by the
   size of the box */
void origbox(struct vector *pos, struct vector box)
{
    double anint(double);
    (*pos).x = box.x * ((*pos).x - anint((*pos).x));
    (*pos).y = box.y * ((*pos).y - anint((*pos).y));
    (*pos).z = box.z * ((*pos).z - anint((*pos).z));
}

/* use of periodic boundary conditions */
void usepbc(struct vector *pos, struct vector pbc)
{
    do { (*pos).x += pbc.x; } while ((*pos).x < 0.0);
    do { (*pos).x -= pbc.x; } while ((*pos).x > pbc.x);
    do { (*pos).y += pbc.y; } while ((*pos).y < 0.0);
    do { (*pos).y -= pbc.y; } while ((*pos).y > pbc.y);
    do { (*pos).z += pbc.z; } while ((*pos).z < 0.0);
    do { (*pos).z -= pbc.z; } while ((*pos).z > pbc.z);
}

/*..............................................................................*/
/*.......................TEMPLATE FILES.........................................*/
/*..............................................................................*/

/*
# Template for the "options" file. Options start with an '#'.
# Pressure couplings:
# 0 = anisotropic coupling, 1 = isotropic coupling, 2 = isotropic in xy z=const,
# 3 = isotropic in xy and keep Volume constant
# Wang-Landau method: (with constant decrease of bias addition by factor of 2, until less than WL_ALPHATOL)
# 0 = none, 1 = z-direction of 1st particle, 2 = hole in xy plane, 3 = z-orientation of 0th particle
# 4 = distance of first two particles, 5 = pore around z axis and above CM, 6 = pore around z axis and above 0th particle
# 7 = number of particles in contact (within distance sqrt(WL_CONTACTS))
ptype = 1             # Pressure coupling type (0-anisotropic xyz, 1-isotropic xyz, 2 - isotropic in xy z=const, 3 - isotropic in xy and V=const)
press = 1             # Pressure
paralpress = 1        # Parallel pressure for replica exchange
shave = 0             # Average number of volume change attempts per sweep (usually 1)
nequil = 0            # Number of equilibration sweeps
adjust = 0            # Number of equilibration sweeps between step size adjustments
nsweeps = 1000000     # Number of production sweeps
paramfrq = 1000000    # Number of sweeps between order parameter samples
report = 1000000      # Number of sweeps between statistics reports
nrepchange = 1000     # Number of sweeps between replica exchanges
movie = 100000        # Number of sweeps between movie frames (0 = no movie)
chainprob = 0.0       # Probability of chain move attempts per sweep (0.25/number of particles in chain)
transmx = 0.212       # Initial maximum displacement
rotmx = 7.5           # Initial maximum orientation change (degrees)
edge_mx = 0.0         # Initial maximum box length change
chainmmx = 0.0        # Initial maximum chain displacement
chainrmx = 0.0        # Initial maximum chain rotation change (degrees)
temper = 1.0          # Temperature in units kT/e
paraltemper = 1.5     # Temperature for parallel tempering in kT/e
wlm = 0               # Wang-Landau method
wlmtype = 0           # For which atomic type (from top.init) should the Wang-Landau method be calculated?
switchprob = 0.0016   # Probability of type switch attempts per sweep
pairlist_update = 8   # Number of sweeps after which the pairlist should be updated
seed = 1              # Random number seed
write_cluster = 10000 # Number of sweeps per writing out cluster info
# End of the file
*/

/* Example of the 'Config.init' file, but you must delete comments...
there are only numbers in the configuration file
#box
10.0 10.0 10.0
#particles (x,y,z) (direction_x,direction_y,direction_z) (patchdirection_x,patchdirection_y,patchdirection_z) (switched)
*/

/* Template for the topology file 'top.init'.
( "\\" is the symbol for line continuation, "#" is the symbol for a comment,
"[" is the starting sign for a keyword, "]" is the ending sign for a keyword )
There are three keywords: types, molecules, and system. They should be given in this order.

TYPES: spherocylinders
SC   - purely repulsive spherocylinder with WCA potential on closest distance
SCA  - isotropic cos^2 potential acting isotropically, dependent only on the closest distance between spherocylinders
PSC  - attractive potential is limited to an angular wedge on the spherocylinder. The patch goes all the way through, also making the hemispherical caps on the ends attractive
CPSC - attractive potential is limited to an angular wedge on the cylindrical part of the spherocylinders. The hemispherical caps on the ends are repulsive spheres
(T)(CH)PSC - T adds a second patch, CH adds chirality
SP   - purely repulsive sphere with WCA potential on closest distance
SPA  - isotropic cos^2 potential acting isotropically, dependent only on the closest distance between objects

[Types]
# NAME NUMBER GEOTYPE EPSILON SIGMA ATTRACTION_DIST ATTRACTION_SWITCH PATCH_ANGLE PATCH_SWITCH SC_LENGTH (Optional second patch: PATCH_ROTATION PATCH_ANGLE PATCH_SWITCH ) CHIRAL_ANGLE
Prot1 1 PSC 1 1.2 1.346954458 1.0 80.0 5.0 3
Prot2 2 PSC 1 1.2 1.346954458 1.0 170.0 5.0 3
Prot3 3 CHCPSC 1 1.2 1.346954458 1.0 170.0 5.0 3 10
Prot4 4 TCHCPSC 1 1.2 1.346954458 1.0 170.0 5.0 3 90.0 90.0 5.0 10

[Molecules]
# Molecules letter
# bond1 - harmonic bond between nearest neighbours (end points for spherocylinders) (first constant then eq distance)
# bond2 - harmonic bond between second nearest neighbours (their center of mass) (first constant then eq distance)
# bondd - directional harmonic bond between nearest neighbours (end point of the second spherocylinder is attached to the point of bondlength extension of the first spherocylinder) (first constant then eq distance)
# angle1 - angle between two spherocylinders - nearest neighbours (first constant then eq degrees 0-180.0)
# angle2 - angle between two spherocylinder patches - nearest neighbours (first constant then eq degrees 0-180.0)
# particles - types as they go in the chain in the molecule
A: {
#what: TYPE SWITCHTYPE DELTA_MU
particles: 1 2 0.5
particles: 2
}
B: {
particles: 1
particles: 2 1 0.3
}

[System]
A 2
B 2

[EXTER]
# wall interaction
# THICKNESS EPSILON ATTRACTION_SWITCH
5.0 1.0 1.0

[EXCLUDE]
#set pair types for which attraction will be excluded (the reverse pair is automatically added)
1 2
1 3
*/
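/* Standalone sanity check of the quaternion rotation algebra used by vec_rotate()
   and psc_rotate() above: a rotation by angle alpha about a unit axis n is the
   quaternion (w, x, y, z) = (cos(alpha/2), n*sin(alpha/2)). This sketch is
   illustrative only; vec3/quat4 are simplified stand-ins for the program's own
   vector/quat structs, and the expansion below is the same t-term algebra. */
#include <math.h>
#include <stdio.h>

struct vec3  { double x, y, z; };
struct quat4 { double w, x, y, z; };

static void quat_rotate_demo(struct vec3 *v, struct quat4 q) {
    /* identical product terms to vec_rotate() */
    double t2 = q.w*q.x, t3 = q.w*q.y, t4 = q.w*q.z;
    double t5 = -q.x*q.x, t6 = q.x*q.y, t7 = q.x*q.z;
    double t8 = -q.y*q.y, t9 = q.y*q.z, t10 = -q.z*q.z;
    double nx = 2.0*((t8 + t10)*v->x + (t6 - t4)*v->y + (t3 + t7)*v->z) + v->x;
    double ny = 2.0*((t4 + t6)*v->x + (t5 + t10)*v->y + (t9 - t2)*v->z) + v->y;
    double nz = 2.0*((t7 - t3)*v->x + (t2 + t9)*v->y + (t5 + t8)*v->z) + v->z;
    v->x = nx; v->y = ny; v->z = nz;
}

int main(void) {
    /* 90 degrees about z, so the half angle is pi/4 = atan(1) */
    double h = atan(1.0);
    struct quat4 q = { cos(h), 0.0, 0.0, sin(h) };
    struct vec3 v = { 1.0, 0.0, 0.0 };
    quat_rotate_demo(&v, q);
    printf("(%.3f, %.3f, %.3f)\n", v.x, v.y, v.z); /* expect (0.000, 1.000, 0.000) */
    return 0;
}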
compare.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP AAA RRRR EEEEE % % C O O MM MM P P A A R R E % % C O O M M M PPPP AAAAA RRRR EEE % % C O O M M P A A R R E % % CCCC OOO M M P A A R R EEEEE % % % % % % MagickCore Image Comparison Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/statistic.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p a r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompareImages() compares one or more pixel channels of an image to a % reconstructed image and returns the difference image. % % The format of the CompareImages method is: % % Image *CompareImages(const Image *image,const Image *reconstruct_image, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. % */ static size_t GetImageChannels(const Image *image) { register ssize_t i; size_t channels; channels=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) != 0) channels++; } return(channels == 0 ? 
(size_t) 1 : channels); } MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image, const MetricType metric,double *distortion,ExceptionInfo *exception) { CacheView *highlight_view, *image_view, *reconstruct_view; const char *artifact; double fuzz; Image *clone_image, *difference_image, *highlight_image; MagickBooleanType status; PixelInfo highlight, lowlight, masklight; RectangleInfo geometry; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=GetImageDistortion(image,reconstruct_image,metric,distortion, exception); if (status == MagickFalse) return((Image *) NULL); columns=MagickMax(image->columns,reconstruct_image->columns); rows=MagickMax(image->rows,reconstruct_image->rows); SetGeometry(image,&geometry); geometry.width=columns; geometry.height=rows; clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception); difference_image=ExtentImage(clone_image,&geometry,exception); clone_image=DestroyImage(clone_image); if (difference_image == (Image *) NULL) return((Image *) NULL); (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception); highlight_image=CloneImage(image,columns,rows,MagickTrue,exception); if (highlight_image == (Image *) NULL) { difference_image=DestroyImage(difference_image); return((Image *) NULL); } status=SetImageStorageClass(highlight_image,DirectClass,exception); if (status == MagickFalse) { difference_image=DestroyImage(difference_image); highlight_image=DestroyImage(highlight_image); return((Image *) NULL); } (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception); (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception); (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception); artifact=GetImageArtifact(image,"compare:highlight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception); (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception); artifact=GetImageArtifact(image,"compare:lowlight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception); (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception); artifact=GetImageArtifact(image,"compare:masklight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception); /* Generate difference image. 
*/ status=MagickTrue; fuzz=GetFuzzyColorDistance(image,reconstruct_image); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); highlight_view=AcquireAuthenticCacheView(highlight_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,highlight_image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p, *magick_restrict q; register Quantum *magick_restrict r; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) || (r == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; MagickStatusType difference; register ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { SetPixelViaPixelInfo(highlight_image,&masklight,r); p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); r+=GetPixelChannels(highlight_image); continue; } difference=MagickFalse; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q); else distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); if ((distance*distance) > fuzz) { difference=MagickTrue; break; } } if (difference == MagickFalse) SetPixelViaPixelInfo(highlight_image,&lowlight,r); else SetPixelViaPixelInfo(highlight_image,&highlight,r); p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); r+=GetPixelChannels(highlight_image); } sync=SyncCacheViewAuthenticPixels(highlight_view,exception); if (sync == MagickFalse) status=MagickFalse; } highlight_view=DestroyCacheView(highlight_view); reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); (void) CompositeImage(difference_image,highlight_image,image->compose, MagickTrue,0,0,exception); highlight_image=DestroyImage(highlight_image); if (status == MagickFalse) difference_image=DestroyImage(difference_image); return(difference_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D i s t o r t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDistortion() compares one or more pixel channels of an image to a % reconstructed image and returns the specified distortion metric. % % The format of the GetImageDistortion method is: % % MagickBooleanType GetImageDistortion(const Image *image, % const Image *reconstruct_image,const MetricType metric, % double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double fuzz; MagickBooleanType status; size_t columns, rows; ssize_t y; /* Compute the absolute difference in pixels between two images. */ status=MagickTrue; fuzz=(double) MagickMin(GetPixelChannels(image), GetPixelChannels(reconstruct_image))* GetFuzzyColorDistance(image,reconstruct_image); rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, distance, Sa; MagickBooleanType difference; register ssize_t i; difference=MagickFalse; distance=0.0; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q); else pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); distance+=pixel*pixel; if (distance > fuzz) { channel_distortion[i]++; difference=MagickTrue; } } if (difference != MagickFalse) channel_distortion[CompositePixelChannel]++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetAbsoluteDistortion) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType GetFuzzDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for 
schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image, channel,q)); else distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetFuzzDistortion) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]); return(status); } static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double 
Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*fabs((double) p[i]- GetPixelChannel(reconstruct_image,channel,q)); else distance=QuantumScale*fabs(Sa*p[i]-Da* GetPixelChannel(reconstruct_image,channel,q)); channel_distortion[i]+=distance; channel_distortion[CompositePixelChannel]+=distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); return(status); } static MagickBooleanType GetMeanErrorPerPixel(Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; double area, maximum_error, mean_error; size_t columns, rows; ssize_t y; status=MagickTrue; area=0.0; maximum_error=0.0; mean_error=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=fabs((double) p[i]- GetPixelChannel(reconstruct_image,channel,q)); else distance=fabs(Sa*p[i]-Da* GetPixelChannel(reconstruct_image,channel,q)); distortion[i]+=distance; distortion[CompositePixelChannel]+=distance; 
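/* The absolute channel difference is accumulated into the per-channel and composite totals; the squared term and running maximum that follow feed the normalized mean and maximum error statistics stored on the image. */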
mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=distortion[CompositePixelChannel]/area; image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area; image->error.normalized_maximum_error=QuantumScale*maximum_error; return(status); } static MagickBooleanType GetMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image, channel,q)); else distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanSquaredError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=GetImageChannels(image); return(status); } static MagickBooleanType GetNormalizedCrossCorrelationDistortion( const Image *image,const Image *reconstruct_image,double *distortion, ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *image_view, 
*reconstruct_view; ChannelStatistics *image_statistics, *reconstruct_statistics; double area; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t columns, rows; ssize_t y; /* Normalize to account for variation due to lighting and exposure condition. */ image_statistics=GetImageStatistics(image,exception); reconstruct_statistics=GetImageStatistics(reconstruct_image,exception); if ((image_statistics == (ChannelStatistics *) NULL) || (reconstruct_statistics == (ChannelStatistics *) NULL)) { if (image_statistics != (ChannelStatistics *) NULL) image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); if (reconstruct_statistics != (ChannelStatistics *) NULL) reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); return(MagickFalse); } status=MagickTrue; progress=0; for (i=0; i <= MaxPixelChannels; i++) distortion[i]=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } area=PerceptibleReciprocal(area); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) { distortion[i]+=area*QuantumScale*(p[i]- image_statistics[channel].mean)*(GetPixelChannel( reconstruct_image,channel,q)- reconstruct_statistics[channel].mean); } else { distortion[i]+=area*QuantumScale*(Sa*p[i]- image_statistics[channel].mean)*(Da*GetPixelChannel( reconstruct_image,channel,q)- reconstruct_statistics[channel].mean); } } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp 
atomic #endif progress++; proceed=SetImageProgress(image,SimilarityImageTag,progress,rows); if (proceed == MagickFalse) { status=MagickFalse; break; } } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); /* Divide by the standard deviation. */ distortion[CompositePixelChannel]=0.0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma; PixelChannel channel = GetPixelChannelChannel(image,i); gamma=image_statistics[channel].standard_deviation* reconstruct_statistics[channel].standard_deviation; gamma=PerceptibleReciprocal(gamma); distortion[i]=QuantumRange*gamma*distortion[i]; distortion[CompositePixelChannel]+=distortion[i]*distortion[i]; } distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/ GetImageChannels(image)); /* Free resources. */ reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); return(status); } static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*fabs((double) p[i]- GetPixelChannel(reconstruct_image,channel,q)); else distance=QuantumScale*fabs(Sa*p[i]-Da* GetPixelChannel(reconstruct_image,channel,q)); if (distance > channel_distortion[i]) channel_distortion[i]=distance; if (distance > channel_distortion[CompositePixelChannel]) channel_distortion[CompositePixelChannel]=distance; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPeakAbsoluteError) #endif for (j=0; j <= 
MaxPixelChannels; j++) if (channel_distortion[j] > distortion[j]) distortion[j]=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; register ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) if (fabs(distortion[i]) < MagickEpsilon) distortion[i]=INFINITY; else distortion[i]=10.0*MagickLog10(1.0)-10.0*MagickLog10(distortion[i]); return(status); } static MagickBooleanType GetPerceptualHashDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { ChannelPerceptualHash *channel_phash, *reconstruct_phash; const char *artifact; MagickBooleanType normalize; ssize_t channel; /* Compute perceptual hash in the sRGB colorspace. */ channel_phash=GetImagePerceptualHash(image,exception); if (channel_phash == (ChannelPerceptualHash *) NULL) return(MagickFalse); reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception); if (reconstruct_phash == (ChannelPerceptualHash *) NULL) { channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory( channel_phash); return(MagickFalse); } artifact=GetImageArtifact(image,"phash:normalize"); normalize=(artifact == (const char *) NULL) || (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (channel=0; channel < MaxPixelChannels; channel++) { double difference; register ssize_t i; difference=0.0; for (i=0; i < MaximumNumberOfImageMoments; i++) { double alpha, beta; register ssize_t j; for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++) { alpha=channel_phash[channel].phash[j][i]; beta=reconstruct_phash[channel].phash[j][i]; if (normalize == MagickFalse) difference+=(beta-alpha)*(beta-alpha); else difference=sqrt((beta-alpha)*(beta-alpha)/ channel_phash[0].number_channels); } } distortion[channel]+=difference; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPerceptualHashDistortion) #endif distortion[CompositePixelChannel]+=difference; } /* Free resources. 
*/ reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory( reconstruct_phash); channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash); return(MagickTrue); } static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; register ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) distortion[i]=sqrt(distortion[i]); return(status); } static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { #define SSIMRadius 5.0 #define SSIMSigma 1.5 #define SSIMBlocksize 8 #define SSIMK1 0.01 #define SSIMK2 0.03 #define SSIML 1.0 CacheView *image_view, *reconstruct_view; char geometry[MagickPathExtent]; const char *artifact; double c1, c2, radius, sigma; KernelInfo *kernel_info; MagickBooleanType status; register ssize_t i; size_t columns, rows; ssize_t y; /* Compute structural similarity index @ https://en.wikipedia.org/wiki/Structural_similarity. */ radius=SSIMRadius; artifact=GetImageArtifact(image,"compare:ssim-radius"); if (artifact != (const char *) NULL) radius=StringToDouble(artifact,(char **) NULL); sigma=SSIMSigma; artifact=GetImageArtifact(image,"compare:ssim-sigma"); if (artifact != (const char *) NULL) sigma=StringToDouble(artifact,(char **) NULL); (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); c1=pow(SSIMK1*SSIML,2.0); artifact=GetImageArtifact(image,"compare:ssim-k1"); if (artifact != (const char *) NULL) c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0); c2=pow(SSIMK2*SSIML,2.0); artifact=GetImageArtifact(image,"compare:ssim-k2"); if (artifact != (const char *) NULL) c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0); status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,reconstruct_image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y- ((ssize_t) kernel_info->height/2L),columns+kernel_info->width, kernel_info->height,exception); q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/ 2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width, kernel_info->height,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double x_pixel_mu[MaxPixelChannels+1], x_pixel_sigma_squared[MaxPixelChannels+1], xy_sigma[MaxPixelChannels+1], y_pixel_mu[MaxPixelChannels+1], y_pixel_sigma_squared[MaxPixelChannels+1]; register const Quantum *magick_restrict reference, *magick_restrict target; register 
MagickRealType *k; ssize_t v; (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu)); (void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared)); (void) memset(xy_sigma,0,sizeof(xy_sigma)); (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu)); (void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared)); k=kernel_info->values; reference=p; target=q; for (v=0; v < (ssize_t) kernel_info->height; v++) { register ssize_t u; for (u=0; u < (ssize_t) kernel_info->width; u++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double x_pixel, y_pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image,channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel=QuantumScale*reference[i]; x_pixel_mu[i]+=(*k)*x_pixel; x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel; y_pixel=QuantumScale* GetPixelChannel(reconstruct_image,channel,target); y_pixel_mu[i]+=(*k)*y_pixel; y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel; xy_sigma[i]+=(*k)*x_pixel*y_pixel; } k++; reference+=GetPixelChannels(image); target+=GetPixelChannels(reconstruct_image); } reference+=GetPixelChannels(image)*columns; target+=GetPixelChannels(reconstruct_image)*columns; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double ssim, x_pixel_mu_squared, x_pixel_sigmas_squared, xy_mu, xy_sigmas, y_pixel_mu_squared, y_pixel_sigmas_squared; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image,channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i]; y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i]; xy_mu=x_pixel_mu[i]*y_pixel_mu[i]; xy_sigmas=xy_sigma[i]-xy_mu; x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared; y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared; ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/ ((x_pixel_mu_squared+y_pixel_mu_squared+c1)* (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2)); channel_distortion[i]+=ssim; channel_distortion[CompositePixelChannel]+=ssim; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion) #endif for (i=0; i <= MaxPixelChannels; i++) distortion[i]+=channel_distortion[i]; } image_view=DestroyCacheView(image_view); reconstruct_view=DestroyCacheView(reconstruct_view); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0)) continue; distortion[i]/=((double) columns*rows); } distortion[CompositePixelChannel]/=((double) columns*rows); distortion[CompositePixelChannel]/=(double) GetImageChannels(image); kernel_info=DestroyKernelInfo(kernel_info); return(status); } static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; register ssize_t i;
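/* Structural dissimilarity (DSSIM) reuses the SSIM computation and maps each channel through (1-SSIM)/2, so identical images yield a distortion of 0. */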
status=GetStructuralSimilarityDistortion(image,reconstruct_image, distortion,exception); for (i=0; i <= MaxPixelChannels; i++) distortion[i]=(1.0-(distortion[i]))/2.0; return(status); } MagickExport MagickBooleanType GetImageDistortion(Image *image, const Image *reconstruct_image,const MetricType metric,double *distortion, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. */ length=MaxPixelChannels+1; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_distortion,0,length* sizeof(*channel_distortion)); switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion, exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case MeanErrorPerPixelErrorMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { status=GetPeakAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status=GetPeakSignalToNoiseRatio(image,reconstruct_image, channel_distortion,exception); break; } case PerceptualHashErrorMetric: { status=GetPerceptualHashDistortion(image,reconstruct_image, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralSimilarityErrorMetric: { status=GetStructuralSimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralDissimilarityErrorMetric: { status=GetStructuralDisimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } } *distortion=channel_distortion[CompositePixelChannel]; channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(), *distortion); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D i s t o r t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDistortions() compares the pixel channels of an image to a % reconstructed image and returns the specified distortion metric for each % channel. 
% % The format of the GetImageDistortions method is: % % double *GetImageDistortions(const Image *image, % const Image *reconstruct_image,const MetricType metric, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o exception: return any errors or warnings in this structure. % */ MagickExport double *GetImageDistortions(Image *image, const Image *reconstruct_image,const MetricType metric, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. */ length=MaxPixelChannels+1UL; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_distortion,0,length* sizeof(*channel_distortion)); status=MagickTrue; switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion, exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case MeanErrorPerPixelErrorMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { status=GetPeakAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status=GetPeakSignalToNoiseRatio(image,reconstruct_image, channel_distortion,exception); break; } case PerceptualHashErrorMetric: { status=GetPerceptualHashDistortion(image,reconstruct_image, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralSimilarityErrorMetric: { status=GetStructuralSimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralDissimilarityErrorMetric: { status=GetStructuralDisimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } } if (status == MagickFalse) { channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); return((double *) NULL); } return(channel_distortion); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e s E q u a l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImagesEqual() compares the pixels of two images and returns immediately % if any pixel is not identical. 
% % The format of the IsImagesEqual method is: % % MagickBooleanType IsImagesEqual(const Image *image, % const Image *reconstruct_image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsImagesEqual(const Image *image, const Image *reconstruct_image,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image, channel,q)); if (distance >= MagickEpsilon) break; } if (i < (ssize_t) GetPixelChannels(image)) break; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } if (x < (ssize_t) columns) break; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(y < (ssize_t) rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r M e t r i c % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorMetric() measures the difference between colors at each pixel % location of two images. A value other than 0 means the colors match % exactly. Otherwise an error measure is computed by summing over all % pixels in an image the distance squared in RGB space between each image % pixel and its corresponding pixel in the reconstruct image. The error % measure is assigned to these image members: % % o mean_error_per_pixel: The mean error for any single pixel in % the image. % % o normalized_mean_error: The normalized mean quantization error for % any single pixel in the image. This distance measure is normalized to % a range between 0 and 1. It is independent of the range of red, green, % and blue values in the image. % % o normalized_maximum_error: The normalized maximum quantization % error for any single pixel in the image. This distance measure is % normalized to a range between 0 and 1. It is independent of the range % of red, green, and blue values in your image. 
% % A small normalized mean square error, accessed as % image->normalized_mean_error, suggests the images are very similar in % spatial layout and color. % % The format of the SetImageColorMetric method is: % % MagickBooleanType SetImageColorMetric(Image *image, % const Image *reconstruct_image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageColorMetric(Image *image, const Image *reconstruct_image,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area, maximum_error, mean_error, mean_error_per_pixel; MagickBooleanType status; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); area=0.0; maximum_error=0.0; mean_error_per_pixel=0.0; mean_error=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image, channel,q)); if (distance >= MagickEpsilon) { mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; } area++; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area); image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale* mean_error/area); image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error); status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i m i l a r i t y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SimilarityImage() compares the reference image of the image and returns the % best match offset. In addition, it returns a similarity image such that an % exact match location is completely white and if none of the pixels match, % black, otherwise some gray level in-between. 
% % The format of the SimilarityImage method is: % % Image *SimilarityImage(const Image *image,const Image *reference, % const MetricType metric,const double similarity_threshold, % RectangleInfo *offset,double *similarity,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reference: find an area of the image that closely resembles this image. % % o metric: the metric. % % o similarity_threshold: minimum distortion for (sub)image match. % % o offset: the best match offset of the reference image within the image. % % o similarity: the computed similarity between the images. % % o exception: return any errors or warnings in this structure. % */ static double GetSimilarityMetric(const Image *image,const Image *reference, const MetricType metric,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { double distortion; Image *similarity_image; MagickBooleanType status; RectangleInfo geometry; SetGeometry(reference,&geometry); geometry.x=x_offset; geometry.y=y_offset; similarity_image=CropImage(image,&geometry,exception); if (similarity_image == (Image *) NULL) return(0.0); distortion=0.0; status=GetImageDistortion(similarity_image,reference,metric,&distortion, exception); similarity_image=DestroyImage(similarity_image); if (status == MagickFalse) return(0.0); return(distortion); } MagickExport Image *SimilarityImage(const Image *image,const Image *reference, const MetricType metric,const double similarity_threshold, RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *similarity_view; Image *similarity_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(offset != (RectangleInfo *) NULL); SetGeometry(reference,offset); *similarity_metric=MagickMaximumValue; similarity_image=CloneImage(image,image->columns-reference->columns+1, image->rows-reference->rows+1,MagickTrue,exception); if (similarity_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(similarity_image,DirectClass,exception); if (status == MagickFalse) { similarity_image=DestroyImage(similarity_image); return((Image *) NULL); } (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel, exception); /* Measure similarity of reference image against image. 
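Each candidate offset is scored by GetSimilarityMetric(), which crops the corresponding region and evaluates the requested distortion metric; the smallest value seen so far is kept in similarity_metric together with its offset, and rows bail out early once the similarity threshold is reached.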
*/ status=MagickTrue; progress=0; similarity_view=AcquireAuthenticCacheView(similarity_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ shared(progress,status,similarity_metric) \ magick_number_threads(image,image,image->rows-reference->rows+1,1) #endif for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++) { double similarity; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp flush(similarity_metric) #endif if (*similarity_metric <= similarity_threshold) continue; q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns, 1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++) { register ssize_t i; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp flush(similarity_metric) #endif if (*similarity_metric <= similarity_threshold) break; similarity=GetSimilarityMetric(image,reference,metric,x,y,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SimilarityImage) #endif if ((metric == NormalizedCrossCorrelationErrorMetric) || (metric == UndefinedErrorMetric)) similarity=1.0-similarity; if (similarity < *similarity_metric) { offset->x=x; offset->y=y; *similarity_metric=similarity; } if (metric == PerceptualHashErrorMetric) similarity=MagickMin(0.01*similarity,1.0); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image, channel); if ((traits == UndefinedPixelTrait) || (similarity_traits == UndefinedPixelTrait) || ((similarity_traits & UpdatePixelTrait) == 0)) continue; SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange- QuantumRange*similarity),q); } q+=GetPixelChannels(similarity_image); } if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } similarity_view=DestroyCacheView(similarity_view); if (status == MagickFalse) similarity_image=DestroyImage(similarity_image); return(similarity_image); }
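/*
  Usage sketch (illustrative, not part of MagickCore's compare.c): a minimal
  driver showing how the public entry points above might be called.  The
  filenames "image.png" and "reconstruct.png" are placeholder assumptions for
  the example, and error handling is reduced to null checks.
*/
#include <stdio.h>
#include "MagickCore/MagickCore.h"

int main(int argc,char **argv)
{
  double
    distortion = 0.0;

  ExceptionInfo
    *exception;

  Image
    *difference,
    *image,
    *reconstruct;

  ImageInfo
    *image_info;

  (void) argc;
  MagickCoreGenesis(*argv,MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  /* Read the image and its reconstruction (placeholder filenames). */
  (void) CopyMagickString(image_info->filename,"image.png",MagickPathExtent);
  image=ReadImage(image_info,exception);
  (void) CopyMagickString(image_info->filename,"reconstruct.png",
    MagickPathExtent);
  reconstruct=ReadImage(image_info,exception);
  if ((image != (Image *) NULL) && (reconstruct != (Image *) NULL))
    {
      /*
        RootMeanSquaredErrorMetric routes through
        GetRootMeanSquaredDistortion(); the returned image marks differing
        pixels with the highlight color.
      */
      difference=CompareImages(image,reconstruct,RootMeanSquaredErrorMetric,
        &distortion,exception);
      (void) fprintf(stdout,"RMSE distortion: %g\n",distortion);
      if (difference != (Image *) NULL)
        difference=DestroyImage(difference);
    }
  if (reconstruct != (Image *) NULL)
    reconstruct=DestroyImage(reconstruct);
  if (image != (Image *) NULL)
    image=DestroyImage(image);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}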
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/statistic.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o m p a r e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CompareImages() compares one or more pixel channels of an image * to a % reconstructed image and returns the difference image. % % The * format of the CompareImages method is: % % Image *CompareImages(const * Image *image,const Image *reconstruct_image, % const MetricType * metric,double *distortion,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o * reconstruct_image: the reconstruct image. % % o metric: the metric. % % * o distortion: the computed distortion between the images. % % o * exception: return any errors or warnings in this structure. % */ static size_t GetImageChannels(const Image * image) { register ssize_t i; size_t channels; channels = 0; for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) != 0) channels++; } return (channels == 0 ? 
(size_t) 1 : channels); } MagickExport Image * CompareImages(Image * image, const Image * reconstruct_image, const MetricType metric, double *distortion, ExceptionInfo * exception) { CacheView * highlight_view, *image_view, *reconstruct_view; const char *artifact; double fuzz; Image * clone_image, *difference_image, *highlight_image; MagickBooleanType status; PixelInfo highlight, lowlight, masklight; RectangleInfo geometry; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(reconstruct_image != (const Image *)NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *)NULL); *distortion = 0.0; if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); status = GetImageDistortion(image, reconstruct_image, metric, distortion, exception); if (status == MagickFalse) return ((Image *) NULL); columns = MagickMax(image->columns, reconstruct_image->columns); rows = MagickMax(image->rows, reconstruct_image->rows); SetGeometry(image, &geometry); geometry.width = columns; geometry.height = rows; clone_image = CloneImage(image, 0, 0, MagickTrue, exception); if (clone_image == (Image *) NULL) return ((Image *) NULL); (void)SetImageMask(clone_image, ReadPixelMask, (Image *) NULL, exception); difference_image = ExtentImage(clone_image, &geometry, exception); clone_image = DestroyImage(clone_image); if (difference_image == (Image *) NULL) return ((Image *) NULL); (void)SetImageAlphaChannel(difference_image, OpaqueAlphaChannel, exception); highlight_image = CloneImage(image, columns, rows, MagickTrue, exception); if (highlight_image == (Image *) NULL) { difference_image = DestroyImage(difference_image); return ((Image *) NULL); } status = SetImageStorageClass(highlight_image, DirectClass, exception); if (status == MagickFalse) { difference_image = DestroyImage(difference_image); highlight_image = DestroyImage(highlight_image); return ((Image *) NULL); } (void)SetImageMask(highlight_image, ReadPixelMask, (Image *) NULL, exception); (void)SetImageAlphaChannel(highlight_image, OpaqueAlphaChannel, exception); (void)QueryColorCompliance("#f1001ecc", AllCompliance, &highlight, exception); artifact = GetImageArtifact(image, "compare:highlight-color"); if (artifact != (const char *)NULL) (void)QueryColorCompliance(artifact, AllCompliance, &highlight, exception); (void)QueryColorCompliance("#ffffffcc", AllCompliance, &lowlight, exception); artifact = GetImageArtifact(image, "compare:lowlight-color"); if (artifact != (const char *)NULL) (void)QueryColorCompliance(artifact, AllCompliance, &lowlight, exception); (void)QueryColorCompliance("#888888cc", AllCompliance, &masklight, exception); artifact = GetImageArtifact(image, "compare:masklight-color"); if (artifact != (const char *)NULL) (void)QueryColorCompliance(artifact, AllCompliance, &masklight, exception); /* * Generate difference image. 
*/ status = MagickTrue; fuzz = GetFuzzyColorDistance(image, reconstruct_image); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); highlight_view = AcquireAuthenticCacheView(highlight_image, exception); for (y = 0; y < (ssize_t) rows; y++) { MagickBooleanType sync; register const Quantum * magick_restrict p, *magick_restrict q; register Quantum * magick_restrict r; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); r = QueueCacheViewAuthenticPixels(highlight_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL) || (r == (Quantum *) NULL)) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; MagickStatusType difference; register ssize_t i; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { SetPixelViaPixelInfo(highlight_image, &masklight, r); p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); r += GetPixelChannels(highlight_image); continue; } difference = MagickFalse; Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance = (double)p[i] - GetPixelChannel(reconstruct_image, channel, q); else distance = Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q); if ((distance * distance) > fuzz) { difference = MagickTrue; break; } } if (difference == MagickFalse) SetPixelViaPixelInfo(highlight_image, &lowlight, r); else SetPixelViaPixelInfo(highlight_image, &highlight, r); p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); r += GetPixelChannels(highlight_image); } sync = SyncCacheViewAuthenticPixels(highlight_view, exception); if (sync == MagickFalse) status = MagickFalse; } highlight_view = DestroyCacheView(highlight_view); reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); (void)CompositeImage(difference_image, highlight_image, image->compose, MagickTrue, 0, 0, exception); highlight_image = DestroyImage(highlight_image); if (status == MagickFalse) difference_image = DestroyImage(difference_image); return (difference_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t I m a g e D i s t o r t i o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetImageDistortion() compares one or more pixel channels of an * image to a % reconstructed image and returns the specified distortion * metric. % % The format of the GetImageDistortion method is: % % * MagickBooleanType GetImageDistortion(const Image *image, % const * Image *reconstruct_image,const MetricType metric, % double * *distortion,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. 
% % o reconstruct_image: the * reconstruct image. % % o metric: the metric. % % o distortion: the * computed distortion between the images. % % o exception: return any * errors or warnings in this structure. % */ static MagickBooleanType GetAbsoluteDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; double fuzz; MagickBooleanType status; size_t columns, rows; ssize_t y; /* * Compute the absolute difference in pixels between two images. */ status = MagickTrue; fuzz = (double)MagickMin(GetPixelChannels(image), GetPixelChannels(reconstruct_image)) * GetFuzzyColorDistance(image, reconstruct_image); rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels + 1]; register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; continue; } (void)memset(channel_distortion, 0, sizeof(channel_distortion)); for (x = 0; x < (ssize_t) columns; x++) { double Da, distance, Sa; MagickBooleanType difference; register ssize_t i; difference = MagickFalse; distance = 0.0; Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) pixel = (double)p[i] - GetPixelChannel(reconstruct_image, channel, q); else pixel = Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q); distance += pixel * pixel; if (distance > fuzz) { channel_distortion[i]++; difference = MagickTrue; } } if (difference != MagickFalse) channel_distortion[CompositePixelChannel]++; p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } for (j = 0; j <= MaxPixelChannels; j++) distortion[j] += channel_distortion[j]; } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); return (status); } static MagickBooleanType GetFuzzDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status = MagickTrue; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); area = 0.0; image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels + 1]; register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; if (status == 
MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (Quantum *) NULL)) { status = MagickFalse; continue; } (void)memset(channel_distortion, 0, sizeof(channel_distortion)); for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance = QuantumScale * (p[i] - GetPixelChannel(reconstruct_image, channel, q)); else distance = QuantumScale * (Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q)); channel_distortion[i] += distance * distance; channel_distortion[CompositePixelChannel] += distance * distance; } area++; p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } for (j = 0; j <= MaxPixelChannels; j++) distortion[j] += channel_distortion[j]; } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); area = PerceptibleReciprocal(area); for (j = 0; j <= MaxPixelChannels; j++) distortion[j] *= area; distortion[CompositePixelChannel] /= (double)GetImageChannels(image); distortion[CompositePixelChannel] = sqrt(distortion[CompositePixelChannel]); return (status); } static MagickBooleanType GetMeanAbsoluteDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status = MagickTrue; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); area = 0.0; image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels + 1]; register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; continue; } (void)memset(channel_distortion, 0, sizeof(channel_distortion)); for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double 
distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance = QuantumScale * fabs((double)p[i] - GetPixelChannel(reconstruct_image, channel, q)); else distance = QuantumScale * fabs(Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q)); channel_distortion[i] += distance; channel_distortion[CompositePixelChannel] += distance; } area++; p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } for (j = 0; j <= MaxPixelChannels; j++) distortion[j] += channel_distortion[j]; } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); area = PerceptibleReciprocal(area); for (j = 0; j <= MaxPixelChannels; j++) distortion[j] *= area; distortion[CompositePixelChannel] /= (double)GetImageChannels(image); return (status); } static MagickBooleanType GetMeanErrorPerPixel(Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; MagickBooleanType status; double area, maximum_error, mean_error; size_t columns, rows; ssize_t y; status = MagickTrue; area = 0.0; maximum_error = 0.0; mean_error = 0.0; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; break; } for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance = fabs((double)p[i] - GetPixelChannel(reconstruct_image, channel, q)); else distance = fabs(Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q)); distortion[i] += distance; distortion[CompositePixelChannel] += distance; mean_error += distance * distance; if (distance > maximum_error) maximum_error = distance; area++; } p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); image->error.mean_error_per_pixel = distortion[CompositePixelChannel] / area; 
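  /*
    Summarize the accumulated per-channel absolute differences:
    normalized_mean_error scales the accumulated squared differences by
    QuantumScale^2 and normalized_maximum_error scales the largest single
    difference by QuantumScale, so both land in the range [0, 1].
  */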
image->error.normalized_mean_error = QuantumScale * QuantumScale * mean_error / area; image->error.normalized_maximum_error = QuantumScale * maximum_error; return (status); } static MagickBooleanType GetMeanSquaredDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status = MagickTrue; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); area = 0.0; image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels + 1]; register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; continue; } (void)memset(channel_distortion, 0, sizeof(channel_distortion)); for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance = QuantumScale * (p[i] - GetPixelChannel(reconstruct_image, channel, q)); else distance = QuantumScale * (Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q)); channel_distortion[i] += distance * distance; channel_distortion[CompositePixelChannel] += distance * distance; } area++; p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } for (j = 0; j <= MaxPixelChannels; j++) distortion[j] += channel_distortion[j]; } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); area = PerceptibleReciprocal(area); for (j = 0; j <= MaxPixelChannels; j++) distortion[j] *= area; distortion[CompositePixelChannel] /= GetImageChannels(image); return (status); } static MagickBooleanType GetNormalizedCrossCorrelationDistortion( const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { #define SimilarityImageTag "Similarity/Image" CacheView * image_view, *reconstruct_view; ChannelStatistics * image_statistics, *reconstruct_statistics; double area; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t columns, rows; ssize_t y; /* * Normalize to account for variation due to lighting and exposure * condition. 
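 * The loops below accumulate the mean-subtracted cross product of the two
 * images; dividing by the product of the standard deviations afterwards
 * yields NCC = sum((x - mu_x)*(y - mu_y)) / (N*sigma_x*sigma_y), so a
 * perfect match scores 1.0 independent of uniform brightness or contrast
 * changes.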
*/ image_statistics = GetImageStatistics(image, exception); reconstruct_statistics = GetImageStatistics(reconstruct_image, exception); if ((image_statistics == (ChannelStatistics *) NULL) || (reconstruct_statistics == (ChannelStatistics *) NULL)) { if (image_statistics != (ChannelStatistics *) NULL) image_statistics = (ChannelStatistics *) RelinquishMagickMemory( image_statistics); if (reconstruct_statistics != (ChannelStatistics *) NULL) reconstruct_statistics = (ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); return (MagickFalse); } status = MagickTrue; progress = 0; for (i = 0; i <= MaxPixelChannels; i++) distortion[i] = 0.0; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); area = 0.0; image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; break; } for (x = 0; x < (ssize_t) columns; x++) { if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } area++; p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } } area = PerceptibleReciprocal(area); for (y = 0; y < (ssize_t) rows; y++) { register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; break; } for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) { distortion[i] += area * QuantumScale * (p[i] - image_statistics[channel].mean) * (GetPixelChannel( reconstruct_image, channel, q) - reconstruct_statistics[channel].mean); } else { distortion[i] += area * QuantumScale * (Sa * p[i] - image_statistics[channel].mean) * (Da * GetPixelChannel( reconstruct_image, channel, q) - reconstruct_statistics[channel].mean); } } p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, SimilarityImageTag, progress, rows); if (proceed == MagickFalse) { status = MagickFalse; break; } } } reconstruct_view = 
DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); /* * Divide by the standard deviation. */ distortion[CompositePixelChannel] = 0.0; for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma; PixelChannel channel = GetPixelChannelChannel(image, i); gamma = image_statistics[channel].standard_deviation * reconstruct_statistics[channel].standard_deviation; gamma = PerceptibleReciprocal(gamma); distortion[i] = QuantumRange * gamma * distortion[i]; distortion[CompositePixelChannel] += distortion[i] * distortion[i]; } distortion[CompositePixelChannel] = sqrt(distortion[CompositePixelChannel] / GetImageChannels(image)); /* * Free resources. */ reconstruct_statistics = (ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); image_statistics = (ChannelStatistics *) RelinquishMagickMemory( image_statistics); return (status); } static MagickBooleanType GetPeakAbsoluteDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; MagickBooleanType status; size_t columns, rows; ssize_t y; status = MagickTrue; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels + 1]; register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; continue; } (void)memset(channel_distortion, 0, sizeof(channel_distortion)); for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance = QuantumScale * fabs((double)p[i] - GetPixelChannel(reconstruct_image, channel, q)); else distance = QuantumScale * fabs(Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q)); if (distance > channel_distortion[i]) channel_distortion[i] = distance; if (distance > channel_distortion[CompositePixelChannel]) channel_distortion[CompositePixelChannel] = distance; } p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } for (j = 0; j <= MaxPixelChannels; j++) if (channel_distortion[j] > distortion[j]) distortion[j] = channel_distortion[j]; } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); return (status); } static inline double MagickLog10(const double x) { #define Log10Epsilon 
(1.0e-11) if (fabs(x) < Log10Epsilon) return (log10(Log10Epsilon)); return (log10(fabs(x))); } static MagickBooleanType GetPeakSignalToNoiseRatio(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { MagickBooleanType status; register ssize_t i; status = GetMeanSquaredDistortion(image, reconstruct_image, distortion, exception); for (i = 0; i <= MaxPixelChannels; i++) if (fabs(distortion[i]) < MagickEpsilon) distortion[i] = INFINITY; else distortion[i] = 10.0 * MagickLog10(1.0) - 10.0 * MagickLog10(distortion[i]); return (status); } static MagickBooleanType GetPerceptualHashDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { ChannelPerceptualHash * channel_phash, *reconstruct_phash; const char *artifact; MagickBooleanType normalize; ssize_t channel; /* * Compute perceptual hash in the sRGB colorspace. */ channel_phash = GetImagePerceptualHash(image, exception); if (channel_phash == (ChannelPerceptualHash *) NULL) return (MagickFalse); reconstruct_phash = GetImagePerceptualHash(reconstruct_image, exception); if (reconstruct_phash == (ChannelPerceptualHash *) NULL) { channel_phash = (ChannelPerceptualHash *) RelinquishMagickMemory( channel_phash); return (MagickFalse); } artifact = GetImageArtifact(image, "phash:normalize"); normalize = (artifact == (const char *)NULL) || (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue; for (channel = 0; channel < MaxPixelChannels; channel++) { double difference; register ssize_t i; difference = 0.0; for (i = 0; i < MaximumNumberOfImageMoments; i++) { double alpha, beta; register ssize_t j; for (j = 0; j < (ssize_t) channel_phash[0].number_colorspaces; j++) { alpha = channel_phash[channel].phash[j][i]; beta = reconstruct_phash[channel].phash[j][i]; if (normalize == MagickFalse) difference += (beta - alpha) * (beta - alpha); else difference = sqrt((beta - alpha) * (beta - alpha) / channel_phash[0].number_channels); } } distortion[channel] += difference; distortion[CompositePixelChannel] += difference; } /* * Free resources. */ reconstruct_phash = (ChannelPerceptualHash *) RelinquishMagickMemory( reconstruct_phash); channel_phash = (ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash); return (MagickTrue); } static MagickBooleanType GetRootMeanSquaredDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { MagickBooleanType status; register ssize_t i; status = GetMeanSquaredDistortion(image, reconstruct_image, distortion, exception); for (i = 0; i <= MaxPixelChannels; i++) distortion[i] = sqrt(distortion[i]); return (status); } static MagickBooleanType GetStructuralSimilarityDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { #define SSIMRadius 5.0 #define SSIMSigma 1.5 #define SSIMBlocksize 8 #define SSIMK1 0.01 #define SSIMK2 0.03 #define SSIML 1.0 CacheView * image_view, *reconstruct_view; char geometry[MagickPathExtent]; const char *artifact; double c1, c2, radius, sigma; KernelInfo * kernel_info; MagickBooleanType status; register ssize_t i; size_t columns, rows; ssize_t y; /* * Compute structural similarity index @ * https://en.wikipedia.org/wiki/Structural_similarity. 
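 * Every pixel is scored over a Gaussian window (radius and sigma are
 * configurable through the compare:ssim-radius and compare:ssim-sigma
 * artifacts) from the local means, variances and covariance:
 *
 *   SSIM = ((2*mu_x*mu_y + c1)*(2*cov_xy + c2)) /
 *          ((mu_x^2 + mu_y^2 + c1)*(sigma_x^2 + sigma_y^2 + c2))
 *
 * with c1 = (k1*L)^2 and c2 = (k2*L)^2 as defined above.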
*/ radius = SSIMRadius; artifact = GetImageArtifact(image, "compare:ssim-radius"); if (artifact != (const char *)NULL) radius = StringToDouble(artifact, (char **)NULL); sigma = SSIMSigma; artifact = GetImageArtifact(image, "compare:ssim-sigma"); if (artifact != (const char *)NULL) sigma = StringToDouble(artifact, (char **)NULL); (void)FormatLocaleString(geometry, MagickPathExtent, "gaussian:%.20gx%.20g", radius, sigma); kernel_info = AcquireKernelInfo(geometry, exception); if (kernel_info == (KernelInfo *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); c1 = pow(SSIMK1 * SSIML, 2.0); artifact = GetImageArtifact(image, "compare:ssim-k1"); if (artifact != (const char *)NULL) c1 = pow(StringToDouble(artifact, (char **)NULL) * SSIML, 2.0); c2 = pow(SSIMK2 * SSIML, 2.0); artifact = GetImageArtifact(image, "compare:ssim-k2"); if (artifact != (const char *)NULL) c2 = pow(StringToDouble(artifact, (char **)NULL) * SSIML, 2.0); status = MagickTrue; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels + 1]; register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, -((ssize_t) kernel_info->width / 2L), y - ((ssize_t) kernel_info->height / 2L), columns + kernel_info->width, kernel_info->height, exception); q = GetCacheViewVirtualPixels(reconstruct_view, -((ssize_t) kernel_info->width / 2L), y - ((ssize_t) kernel_info->height / 2L), columns + kernel_info->width, kernel_info->height, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; continue; } (void)memset(channel_distortion, 0, sizeof(channel_distortion)); for (x = 0; x < (ssize_t) columns; x++) { double x_pixel_mu[MaxPixelChannels + 1], x_pixel_sigma_squared[MaxPixelChannels + 1], xy_sigma[MaxPixelChannels + 1], y_pixel_mu[MaxPixelChannels + 1], y_pixel_sigma_squared[MaxPixelChannels + 1]; register const Quantum * magick_restrict reference, *magick_restrict target; register MagickRealType * k; ssize_t v; (void)memset(x_pixel_mu, 0, sizeof(x_pixel_mu)); (void)memset(x_pixel_sigma_squared, 0, sizeof(x_pixel_sigma_squared)); (void)memset(xy_sigma, 0, sizeof(xy_sigma)); (void)memset(x_pixel_sigma_squared, 0, sizeof(y_pixel_sigma_squared)); (void)memset(y_pixel_mu, 0, sizeof(y_pixel_mu)); (void)memset(y_pixel_sigma_squared, 0, sizeof(y_pixel_sigma_squared)); k = kernel_info->values; reference = p; target = q; for (v = 0; v < (ssize_t) kernel_info->height; v++) { register ssize_t u; for (u = 0; u < (ssize_t) kernel_info->width; u++) { for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double x_pixel, y_pixel; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel = QuantumScale * reference[i]; x_pixel_mu[i] += (*k) * x_pixel; x_pixel_sigma_squared[i] += (*k) * x_pixel * x_pixel; y_pixel = QuantumScale * GetPixelChannel(reconstruct_image, channel, target); y_pixel_mu[i] += (*k) * 
y_pixel; y_pixel_sigma_squared[i] += (*k) * y_pixel * y_pixel; xy_sigma[i] += (*k) * x_pixel * y_pixel; } k++; reference += GetPixelChannels(image); target += GetPixelChannels(reconstruct_image); } reference += GetPixelChannels(image) * columns; target += GetPixelChannels(reconstruct_image) * columns; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double ssim, x_pixel_mu_squared, x_pixel_sigmas_squared, xy_mu, xy_sigmas, y_pixel_mu_squared, y_pixel_sigmas_squared; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel_mu_squared = x_pixel_mu[i] * x_pixel_mu[i]; y_pixel_mu_squared = y_pixel_mu[i] * y_pixel_mu[i]; xy_mu = x_pixel_mu[i] * y_pixel_mu[i]; xy_sigmas = xy_sigma[i] - xy_mu; x_pixel_sigmas_squared = x_pixel_sigma_squared[i] - x_pixel_mu_squared; y_pixel_sigmas_squared = y_pixel_sigma_squared[i] - y_pixel_mu_squared; ssim = ((2.0 * xy_mu + c1) * (2.0 * xy_sigmas + c2)) / ((x_pixel_mu_squared + y_pixel_mu_squared + c1) * (x_pixel_sigmas_squared + y_pixel_sigmas_squared + c2)); channel_distortion[i] += ssim; channel_distortion[CompositePixelChannel] += ssim; } p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } for (i = 0; i <= MaxPixelChannels; i++) distortion[i] += channel_distortion[i]; } image_view = DestroyCacheView(image_view); reconstruct_view = DestroyCacheView(reconstruct_view); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0)) continue; distortion[i] /= ((double)columns * rows); } distortion[CompositePixelChannel] /= ((double)columns * rows); distortion[CompositePixelChannel] /= (double)GetImageChannels(image); kernel_info = DestroyKernelInfo(kernel_info); return (status); } static MagickBooleanType GetStructuralDisimilarityDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { MagickBooleanType status; register ssize_t i; status = GetStructuralSimilarityDistortion(image, reconstruct_image, distortion, exception); for (i = 0; i <= MaxPixelChannels; i++) distortion[i] = (1.0 - (distortion[i])) / 2.0; return (status); } MagickExport MagickBooleanType GetImageDistortion(Image * image, const Image * reconstruct_image, const MetricType metric, double *distortion, ExceptionInfo * exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(reconstruct_image != (const Image *)NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *)NULL); *distortion = 0.0; if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); /* * Get image distortion. 
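 * The switch below dispatches the requested metric into a scratch array of
 * MaxPixelChannels + 1 doubles; the scalar reported back to the caller is
 * the CompositePixelChannel slot of that array, which is also recorded in
 * the image's "distortion" property.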
*/ length = MaxPixelChannels + 1; channel_distortion = (double *)AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *)NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); (void)memset(channel_distortion, 0, length * sizeof(*channel_distortion)); switch (metric) { case AbsoluteErrorMetric: { status = GetAbsoluteDistortion(image, reconstruct_image, channel_distortion, exception); break; } case FuzzErrorMetric: { status = GetFuzzDistortion(image, reconstruct_image, channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status = GetMeanAbsoluteDistortion(image, reconstruct_image, channel_distortion, exception); break; } case MeanErrorPerPixelErrorMetric: { status = GetMeanErrorPerPixel(image, reconstruct_image, channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status = GetMeanSquaredDistortion(image, reconstruct_image, channel_distortion, exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status = GetNormalizedCrossCorrelationDistortion(image, reconstruct_image, channel_distortion, exception); break; } case PeakAbsoluteErrorMetric: { status = GetPeakAbsoluteDistortion(image, reconstruct_image, channel_distortion, exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status = GetPeakSignalToNoiseRatio(image, reconstruct_image, channel_distortion, exception); break; } case PerceptualHashErrorMetric: { status = GetPerceptualHashDistortion(image, reconstruct_image, channel_distortion, exception); break; } case RootMeanSquaredErrorMetric: { status = GetRootMeanSquaredDistortion(image, reconstruct_image, channel_distortion, exception); break; } case StructuralSimilarityErrorMetric: { status = GetStructuralSimilarityDistortion(image, reconstruct_image, channel_distortion, exception); break; } case StructuralDissimilarityErrorMetric: { status = GetStructuralDisimilarityDistortion(image, reconstruct_image, channel_distortion, exception); break; } } *distortion = channel_distortion[CompositePixelChannel]; channel_distortion = (double *)RelinquishMagickMemory(channel_distortion); (void)FormatImageProperty(image, "distortion", "%.*g", GetMagickPrecision(), *distortion); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t I m a g e D i s t o r t i o n s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetImageDistortions() compares the pixel channels of an image to * a % reconstructed image and returns the specified distortion metric for * each % channel. % % The format of the GetImageDistortions method is: % % * double *GetImageDistortions(const Image *image, % const Image * *reconstruct_image,const MetricType metric, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o reconstruct_image: the reconstruct image. % % o * metric: the metric. % % o exception: return any errors or warnings in * this structure. 
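% % For example (an illustrative sketch; error handling elided), the
% per-channel RMSE values can be fetched with % % channel_distortion =
% GetImageDistortions(image, reconstruct_image, % RootMeanSquaredErrorMetric,
% exception); % % where channel_distortion[CompositePixelChannel] holds the
% aggregate value and the returned array must be freed with
% RelinquishMagickMemory().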
% */ MagickExport double * GetImageDistortions(Image * image, const Image * reconstruct_image, const MetricType metric, ExceptionInfo * exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(reconstruct_image != (const Image *)NULL); assert(reconstruct_image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); /* * Get image distortion. */ length = MaxPixelChannels + 1UL; channel_distortion = (double *)AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *)NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); (void)memset(channel_distortion, 0, length * sizeof(*channel_distortion)); status = MagickTrue; switch (metric) { case AbsoluteErrorMetric: { status = GetAbsoluteDistortion(image, reconstruct_image, channel_distortion, exception); break; } case FuzzErrorMetric: { status = GetFuzzDistortion(image, reconstruct_image, channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status = GetMeanAbsoluteDistortion(image, reconstruct_image, channel_distortion, exception); break; } case MeanErrorPerPixelErrorMetric: { status = GetMeanErrorPerPixel(image, reconstruct_image, channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status = GetMeanSquaredDistortion(image, reconstruct_image, channel_distortion, exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status = GetNormalizedCrossCorrelationDistortion(image, reconstruct_image, channel_distortion, exception); break; } case PeakAbsoluteErrorMetric: { status = GetPeakAbsoluteDistortion(image, reconstruct_image, channel_distortion, exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status = GetPeakSignalToNoiseRatio(image, reconstruct_image, channel_distortion, exception); break; } case PerceptualHashErrorMetric: { status = GetPerceptualHashDistortion(image, reconstruct_image, channel_distortion, exception); break; } case RootMeanSquaredErrorMetric: { status = GetRootMeanSquaredDistortion(image, reconstruct_image, channel_distortion, exception); break; } case StructuralSimilarityErrorMetric: { status = GetStructuralSimilarityDistortion(image, reconstruct_image, channel_distortion, exception); break; } case StructuralDissimilarityErrorMetric: { status = GetStructuralDisimilarityDistortion(image, reconstruct_image, channel_distortion, exception); break; } } if (status == MagickFalse) { channel_distortion = (double *)RelinquishMagickMemory(channel_distortion); return ((double *)NULL); } return (channel_distortion); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % I s I m a g e s E q u a l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % IsImagesEqual() compares the pixels of two images and returns * immediately % if any pixel is not identical. % % The format of the * IsImagesEqual method is: % % MagickBooleanType IsImagesEqual(const * Image *image, % const Image *reconstruct_image,ExceptionInfo * *exception) % % A description of each parameter follows. % % o image: * the image. % % o reconstruct_image: the reconstruct image. 
% % o * exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsImagesEqual(const Image * image, const Image * reconstruct_image, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *)NULL); assert(reconstruct_image->signature == MagickCoreSignature); rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (Quantum *) NULL)) break; for (x = 0; x < (ssize_t) columns; x++) { register ssize_t i; for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance = fabs(p[i] - (double)GetPixelChannel(reconstruct_image, channel, q)); if (distance >= MagickEpsilon) break; } if (i < (ssize_t) GetPixelChannels(image)) break; p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } if (x < (ssize_t) columns) break; } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); return (y < (ssize_t) rows ? MagickFalse : MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e C o l o r M e t r i c * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageColorMetric() measures the difference between colors at * each pixel % location of two images. A value other than 0 means the * colors match % exactly. Otherwise an error measure is computed by * summing over all % pixels in an image the distance squared in RGB space * between each image % pixel and its corresponding pixel in the reconstruct * image. The error % measure is assigned to these image members: % % o * mean_error_per_pixel: The mean error for any single pixel in % the * image. % % o normalized_mean_error: The normalized mean quantization * error for % any single pixel in the image. This distance measure is * normalized to % a range between 0 and 1. It is independent of the * range of red, green, % and blue values in the image. % % o * normalized_maximum_error: The normalized maximum quantization % * error for any single pixel in the image. This distance measure is % * normalized to a range between 0 and 1. It is independent of the range % * of red, green, and blue values in your image. % % A small normalized mean * square error, accessed as % image->normalized_mean_error, suggests the * images are very similar in % spatial layout and color. 
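% % The method itself returns MagickTrue only when mean_error_per_pixel
% works out to exactly 0.0, i.e. when every compared channel matched;
% otherwise the error members above are still populated and MagickFalse is
% returned.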
% % The format of * the SetImageColorMetric method is: % % MagickBooleanType * SetImageColorMetric(Image *image, % const Image * *reconstruct_image,ExceptionInfo *exception) % % A description of each * parameter follows. % % o image: the image. % % o reconstruct_image: * the reconstruct image. % % o exception: return any errors or warnings * in this structure. % */ MagickExport MagickBooleanType SetImageColorMetric(Image * image, const Image * reconstruct_image, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; double area, maximum_error, mean_error, mean_error_per_pixel; MagickBooleanType status; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *)NULL); assert(reconstruct_image->signature == MagickCoreSignature); area = 0.0; maximum_error = 0.0; mean_error_per_pixel = 0.0; mean_error = 0.0; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (Quantum *) NULL)) break; for (x = 0; x < (ssize_t) columns; x++) { register ssize_t i; for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance = fabs(p[i] - (double)GetPixelChannel(reconstruct_image, channel, q)); if (distance >= MagickEpsilon) { mean_error_per_pixel += distance; mean_error += distance * distance; if (distance > maximum_error) maximum_error = distance; } area++; } p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); image->error.mean_error_per_pixel = (double)(mean_error_per_pixel / area); image->error.normalized_mean_error = (double)(QuantumScale * QuantumScale * mean_error / area); image->error.normalized_maximum_error = (double)(QuantumScale * maximum_error); status = image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse; return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S i m i l a r i t y I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SimilarityImage() compares the reference image of the image and * returns the % best match offset. In addition, it returns a similarity * image such that an % exact match location is completely white and if none * of the pixels match, % black, otherwise some gray level in-between. 
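% % The search is exhaustive: every candidate offset crops a region the size
% of the reference image and scores it with GetImageDistortion(), so the
% cost is (image->columns - reference->columns + 1) * (image->rows -
% reference->rows + 1) full metric evaluations in the worst case.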
% % * The format of the SimilarityImageImage method is: % % Image * *SimilarityImage(const Image *image,const Image *reference, % const * MetricType metric,const double similarity_threshold, % * RectangleInfo *offset,double *similarity,ExceptionInfo *exception) % % A * description of each parameter follows: % % o image: the image. % % o * reference: find an area of the image that closely resembles this image. % * % o metric: the metric. % % o similarity_threshold: minimum * distortion for (sub)image match. % % o offset: the best match offset of * the reference image within the image. % % o similarity: the computed * similarity between the images. % % o exception: return any errors or * warnings in this structure. % */ static double GetSimilarityMetric(const Image * image, const Image * reference, const MetricType metric, const ssize_t x_offset, const ssize_t y_offset, ExceptionInfo * exception) { double distortion; Image * similarity_image; MagickBooleanType status; RectangleInfo geometry; SetGeometry(reference, &geometry); geometry.x = x_offset; geometry.y = y_offset; similarity_image = CropImage(image, &geometry, exception); if (similarity_image == (Image *) NULL) return (0.0); distortion = 0.0; status = GetImageDistortion(similarity_image, reference, metric, &distortion, exception); similarity_image = DestroyImage(similarity_image); if (status == MagickFalse) return (0.0); return (distortion); } MagickExport Image * SimilarityImage(const Image * image, const Image * reference, const MetricType metric, const double similarity_threshold, RectangleInfo * offset, double *similarity_metric, ExceptionInfo * exception) { #define SimilarityImageTag "Similarity/Image" CacheView * similarity_view; Image * similarity_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (const Image *)NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(offset != (RectangleInfo *) NULL); SetGeometry(reference, offset); *similarity_metric = MagickMaximumValue; similarity_image = CloneImage(image, image->columns - reference->columns + 1, image->rows - reference->rows + 1, MagickTrue, exception); if (similarity_image == (Image *) NULL) return ((Image *) NULL); status = SetImageStorageClass(similarity_image, DirectClass, exception); if (status == MagickFalse) { similarity_image = DestroyImage(similarity_image); return ((Image *) NULL); } (void)SetImageAlphaChannel(similarity_image, DeactivateAlphaChannel, exception); /* * Measure similarity of reference image against image. 
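 * The scan stops early once the best score reaches similarity_threshold or
 * better.  Correlation-style metrics (NormalizedCrossCorrelationErrorMetric
 * and UndefinedErrorMetric) are first inverted to 1.0 - similarity so that
 * a smaller value always means a closer match.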
*/ status = MagickTrue; progress = 0; similarity_view = AcquireAuthenticCacheView(similarity_image, exception); for (y = 0; y < (ssize_t) (image->rows - reference->rows + 1); y++) { double similarity; register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; if (*similarity_metric <= similarity_threshold) continue; q = GetCacheViewAuthenticPixels(similarity_view, 0, y, similarity_image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) (image->columns - reference->columns + 1); x++) { register ssize_t i; if (*similarity_metric <= similarity_threshold) break; similarity = GetSimilarityMetric(image, reference, metric, x, y, exception); if ((metric == NormalizedCrossCorrelationErrorMetric) || (metric == UndefinedErrorMetric)) similarity = 1.0 - similarity; if (similarity < *similarity_metric) { offset->x = x; offset->y = y; *similarity_metric = similarity; } if (metric == PerceptualHashErrorMetric) similarity = MagickMin(0.01 * similarity, 1.0); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait similarity_traits = GetPixelChannelTraits(similarity_image, channel); if ((traits == UndefinedPixelTrait) || (similarity_traits == UndefinedPixelTrait) || ((similarity_traits & UpdatePixelTrait) == 0)) continue; SetPixelChannel(similarity_image, channel, ClampToQuantum(QuantumRange - QuantumRange * similarity), q); } q += GetPixelChannels(similarity_image); } if (SyncCacheViewAuthenticPixels(similarity_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, SimilarityImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } similarity_view = DestroyCacheView(similarity_view); if (status == MagickFalse) similarity_image = DestroyImage(similarity_image); return (similarity_image); }
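/*
 * Illustrative usage sketch (not part of compare.c): a minimal MagickCore
 * program tying the entry points above together.  The file names "a.png",
 * "b.png" and "difference.png" and the choice of RootMeanSquaredErrorMetric
 * are assumptions for the example only.
 */
#include <stdio.h>
#include "MagickCore/MagickCore.h"

int main(int argc, char **argv)
{
  double distortion = 0.0;
  ExceptionInfo *exception;
  Image *difference, *image, *reconstruct;
  ImageInfo *image_info;

  (void) argc;
  MagickCoreGenesis(*argv, MagickTrue);
  exception = AcquireExceptionInfo();
  image_info = CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename, "a.png", MagickPathExtent);
  image = ReadImage(image_info, exception);
  (void) CopyMagickString(image_info->filename, "b.png", MagickPathExtent);
  reconstruct = ReadImage(image_info, exception);
  if ((image != (Image *) NULL) && (reconstruct != (Image *) NULL))
    {
      /* Scalar distortion plus the annotated difference image. */
      difference = CompareImages(image, reconstruct,
        RootMeanSquaredErrorMetric, &distortion, exception);
      (void) fprintf(stdout, "RMSE distortion: %g\n", distortion);
      if (difference != (Image *) NULL)
        {
          (void) CopyMagickString(difference->filename, "difference.png",
            MagickPathExtent);
          (void) WriteImage(image_info, difference, exception);
          difference = DestroyImage(difference);
        }
    }
  if (reconstruct != (Image *) NULL)
    reconstruct = DestroyImage(reconstruct);
  if (image != (Image *) NULL)
    image = DestroyImage(image);
  image_info = DestroyImageInfo(image_info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return (0);
}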
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/statistic.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o m p a r e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CompareImages() compares one or more pixel channels of an image * to a % reconstructed image and returns the difference image. % % The * format of the CompareImages method is: % % Image *CompareImages(const * Image *image,const Image *reconstruct_image, % const MetricType * metric,double *distortion,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o * reconstruct_image: the reconstruct image. % % o metric: the metric. % % * o distortion: the computed distortion between the images. % % o * exception: return any errors or warnings in this structure. % */ static size_t GetImageChannels(const Image * image) { register ssize_t i; size_t channels; channels = 0; for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) != 0) channels++; } return (channels == 0 ? 
(size_t) 1 : channels); } MagickExport Image * CompareImages(Image * image, const Image * reconstruct_image, const MetricType metric, double *distortion, ExceptionInfo * exception) { CacheView * highlight_view, *image_view, *reconstruct_view; const char *artifact; double fuzz; Image * clone_image, *difference_image, *highlight_image; MagickBooleanType status; PixelInfo highlight, lowlight, masklight; RectangleInfo geometry; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(reconstruct_image != (const Image *)NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *)NULL); *distortion = 0.0; if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); status = GetImageDistortion(image, reconstruct_image, metric, distortion, exception); if (status == MagickFalse) return ((Image *) NULL); columns = MagickMax(image->columns, reconstruct_image->columns); rows = MagickMax(image->rows, reconstruct_image->rows); SetGeometry(image, &geometry); geometry.width = columns; geometry.height = rows; clone_image = CloneImage(image, 0, 0, MagickTrue, exception); if (clone_image == (Image *) NULL) return ((Image *) NULL); (void)SetImageMask(clone_image, ReadPixelMask, (Image *) NULL, exception); difference_image = ExtentImage(clone_image, &geometry, exception); clone_image = DestroyImage(clone_image); if (difference_image == (Image *) NULL) return ((Image *) NULL); (void)SetImageAlphaChannel(difference_image, OpaqueAlphaChannel, exception); highlight_image = CloneImage(image, columns, rows, MagickTrue, exception); if (highlight_image == (Image *) NULL) { difference_image = DestroyImage(difference_image); return ((Image *) NULL); } status = SetImageStorageClass(highlight_image, DirectClass, exception); if (status == MagickFalse) { difference_image = DestroyImage(difference_image); highlight_image = DestroyImage(highlight_image); return ((Image *) NULL); } (void)SetImageMask(highlight_image, ReadPixelMask, (Image *) NULL, exception); (void)SetImageAlphaChannel(highlight_image, OpaqueAlphaChannel, exception); (void)QueryColorCompliance("#f1001ecc", AllCompliance, &highlight, exception); artifact = GetImageArtifact(image, "compare:highlight-color"); if (artifact != (const char *)NULL) (void)QueryColorCompliance(artifact, AllCompliance, &highlight, exception); (void)QueryColorCompliance("#ffffffcc", AllCompliance, &lowlight, exception); artifact = GetImageArtifact(image, "compare:lowlight-color"); if (artifact != (const char *)NULL) (void)QueryColorCompliance(artifact, AllCompliance, &lowlight, exception); (void)QueryColorCompliance("#888888cc", AllCompliance, &masklight, exception); artifact = GetImageArtifact(image, "compare:masklight-color"); if (artifact != (const char *)NULL) (void)QueryColorCompliance(artifact, AllCompliance, &masklight, exception); /* * Generate difference image. 
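 * This build parallelizes the row loop with OpenMP: rows are independent,
 * so they are handed out with schedule(static), the thread count is capped
 * by the magick_number_threads() helper, and a failure in any row is
 * recorded through the shared status flag (an OpenMP loop body cannot
 * break, hence the "if (status == MagickFalse) continue" guard).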
*/ status = MagickTrue; fuzz = GetFuzzyColorDistance(image, reconstruct_image); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); highlight_view = AcquireAuthenticCacheView(highlight_image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,highlight_image,rows,1) #endif for (y = 0; y < (ssize_t) rows; y++) { MagickBooleanType sync; register const Quantum * magick_restrict p, *magick_restrict q; register Quantum * magick_restrict r; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); r = QueueCacheViewAuthenticPixels(highlight_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL) || (r == (Quantum *) NULL)) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; MagickStatusType difference; register ssize_t i; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { SetPixelViaPixelInfo(highlight_image, &masklight, r); p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); r += GetPixelChannels(highlight_image); continue; } difference = MagickFalse; Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance = (double)p[i] - GetPixelChannel(reconstruct_image, channel, q); else distance = Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q); if ((distance * distance) > fuzz) { difference = MagickTrue; break; } } if (difference == MagickFalse) SetPixelViaPixelInfo(highlight_image, &lowlight, r); else SetPixelViaPixelInfo(highlight_image, &highlight, r); p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); r += GetPixelChannels(highlight_image); } sync = SyncCacheViewAuthenticPixels(highlight_view, exception); if (sync == MagickFalse) status = MagickFalse; } highlight_view = DestroyCacheView(highlight_view); reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); (void)CompositeImage(difference_image, highlight_image, image->compose, MagickTrue, 0, 0, exception); highlight_image = DestroyImage(highlight_image); if (status == MagickFalse) difference_image = DestroyImage(difference_image); return (difference_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t I m a g e D i s t o r t i o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetImageDistortion() compares one or more pixel channels of an * image to a % reconstructed image and returns the specified distortion * metric. 
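% % In this OpenMP build each distortion helper accumulates a per-row
% channel_distortion array privately in each thread and folds it into the
% shared totals inside a named "#pragma omp critical" section, so the summed
% result matches the serial implementation.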
% % The format of the GetImageDistortion method is: % % * MagickBooleanType GetImageDistortion(const Image *image, % const * Image *reconstruct_image,const MetricType metric, % double * *distortion,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o reconstruct_image: the * reconstruct image. % % o metric: the metric. % % o distortion: the * computed distortion between the images. % % o exception: return any * errors or warnings in this structure. % */ static MagickBooleanType GetAbsoluteDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; double fuzz; MagickBooleanType status; size_t columns, rows; ssize_t y; /* * Compute the absolute difference in pixels between two images. */ status = MagickTrue; fuzz = (double)MagickMin(GetPixelChannels(image), GetPixelChannels(reconstruct_image)) * GetFuzzyColorDistance(image, reconstruct_image); rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) #endif for (y = 0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels + 1]; register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; continue; } (void)memset(channel_distortion, 0, sizeof(channel_distortion)); for (x = 0; x < (ssize_t) columns; x++) { double Da, distance, Sa; MagickBooleanType difference; register ssize_t i; difference = MagickFalse; distance = 0.0; Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) pixel = (double)p[i] - GetPixelChannel(reconstruct_image, channel, q); else pixel = Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q); distance += pixel * pixel; if (distance > fuzz) { channel_distortion[i]++; difference = MagickTrue; } } if (difference != MagickFalse) channel_distortion[CompositePixelChannel]++; p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetAbsoluteDistortion) #endif for (j = 0; j <= MaxPixelChannels; j++) distortion[j] += channel_distortion[j]; } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); return (status); } static MagickBooleanType GetFuzzDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; double area; 
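/* Descriptive note (added): the fuzz distortion accumulates alpha-weighted, QuantumScale-normalized squared channel differences over every pixel; each channel entry is a mean square, and the composite channel reduces to their root mean square. */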
MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status = MagickTrue; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); area = 0.0; image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y = 0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels + 1]; register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (Quantum *) NULL)) { status = MagickFalse; continue; } (void)memset(channel_distortion, 0, sizeof(channel_distortion)); for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance = QuantumScale * (p[i] - GetPixelChannel(reconstruct_image, channel, q)); else distance = QuantumScale * (Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q)); channel_distortion[i] += distance * distance; channel_distortion[CompositePixelChannel] += distance * distance; } area++; p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetFuzzDistortion) #endif for (j = 0; j <= MaxPixelChannels; j++) distortion[j] += channel_distortion[j]; } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); area = PerceptibleReciprocal(area); for (j = 0; j <= MaxPixelChannels; j++) distortion[j] *= area; distortion[CompositePixelChannel] /= (double)GetImageChannels(image); distortion[CompositePixelChannel] = sqrt(distortion[CompositePixelChannel]); return (status); } static MagickBooleanType GetMeanAbsoluteDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status = MagickTrue; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); area = 0.0; image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y = 
0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels + 1]; register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; continue; } (void)memset(channel_distortion, 0, sizeof(channel_distortion)); for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance = QuantumScale * fabs((double)p[i] - GetPixelChannel(reconstruct_image, channel, q)); else distance = QuantumScale * fabs(Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q)); channel_distortion[i] += distance; channel_distortion[CompositePixelChannel] += distance; } area++; p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanAbsoluteError) #endif for (j = 0; j <= MaxPixelChannels; j++) distortion[j] += channel_distortion[j]; } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); area = PerceptibleReciprocal(area); for (j = 0; j <= MaxPixelChannels; j++) distortion[j] *= area; distortion[CompositePixelChannel] /= (double)GetImageChannels(image); return (status); } static MagickBooleanType GetMeanErrorPerPixel(Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; MagickBooleanType status; double area, maximum_error, mean_error; size_t columns, rows; ssize_t y; status = MagickTrue; area = 0.0; maximum_error = 0.0; mean_error = 0.0; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; break; } for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * 
GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance = fabs((double)p[i] - GetPixelChannel(reconstruct_image, channel, q)); else distance = fabs(Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q)); distortion[i] += distance; distortion[CompositePixelChannel] += distance; mean_error += distance * distance; if (distance > maximum_error) maximum_error = distance; area++; } p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); image->error.mean_error_per_pixel = distortion[CompositePixelChannel] / area; image->error.normalized_mean_error = QuantumScale * QuantumScale * mean_error / area; image->error.normalized_maximum_error = QuantumScale * maximum_error; return (status); } static MagickBooleanType GetMeanSquaredDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status = MagickTrue; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); area = 0.0; image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y = 0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels + 1]; register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; continue; } (void)memset(channel_distortion, 0, sizeof(channel_distortion)); for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance = QuantumScale * (p[i] - GetPixelChannel(reconstruct_image, channel, q)); else distance = QuantumScale * (Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q)); 
channel_distortion[i] += distance * distance; channel_distortion[CompositePixelChannel] += distance * distance; } area++; p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanSquaredError) #endif for (j = 0; j <= MaxPixelChannels; j++) distortion[j] += channel_distortion[j]; } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); area = PerceptibleReciprocal(area); for (j = 0; j <= MaxPixelChannels; j++) distortion[j] *= area; distortion[CompositePixelChannel] /= GetImageChannels(image); return (status); } static MagickBooleanType GetNormalizedCrossCorrelationDistortion( const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { #define SimilarityImageTag "Similarity/Image" CacheView * image_view, *reconstruct_view; ChannelStatistics * image_statistics, *reconstruct_statistics; double area; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t columns, rows; ssize_t y; /* * Normalize to account for variation due to lighting and exposure * condition. */ image_statistics = GetImageStatistics(image, exception); reconstruct_statistics = GetImageStatistics(reconstruct_image, exception); if ((image_statistics == (ChannelStatistics *) NULL) || (reconstruct_statistics == (ChannelStatistics *) NULL)) { if (image_statistics != (ChannelStatistics *) NULL) image_statistics = (ChannelStatistics *) RelinquishMagickMemory( image_statistics); if (reconstruct_statistics != (ChannelStatistics *) NULL) reconstruct_statistics = (ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); return (MagickFalse); } status = MagickTrue; progress = 0; for (i = 0; i <= MaxPixelChannels; i++) distortion[i] = 0.0; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); area = 0.0; image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; break; } for (x = 0; x < (ssize_t) columns; x++) { if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } area++; p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } } area = PerceptibleReciprocal(area); for (y = 0; y < (ssize_t) rows; y++) { register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; break; } for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * 
GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) { distortion[i] += area * QuantumScale * (p[i] - image_statistics[channel].mean) * (GetPixelChannel( reconstruct_image, channel, q) - reconstruct_statistics[channel].mean); } else { distortion[i] += area * QuantumScale * (Sa * p[i] - image_statistics[channel].mean) * (Da * GetPixelChannel( reconstruct_image, channel, q) - reconstruct_statistics[channel].mean); } } p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, SimilarityImageTag, progress, rows); if (proceed == MagickFalse) { status = MagickFalse; break; } } } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); /* * Divide by the standard deviation. */ distortion[CompositePixelChannel] = 0.0; for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma; PixelChannel channel = GetPixelChannelChannel(image, i); gamma = image_statistics[channel].standard_deviation * reconstruct_statistics[channel].standard_deviation; gamma = PerceptibleReciprocal(gamma); distortion[i] = QuantumRange * gamma * distortion[i]; distortion[CompositePixelChannel] += distortion[i] * distortion[i]; } distortion[CompositePixelChannel] = sqrt(distortion[CompositePixelChannel] / GetImageChannels(image)); /* * Free resources. 
*/ reconstruct_statistics = (ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); image_statistics = (ChannelStatistics *) RelinquishMagickMemory( image_statistics); return (status); } static MagickBooleanType GetPeakAbsoluteDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; MagickBooleanType status; size_t columns, rows; ssize_t y; status = MagickTrue; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) #endif for (y = 0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels + 1]; register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; continue; } (void)memset(channel_distortion, 0, sizeof(channel_distortion)); for (x = 0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image, p) <= (QuantumRange / 2)) || (GetPixelReadMask(reconstruct_image, q) <= (QuantumRange / 2))) { p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); continue; } Sa = QuantumScale * GetPixelAlpha(image, p); Da = QuantumScale * GetPixelAlpha(reconstruct_image, q); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance = QuantumScale * fabs((double)p[i] - GetPixelChannel(reconstruct_image, channel, q)); else distance = QuantumScale * fabs(Sa * p[i] - Da * GetPixelChannel(reconstruct_image, channel, q)); if (distance > channel_distortion[i]) channel_distortion[i] = distance; if (distance > channel_distortion[CompositePixelChannel]) channel_distortion[CompositePixelChannel] = distance; } p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPeakAbsoluteError) #endif for (j = 0; j <= MaxPixelChannels; j++) if (channel_distortion[j] > distortion[j]) distortion[j] = channel_distortion[j]; } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); return (status); } static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return (log10(Log10Epsilon)); return (log10(fabs(x))); } static MagickBooleanType GetPeakSignalToNoiseRatio(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { MagickBooleanType status; register ssize_t i; status = GetMeanSquaredDistortion(image, reconstruct_image, distortion, exception); for (i = 0; i <= MaxPixelChannels; i++) if 
(fabs(distortion[i]) < MagickEpsilon) distortion[i] = INFINITY; else distortion[i] = 10.0 * MagickLog10(1.0) - 10.0 * MagickLog10(distortion[i]); return (status); } static MagickBooleanType GetPerceptualHashDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { ChannelPerceptualHash * channel_phash, *reconstruct_phash; const char *artifact; MagickBooleanType normalize; ssize_t channel; /* * Compute perceptual hash in the sRGB colorspace. */ channel_phash = GetImagePerceptualHash(image, exception); if (channel_phash == (ChannelPerceptualHash *) NULL) return (MagickFalse); reconstruct_phash = GetImagePerceptualHash(reconstruct_image, exception); if (reconstruct_phash == (ChannelPerceptualHash *) NULL) { channel_phash = (ChannelPerceptualHash *) RelinquishMagickMemory( channel_phash); return (MagickFalse); } artifact = GetImageArtifact(image, "phash:normalize"); normalize = (artifact == (const char *)NULL) || (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (channel = 0; channel < MaxPixelChannels; channel++) { double difference; register ssize_t i; difference = 0.0; for (i = 0; i < MaximumNumberOfImageMoments; i++) { double alpha, beta; register ssize_t j; for (j = 0; j < (ssize_t) channel_phash[0].number_colorspaces; j++) { alpha = channel_phash[channel].phash[j][i]; beta = reconstruct_phash[channel].phash[j][i]; if (normalize == MagickFalse) difference += (beta - alpha) * (beta - alpha); else difference = sqrt((beta - alpha) * (beta - alpha) / channel_phash[0].number_channels); } } distortion[channel] += difference; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPerceptualHashDistortion) #endif distortion[CompositePixelChannel] += difference; } /* * Free resources. */ reconstruct_phash = (ChannelPerceptualHash *) RelinquishMagickMemory( reconstruct_phash); channel_phash = (ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash); return (MagickTrue); } static MagickBooleanType GetRootMeanSquaredDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { MagickBooleanType status; register ssize_t i; status = GetMeanSquaredDistortion(image, reconstruct_image, distortion, exception); for (i = 0; i <= MaxPixelChannels; i++) distortion[i] = sqrt(distortion[i]); return (status); } static MagickBooleanType GetStructuralSimilarityDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { #define SSIMRadius 5.0 #define SSIMSigma 1.5 #define SSIMBlocksize 8 #define SSIMK1 0.01 #define SSIMK2 0.03 #define SSIML 1.0 CacheView * image_view, *reconstruct_view; char geometry[MagickPathExtent]; const char *artifact; double c1, c2, radius, sigma; KernelInfo * kernel_info; MagickBooleanType status; register ssize_t i; size_t columns, rows; ssize_t y; /* * Compute structural similarity index @ * https://en.wikipedia.org/wiki/Structural_similarity. 
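* For each window position and channel, SSIM = ((2*mu_x*mu_y + c1)*(2*sigma_xy + c2)) / ((mu_x^2 + mu_y^2 + c1)*(sigma_x^2 + sigma_y^2 + c2)), with the moments taken under a Gaussian window; the per-channel scores are then averaged over all window positions.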
*/ radius = SSIMRadius; artifact = GetImageArtifact(image, "compare:ssim-radius"); if (artifact != (const char *)NULL) radius = StringToDouble(artifact, (char **)NULL); sigma = SSIMSigma; artifact = GetImageArtifact(image, "compare:ssim-sigma"); if (artifact != (const char *)NULL) sigma = StringToDouble(artifact, (char **)NULL); (void)FormatLocaleString(geometry, MagickPathExtent, "gaussian:%.20gx%.20g", radius, sigma); kernel_info = AcquireKernelInfo(geometry, exception); if (kernel_info == (KernelInfo *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); c1 = pow(SSIMK1 * SSIML, 2.0); artifact = GetImageArtifact(image, "compare:ssim-k1"); if (artifact != (const char *)NULL) c1 = pow(StringToDouble(artifact, (char **)NULL) * SSIML, 2.0); c2 = pow(SSIMK2 * SSIML, 2.0); artifact = GetImageArtifact(image, "compare:ssim-k2"); if (artifact != (const char *)NULL) c2 = pow(StringToDouble(artifact, (char **)NULL) * SSIML, 2.0); status = MagickTrue; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,reconstruct_image,rows,1) #endif for (y = 0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels + 1]; register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, -((ssize_t) kernel_info->width / 2L), y - ((ssize_t) kernel_info->height / 2L), columns + kernel_info->width, kernel_info->height, exception); q = GetCacheViewVirtualPixels(reconstruct_view, -((ssize_t) kernel_info->width / 2L), y - ((ssize_t) kernel_info->height / 2L), columns + kernel_info->width, kernel_info->height, exception); if ((p == (const Quantum *)NULL) || (q == (const Quantum *)NULL)) { status = MagickFalse; continue; } (void)memset(channel_distortion, 0, sizeof(channel_distortion)); for (x = 0; x < (ssize_t) columns; x++) { double x_pixel_mu[MaxPixelChannels + 1], x_pixel_sigma_squared[MaxPixelChannels + 1], xy_sigma[MaxPixelChannels + 1], y_pixel_mu[MaxPixelChannels + 1], y_pixel_sigma_squared[MaxPixelChannels + 1]; register const Quantum * magick_restrict reference, *magick_restrict target; register MagickRealType * k; ssize_t v; (void)memset(x_pixel_mu, 0, sizeof(x_pixel_mu)); (void)memset(x_pixel_sigma_squared, 0, sizeof(x_pixel_sigma_squared)); (void)memset(xy_sigma, 0, sizeof(xy_sigma)); (void)memset(y_pixel_mu, 0, sizeof(y_pixel_mu)); (void)memset(y_pixel_sigma_squared, 0, sizeof(y_pixel_sigma_squared)); k = kernel_info->values; reference = p; target = q; for (v = 0; v < (ssize_t) kernel_info->height; v++) { register ssize_t u; for (u = 0; u < (ssize_t) kernel_info->width; u++) { for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double x_pixel, y_pixel; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel = QuantumScale * reference[i]; x_pixel_mu[i] += (*k) * x_pixel;
x_pixel_sigma_squared[i] += (*k) * x_pixel * x_pixel; y_pixel = QuantumScale * GetPixelChannel(reconstruct_image, channel, target); y_pixel_mu[i] += (*k) * y_pixel; y_pixel_sigma_squared[i] += (*k) * y_pixel * y_pixel; xy_sigma[i] += (*k) * x_pixel * y_pixel; } k++; reference += GetPixelChannels(image); target += GetPixelChannels(reconstruct_image); } reference += GetPixelChannels(image) * columns; target += GetPixelChannels(reconstruct_image) * columns; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double ssim, x_pixel_mu_squared, x_pixel_sigmas_squared, xy_mu, xy_sigmas, y_pixel_mu_squared, y_pixel_sigmas_squared; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel_mu_squared = x_pixel_mu[i] * x_pixel_mu[i]; y_pixel_mu_squared = y_pixel_mu[i] * y_pixel_mu[i]; xy_mu = x_pixel_mu[i] * y_pixel_mu[i]; xy_sigmas = xy_sigma[i] - xy_mu; x_pixel_sigmas_squared = x_pixel_sigma_squared[i] - x_pixel_mu_squared; y_pixel_sigmas_squared = y_pixel_sigma_squared[i] - y_pixel_mu_squared; ssim = ((2.0 * xy_mu + c1) * (2.0 * xy_sigmas + c2)) / ((x_pixel_mu_squared + y_pixel_mu_squared + c1) * (x_pixel_sigmas_squared + y_pixel_sigmas_squared + c2)); channel_distortion[i] += ssim; channel_distortion[CompositePixelChannel] += ssim; } p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion) #endif for (i = 0; i <= MaxPixelChannels; i++) distortion[i] += channel_distortion[i]; } image_view = DestroyCacheView(image_view); reconstruct_view = DestroyCacheView(reconstruct_view); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0)) continue; distortion[i] /= ((double)columns * rows); } distortion[CompositePixelChannel] /= ((double)columns * rows); distortion[CompositePixelChannel] /= (double)GetImageChannels(image); kernel_info = DestroyKernelInfo(kernel_info); return (status); } static MagickBooleanType GetStructuralDisimilarityDistortion(const Image * image, const Image * reconstruct_image, double *distortion, ExceptionInfo * exception) { MagickBooleanType status; register ssize_t i; status = GetStructuralSimilarityDistortion(image, reconstruct_image, distortion, exception); for (i = 0; i <= MaxPixelChannels; i++) distortion[i] = (1.0 - (distortion[i])) / 2.0; return (status); } MagickExport MagickBooleanType GetImageDistortion(Image * image, const Image * reconstruct_image, const MetricType metric, double *distortion, ExceptionInfo * exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(reconstruct_image != (const Image *)NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *)NULL); *distortion = 0.0; if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); /* * Get image 
distortion. */ length = MaxPixelChannels + 1; channel_distortion = (double *)AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *)NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); (void)memset(channel_distortion, 0, length * sizeof(*channel_distortion)); switch (metric) { case AbsoluteErrorMetric: { status = GetAbsoluteDistortion(image, reconstruct_image, channel_distortion, exception); break; } case FuzzErrorMetric: { status = GetFuzzDistortion(image, reconstruct_image, channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status = GetMeanAbsoluteDistortion(image, reconstruct_image, channel_distortion, exception); break; } case MeanErrorPerPixelErrorMetric: { status = GetMeanErrorPerPixel(image, reconstruct_image, channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status = GetMeanSquaredDistortion(image, reconstruct_image, channel_distortion, exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status = GetNormalizedCrossCorrelationDistortion(image, reconstruct_image, channel_distortion, exception); break; } case PeakAbsoluteErrorMetric: { status = GetPeakAbsoluteDistortion(image, reconstruct_image, channel_distortion, exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status = GetPeakSignalToNoiseRatio(image, reconstruct_image, channel_distortion, exception); break; } case PerceptualHashErrorMetric: { status = GetPerceptualHashDistortion(image, reconstruct_image, channel_distortion, exception); break; } case RootMeanSquaredErrorMetric: { status = GetRootMeanSquaredDistortion(image, reconstruct_image, channel_distortion, exception); break; } case StructuralSimilarityErrorMetric: { status = GetStructuralSimilarityDistortion(image, reconstruct_image, channel_distortion, exception); break; } case StructuralDissimilarityErrorMetric: { status = GetStructuralDisimilarityDistortion(image, reconstruct_image, channel_distortion, exception); break; } } *distortion = channel_distortion[CompositePixelChannel]; channel_distortion = (double *)RelinquishMagickMemory(channel_distortion); (void)FormatImageProperty(image, "distortion", "%.*g", GetMagickPrecision(), *distortion); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t I m a g e D i s t o r t i o n s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetImageDistortions() compares the pixel channels of an image to * a % reconstructed image and returns the specified distortion metric for * each % channel. % % The format of the GetImageDistortions method is: % % * double *GetImageDistortions(const Image *image, % const Image * *reconstruct_image,const MetricType metric, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o reconstruct_image: the reconstruct image. % % o * metric: the metric. % % o exception: return any errors or warnings in * this structure. 
% */ MagickExport double * GetImageDistortions(Image * image, const Image * reconstruct_image, const MetricType metric, ExceptionInfo * exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(reconstruct_image != (const Image *)NULL); assert(reconstruct_image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); /* * Get image distortion. */ length = MaxPixelChannels + 1UL; channel_distortion = (double *)AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *)NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); (void)memset(channel_distortion, 0, length * sizeof(*channel_distortion)); status = MagickTrue; switch (metric) { case AbsoluteErrorMetric: { status = GetAbsoluteDistortion(image, reconstruct_image, channel_distortion, exception); break; } case FuzzErrorMetric: { status = GetFuzzDistortion(image, reconstruct_image, channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status = GetMeanAbsoluteDistortion(image, reconstruct_image, channel_distortion, exception); break; } case MeanErrorPerPixelErrorMetric: { status = GetMeanErrorPerPixel(image, reconstruct_image, channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status = GetMeanSquaredDistortion(image, reconstruct_image, channel_distortion, exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status = GetNormalizedCrossCorrelationDistortion(image, reconstruct_image, channel_distortion, exception); break; } case PeakAbsoluteErrorMetric: { status = GetPeakAbsoluteDistortion(image, reconstruct_image, channel_distortion, exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status = GetPeakSignalToNoiseRatio(image, reconstruct_image, channel_distortion, exception); break; } case PerceptualHashErrorMetric: { status = GetPerceptualHashDistortion(image, reconstruct_image, channel_distortion, exception); break; } case RootMeanSquaredErrorMetric: { status = GetRootMeanSquaredDistortion(image, reconstruct_image, channel_distortion, exception); break; } case StructuralSimilarityErrorMetric: { status = GetStructuralSimilarityDistortion(image, reconstruct_image, channel_distortion, exception); break; } case StructuralDissimilarityErrorMetric: { status = GetStructuralDisimilarityDistortion(image, reconstruct_image, channel_distortion, exception); break; } } if (status == MagickFalse) { channel_distortion = (double *)RelinquishMagickMemory(channel_distortion); return ((double *)NULL); } return (channel_distortion); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % I s I m a g e s E q u a l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % IsImagesEqual() compares the pixels of two images and returns * immediately % if any pixel is not identical. % % The format of the * IsImagesEqual method is: % % MagickBooleanType IsImagesEqual(const * Image *image, % const Image *reconstruct_image,ExceptionInfo * *exception) % % A description of each parameter follows. % % o image: * the image. % % o reconstruct_image: the reconstruct image.
% % o * exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsImagesEqual(const Image * image, const Image * reconstruct_image, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *)NULL); assert(reconstruct_image->signature == MagickCoreSignature); rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (Quantum *) NULL)) break; for (x = 0; x < (ssize_t) columns; x++) { register ssize_t i; for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance = fabs(p[i] - (double)GetPixelChannel(reconstruct_image, channel, q)); if (distance >= MagickEpsilon) break; } if (i < (ssize_t) GetPixelChannels(image)) break; p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } if (x < (ssize_t) columns) break; } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); return (y < (ssize_t) rows ? MagickFalse : MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e C o l o r M e t r i c * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageColorMetric() measures the difference between colors at * each pixel % location of two images. A value other than 0 means the * colors match % exactly. Otherwise an error measure is computed by * summing over all % pixels in an image the distance squared in RGB space * between each image % pixel and its corresponding pixel in the reconstruct * image. The error % measure is assigned to these image members: % % o * mean_error_per_pixel: The mean error for any single pixel in % the * image. % % o normalized_mean_error: The normalized mean quantization * error for % any single pixel in the image. This distance measure is * normalized to % a range between 0 and 1. It is independent of the * range of red, green, % and blue values in the image. % % o * normalized_maximum_error: The normalized maximum quantization % * error for any single pixel in the image. This distance measure is % * normalized to a range between 0 and 1. It is independent of the range % * of red, green, and blue values in your image. % % A small normalized mean * square error, accessed as % image->normalized_mean_error, suggests the * images are very similar in % spatial layout and color. 
% % The format of * the SetImageColorMetric method is: % % MagickBooleanType * SetImageColorMetric(Image *image, % const Image * *reconstruct_image,ExceptionInfo *exception) % % A description of each * parameter follows. % % o image: the image. % % o reconstruct_image: * the reconstruct image. % % o exception: return any errors or warnings * in this structure. % */ MagickExport MagickBooleanType SetImageColorMetric(Image * image, const Image * reconstruct_image, ExceptionInfo * exception) { CacheView * image_view, *reconstruct_view; double area, maximum_error, mean_error, mean_error_per_pixel; MagickBooleanType status; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *)NULL); assert(reconstruct_image->signature == MagickCoreSignature); area = 0.0; maximum_error = 0.0; mean_error_per_pixel = 0.0; mean_error = 0.0; rows = MagickMax(image->rows, reconstruct_image->rows); columns = MagickMax(image->columns, reconstruct_image->columns); image_view = AcquireVirtualCacheView(image, exception); reconstruct_view = AcquireVirtualCacheView(reconstruct_image, exception); for (y = 0; y < (ssize_t) rows; y++) { register const Quantum * magick_restrict p, *magick_restrict q; register ssize_t x; p = GetCacheViewVirtualPixels(image_view, 0, y, columns, 1, exception); q = GetCacheViewVirtualPixels(reconstruct_view, 0, y, columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (Quantum *) NULL)) break; for (x = 0; x < (ssize_t) columns; x++) { register ssize_t i; for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance = fabs(p[i] - (double)GetPixelChannel(reconstruct_image, channel, q)); if (distance >= MagickEpsilon) { mean_error_per_pixel += distance; mean_error += distance * distance; if (distance > maximum_error) maximum_error = distance; } area++; } p += GetPixelChannels(image); q += GetPixelChannels(reconstruct_image); } } reconstruct_view = DestroyCacheView(reconstruct_view); image_view = DestroyCacheView(image_view); image->error.mean_error_per_pixel = (double)(mean_error_per_pixel / area); image->error.normalized_mean_error = (double)(QuantumScale * QuantumScale * mean_error / area); image->error.normalized_maximum_error = (double)(QuantumScale * maximum_error); status = image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse; return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S i m i l a r i t y I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SimilarityImage() compares the reference image of the image and * returns the % best match offset. In addition, it returns a similarity * image such that an % exact match location is completely white and if none * of the pixels match, % black, otherwise some gray level in-between. 
% % * The format of the SimilarityImage method is: % % Image * *SimilarityImage(const Image *image,const Image *reference, % const * MetricType metric,const double similarity_threshold, % * RectangleInfo *offset,double *similarity,ExceptionInfo *exception) % % A * description of each parameter follows: % % o image: the image. % % o * reference: find an area of the image that closely resembles this image. % * % o metric: the metric. % % o similarity_threshold: minimum * distortion for (sub)image match. % % o offset: the best match offset of * the reference image within the image. % % o similarity: the computed * similarity between the images. % % o exception: return any errors or * warnings in this structure. % */ static double GetSimilarityMetric(const Image * image, const Image * reference, const MetricType metric, const ssize_t x_offset, const ssize_t y_offset, ExceptionInfo * exception) { double distortion; Image * similarity_image; MagickBooleanType status; RectangleInfo geometry; SetGeometry(reference, &geometry); geometry.x = x_offset; geometry.y = y_offset; similarity_image = CropImage(image, &geometry, exception); if (similarity_image == (Image *) NULL) return (0.0); distortion = 0.0; status = GetImageDistortion(similarity_image, reference, metric, &distortion, exception); similarity_image = DestroyImage(similarity_image); if (status == MagickFalse) return (0.0); return (distortion); } MagickExport Image * SimilarityImage(const Image * image, const Image * reference, const MetricType metric, const double similarity_threshold, RectangleInfo * offset, double *similarity_metric, ExceptionInfo * exception) { #define SimilarityImageTag "Similarity/Image" CacheView * similarity_view; Image * similarity_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (const Image *)NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(offset != (RectangleInfo *) NULL); SetGeometry(reference, offset); *similarity_metric = MagickMaximumValue; similarity_image = CloneImage(image, image->columns - reference->columns + 1, image->rows - reference->rows + 1, MagickTrue, exception); if (similarity_image == (Image *) NULL) return ((Image *) NULL); status = SetImageStorageClass(similarity_image, DirectClass, exception); if (status == MagickFalse) { similarity_image = DestroyImage(similarity_image); return ((Image *) NULL); } (void)SetImageAlphaChannel(similarity_image, DeactivateAlphaChannel, exception); /* * Measure similarity of reference image against image.
*/ status = MagickTrue; progress = 0; similarity_view = AcquireAuthenticCacheView(similarity_image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ shared(progress,status,similarity_metric) \ magick_number_threads(image,image,image->rows-reference->rows+1,1) #endif for (y = 0; y < (ssize_t) (image->rows - reference->rows + 1); y++) { double similarity; register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp flush(similarity_metric) #endif if (*similarity_metric <= similarity_threshold) continue; q = GetCacheViewAuthenticPixels(similarity_view, 0, y, similarity_image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) (image->columns - reference->columns + 1); x++) { register ssize_t i; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp flush(similarity_metric) #endif if (*similarity_metric <= similarity_threshold) break; similarity = GetSimilarityMetric(image, reference, metric, x, y, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SimilarityImage) #endif if ((metric == NormalizedCrossCorrelationErrorMetric) || (metric == UndefinedErrorMetric)) similarity = 1.0 - similarity; if (similarity < *similarity_metric) { offset->x = x; offset->y = y; *similarity_metric = similarity; } if (metric == PerceptualHashErrorMetric) similarity = MagickMin(0.01 * similarity, 1.0); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait similarity_traits = GetPixelChannelTraits(similarity_image, channel); if ((traits == UndefinedPixelTrait) || (similarity_traits == UndefinedPixelTrait) || ((similarity_traits & UpdatePixelTrait) == 0)) continue; SetPixelChannel(similarity_image, channel, ClampToQuantum(QuantumRange - QuantumRange * similarity), q); } q += GetPixelChannels(similarity_image); } if (SyncCacheViewAuthenticPixels(similarity_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, SimilarityImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } similarity_view = DestroyCacheView(similarity_view); if (status == MagickFalse) similarity_image = DestroyImage(similarity_image); return (similarity_image); }
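For reference, a minimal usage sketch of the entry points above. This is not part of compare.c; the RootMeanSquaredErrorMetric choice, the command-line filenames, and the terse error handling are illustrative assumptions only:

/* Hypothetical driver: compare two images, print the RMSE, write the annotated difference image. */
#include <stdio.h>
#include <MagickCore/MagickCore.h>

int main(int argc, char **argv)
{
  double distortion = 0.0;
  ExceptionInfo *exception;
  Image *difference, *image, *reconstruct;
  ImageInfo *image_info;

  if (argc != 4)
    {
      (void) fprintf(stderr, "usage: %s image reconstruct difference\n", argv[0]);
      return (1);
    }
  MagickCoreGenesis(*argv, MagickFalse);
  exception = AcquireExceptionInfo();
  image_info = AcquireImageInfo();
  (void) CopyMagickString(image_info->filename, argv[1], MagickPathExtent);
  image = ReadImage(image_info, exception);
  (void) CopyMagickString(image_info->filename, argv[2], MagickPathExtent);
  reconstruct = ReadImage(image_info, exception);
  if ((image == (Image *) NULL) || (reconstruct == (Image *) NULL))
    return (1);
  /* CompareImages() returns the annotated difference image and the scalar metric. */
  difference = CompareImages(image, reconstruct, RootMeanSquaredErrorMetric,
    &distortion, exception);
  (void) fprintf(stdout, "RMSE: %g\n", distortion);
  if (difference != (Image *) NULL)
    {
      (void) CopyMagickString(difference->filename, argv[3], MagickPathExtent);
      (void) WriteImage(image_info, difference, exception);
      difference = DestroyImage(difference);
    }
  reconstruct = DestroyImage(reconstruct);
  image = DestroyImage(image);
  image_info = DestroyImageInfo(image_info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return (0);
}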
GB_subassign_04.c
//------------------------------------------------------------------------------ // GB_subassign_04: C(I,J) += A ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 04: C(I,J) += A ; using S // M: NULL // Mask_comp: false // C_replace: false // accum: present // A: matrix // S: constructed // C: not bitmap: use GB_bitmap_assign instead // A: any sparsity structure. #include "GB_subassign_methods.h" GrB_Info GB_subassign_04 ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t ni, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nj, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_BinaryOp accum, const GrB_Matrix A, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A //-------------------------------------------------------------------------- // S = C(I,J) //-------------------------------------------------------------------------- GB_EMPTY_TASKLIST ; GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ; //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GB_MATRIX_WAIT_IF_JUMBLED (A) ; GB_GET_C ; // C must not be bitmap GB_GET_A ; GB_GET_S ; GB_GET_ACCUM ; //-------------------------------------------------------------------------- // Method 04: C(I,J) += A ; using S //-------------------------------------------------------------------------- // Time: Close to Optimal. Every entry in A must be visited, and the // corresponding entry in S must then be found. Time for this phase is // Omega(nnz(A)), but S has already been constructed, in Omega(nnz(S)) // time. This method simply traverses all of A+S (like GB_add for // computing A+S), the same as Method 02. Time taken is O(nnz(A)+nnz(S)). // The only difference is that the traversal of A+S can terminate if A is // exhausted. Entries in S but not A do not actually require any work // (unlike Method 02, which must visit all entries in A+S). // Method 02 and Method 04 are somewhat similar. They differ on how C is // modified when the entry is present in S but not A. // TODO: phase2 of Method 02 and 04 are identical and could be // done in a single function. // Compare with Method 16, which computes C(I,J)<!M> += A, using S. 
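// For orientation, a sketch (not part of this file) of the user-level call
// that reaches this kernel, assuming the standard GraphBLAS C API; the
// dimensions, the FP64 type, and the GrB_PLUS_FP64 accumulator are
// illustrative assumptions only:
//
//      GrB_Matrix C, A ;
//      GrB_Index I [2] = { 2, 4 }, J [3] = { 0, 1, 3 } ;
//      GrB_Matrix_new (&C, GrB_FP64, 10, 10) ;
//      GrB_Matrix_new (&A, GrB_FP64, 2, 3) ;   // A is ni-by-nj
//      // ... populate C and A ...
//      // C(I,J) += A with no mask and no C_replace dispatches to Method 04;
//      // S = C(I,J) is built symbolically first, then merged with A:
//      GxB_Matrix_subassign (C, NULL, GrB_PLUS_FP64, A, I, 2, J, 3, NULL) ;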
//-------------------------------------------------------------------------- // Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20) //-------------------------------------------------------------------------- if (A_is_bitmap) { // all of IxJ must be examined GB_SUBASSIGN_IXJ_SLICE ; } else { // traverse all A+S GB_SUBASSIGN_TWO_SLICE (A, S) ; } //-------------------------------------------------------------------------- // phase 1: create zombies, update entries, and count pending tuples //-------------------------------------------------------------------------- if (A_is_bitmap) { //---------------------------------------------------------------------- // phase1: A is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (Sfound && !Afound) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. 
A 1]: action: ( insert ) task_pending++ ; } else if (Sfound && Afound) { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; } } } GB_PHASE1_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase1: A is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE1 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (iA < iS) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) task_pending++ ; GB_NEXT (A) ; } else { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // List A (:,j) has entries. List S (:,j) exhausted. 
task_pending += (pA_end - pA) ; } GB_PHASE1_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // phase 2: insert pending tuples //-------------------------------------------------------------------------- GB_PENDING_CUMSUM ; if (A_is_bitmap) { //---------------------------------------------------------------------- // phase2: A is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. 
A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } else if (Sfound) { // S (i,j) present GB_NEXT (S) ; } } } GB_PHASE2_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase2: A is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE2 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { GB_NEXT (S) ; } else if (iA < iS) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } else { GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // while list A (:,j) has entries. List S (:,j) exhausted. while (pA < pA_end) { // ----[. A 1]---------------------------------------------- // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iA = GBI (Ai, pA, Avlen) ; int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } } GB_PHASE2_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // finalize the matrix and return result //-------------------------------------------------------------------------- GB_SUBASSIGN_WRAPUP ; }
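The phase-1 loops above reduce to a 2-way merge of two sorted index lists: entries found only in S are skipped, entries found only in A are counted as pending tuples, and entries found in both are accumulated. Below is a minimal standalone sketch of that merge; the names (merge_counts, merge_S_and_A) are hypothetical toys, not GraphBLAS symbols, and only the control flow mirrors Method 04.

#include <stdint.h>

typedef struct { int64_t pending ; int64_t updated ; } merge_counts ;

merge_counts merge_S_and_A (const int64_t *Si, int64_t nS,
                            const int64_t *Ai, int64_t nA)
{
    merge_counts c = { 0, 0 } ;
    int64_t pS = 0, pA = 0 ;
    while (pS < nS && pA < nA)
    {
        if (Si [pS] < Ai [pA])
        {
            pS++ ;                          // in S only: no change, no work
        }
        else if (Ai [pA] < Si [pS])
        {
            c.pending++ ; pA++ ;            // in A only: becomes a pending tuple
        }
        else
        {
            c.updated++ ; pS++ ; pA++ ;     // in both: apply the accum operator
        }
    }
    c.pending += (nA - pA) ;    // remainder of A: all pending inserts
    return (c) ;                // remainder of S is ignored, as in Method 04
}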
//------------------------------------------------------------------------------ // GB_subassign_04: C(I,J) += A ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 04: C(I,J) += A ; using S // M: NULL // Mask_comp: false // C_replace: false // accum: present // A: matrix // S: constructed // C: not bitmap: use GB_bitmap_assign instead // A: any sparsity structure. #include "GB_subassign_methods.h" GrB_Info GB_subassign_04 ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t ni, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nj, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_BinaryOp accum, const GrB_Matrix A, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A //-------------------------------------------------------------------------- // S = C(I,J) //-------------------------------------------------------------------------- GB_EMPTY_TASKLIST ; GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ; //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GB_MATRIX_WAIT_IF_JUMBLED (A) ; GB_GET_C ; // C must not be bitmap GB_GET_A ; GB_GET_S ; GB_GET_ACCUM ; //-------------------------------------------------------------------------- // Method 04: C(I,J) += A ; using S //-------------------------------------------------------------------------- // Time: Close to Optimal. Every entry in A must be visited, and the // corresponding entry in S must then be found. Time for this phase is // Omega(nnz(A)), but S has already been constructed, in Omega(nnz(S)) // time. This method simply traverses all of A+S (like GB_add for // computing A+S), the same as Method 02. Time taken is O(nnz(A)+nnz(S)). // The only difference is that the traversal of A+S can terminate if A is // exhausted. Entries in S but not A do not actually require any work // (unlike Method 02, which must visit all entries in A+S). // Method 02 and Method 04 are somewhat similar. They differ on how C is // modified when the entry is present in S but not A. // TODO: phase2 of Method 02 and 04 are identical and could be // done in a single function. // Compare with Method 16, which computes C(I,J)<!M> += A, using S. 
//-------------------------------------------------------------------------- // Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20) //-------------------------------------------------------------------------- if (A_is_bitmap) { // all of IxJ must be examined GB_SUBASSIGN_IXJ_SLICE ; } else { // traverse all A+S GB_SUBASSIGN_TWO_SLICE (A, S) ; } //-------------------------------------------------------------------------- // phase 1: create zombies, update entries, and count pending tuples //-------------------------------------------------------------------------- if (A_is_bitmap) { //---------------------------------------------------------------------- // phase1: A is bitmap //---------------------------------------------------------------------- for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (Sfound && !Afound) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [.
A 1]: action: ( insert ) task_pending++ ; } else if (Sfound && Afound) { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; } } } GB_PHASE1_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase1: A is hypersparse, sparse, or full //---------------------------------------------------------------------- for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE1 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (iA < iS) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) task_pending++ ; GB_NEXT (A) ; } else { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // List A (:,j) has entries. List S (:,j) exhausted.
task_pending += (pA_end - pA) ; } GB_PHASE1_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // phase 2: insert pending tuples //-------------------------------------------------------------------------- GB_PENDING_CUMSUM ; if (A_is_bitmap) { //---------------------------------------------------------------------- // phase2: A is bitmap //---------------------------------------------------------------------- for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } else if (Sfound) { // S (i,j) present GB_NEXT (S) ; } } } GB_PHASE2_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase2: A is hypersparse, sparse, or full //---------------------------------------------------------------------- for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE2 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { GB_NEXT (S) ; } else if (iA < iS) { // ----[.
A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } else { GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // while list A (:,j) has entries. List S (:,j) exhausted. while (pA < pA_end) { // ----[. A 1]---------------------------------------------- // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iA = GBI (Ai, pA, Avlen) ; int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } } GB_PHASE2_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // finalize the matrix and return result //-------------------------------------------------------------------------- GB_SUBASSIGN_WRAPUP ; }
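In the OpenMP variant of this method, each phase-1 task counts its zombies privately and the per-task counts are combined with a '+' reduction. A small sketch of that reduction shape follows; total_zombies and task_zombies are illustrative names, not GraphBLAS symbols. Compiled without OpenMP support the pragma is ignored and the loop runs serially, which is exactly what this OpenMP-free variant does.

#include <stdint.h>

int64_t total_zombies (const int64_t *task_zombies, int ntasks, int nthreads)
{
    int64_t nzombies = 0 ;
    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {
        // each thread accumulates into its private copy of nzombies ;
        // OpenMP sums the private copies when the loop completes
        nzombies += task_zombies [taskid] ;
    }
    return (nzombies) ;
}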
//------------------------------------------------------------------------------ // GB_subassign_04: C(I,J) += A ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 04: C(I,J) += A ; using S // M: NULL // Mask_comp: false // C_replace: false // accum: present // A: matrix // S: constructed // C: not bitmap: use GB_bitmap_assign instead // A: any sparsity structure. #include "GB_subassign_methods.h" GrB_Info GB_subassign_04 ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t ni, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nj, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_BinaryOp accum, const GrB_Matrix A, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A //-------------------------------------------------------------------------- // S = C(I,J) //-------------------------------------------------------------------------- GB_EMPTY_TASKLIST ; GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ; //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GB_MATRIX_WAIT_IF_JUMBLED (A) ; GB_GET_C ; // C must not be bitmap GB_GET_A ; GB_GET_S ; GB_GET_ACCUM ; //-------------------------------------------------------------------------- // Method 04: C(I,J) += A ; using S //-------------------------------------------------------------------------- // Time: Close to Optimal. Every entry in A must be visited, and the // corresponding entry in S must then be found. Time for this phase is // Omega(nnz(A)), but S has already been constructed, in Omega(nnz(S)) // time. This method simply traverses all of A+S (like GB_add for // computing A+S), the same as Method 02. Time taken is O(nnz(A)+nnz(S)). // The only difference is that the traversal of A+S can terminate if A is // exhausted. Entries in S but not A do not actually require any work // (unlike Method 02, which must visit all entries in A+S). // Method 02 and Method 04 are somewhat similar. They differ on how C is // modified when the entry is present in S but not A. // TODO: phase2 of Method 02 and 04 are identical and could be // done in a single function. // Compare with Method 16, which computes C(I,J)<!M> += A, using S. 
//-------------------------------------------------------------------------- // Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20) //-------------------------------------------------------------------------- if (A_is_bitmap) { // all of IxJ must be examined GB_SUBASSIGN_IXJ_SLICE ; } else { // traverse all A+S GB_SUBASSIGN_TWO_SLICE (A, S) ; } //-------------------------------------------------------------------------- // phase 1: create zombies, update entries, and count pending tuples //-------------------------------------------------------------------------- if (A_is_bitmap) { //---------------------------------------------------------------------- // phase1: A is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (Sfound && !Afound) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. 
A 1]: action: ( insert ) task_pending++ ; } else if (Sfound && Afound) { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; } } } GB_PHASE1_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase1: A is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE1 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (iA < iS) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) task_pending++ ; GB_NEXT (A) ; } else { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // List A (:,j) has entries. List S (:,j) exhausted. 
task_pending += (pA_end - pA) ; } GB_PHASE1_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // phase 2: insert pending tuples //-------------------------------------------------------------------------- GB_PENDING_CUMSUM ; if (A_is_bitmap) { //---------------------------------------------------------------------- // phase2: A is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. 
A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } else if (Sfound) { // S (i,j) present GB_NEXT (S) ; } } } GB_PHASE2_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase2: A is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE2 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { GB_NEXT (S) ; } else if (iA < iS) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } else { GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // while list A (:,j) has entries. List S (:,j) exhausted. while (pA < pA_end) { // ----[. A 1]---------------------------------------------- // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iA = GBI (Ai, pA, Avlen) ; int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } } GB_PHASE2_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // finalize the matrix and return result //-------------------------------------------------------------------------- GB_SUBASSIGN_WRAPUP ; }
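Phase 2 above combines each task's pending_sorted flag with an OpenMP '&&' reduction, so the final flag is true only if every task appended its pending tuples in sorted order. A hedged sketch of the same reduction shape, with made-up names (all_tasks_sorted, first, last) and a toy per-task check standing in for the real sortedness test:

#include <stdbool.h>
#include <stdint.h>

bool all_tasks_sorted (const int64_t *first, const int64_t *last,
                       int ntasks, int nthreads)
{
    bool pending_sorted = true ;
    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {
        // toy stand-in for "this task appended its tuples in order"
        pending_sorted = pending_sorted && (first [taskid] <= last [taskid]) ;
    }
    return (pending_sorted) ;
}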
GB_binop__isgt_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isgt_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__isgt_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__isgt_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__isgt_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_uint64) // A*D function (colscale): GB (_AxD__isgt_uint64) // D*A function (rowscale): GB (_DxB__isgt_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__isgt_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__isgt_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_uint64) // C=scalar+B GB (_bind1st__isgt_uint64) // C=scalar+B' GB (_bind1st_tran__isgt_uint64) // C=A+scalar GB (_bind2nd__isgt_uint64) // C=A'+scalar GB (_bind2nd_tran__isgt_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_UINT64 || GxB_NO_ISGT_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isgt_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isgt_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isgt_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isgt_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isgt_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isgt_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__isgt_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
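The bind2nd kernel above fixes the scalar y and evaluates cij = (aij > y) across a flat value array, skipping entries whose bitmap bit is clear. Here is a self-contained sketch of that loop; apply_isgt_bind2nd is a hypothetical name, and the real kernel reads through the GBB/GBX macros rather than touching the arrays directly.

#include <stdint.h>

void apply_isgt_bind2nd (uint64_t *Cx, const uint64_t *Ax,
                         const int8_t *Ab,      // NULL means all entries present
                         uint64_t y, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // entry p not in the bitmap
        Cx [p] = (Ax [p] > y) ;                 // ISGT result is 0 or 1, in uint64_t
    }
}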
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isgt_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__isgt_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__isgt_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__isgt_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_uint64) // A*D function (colscale): GB (_AxD__isgt_uint64) // D*A function (rowscale): GB (_DxB__isgt_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__isgt_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__isgt_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_uint64) // C=scalar+B GB (_bind1st__isgt_uint64) // C=scalar+B' GB (_bind1st_tran__isgt_uint64) // C=A+scalar GB (_bind2nd__isgt_uint64) // C=A'+scalar GB (_bind2nd_tran__isgt_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_UINT64 || GxB_NO_ISGT_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isgt_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isgt_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isgt_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isgt_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isgt_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isgt_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__isgt_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isgt_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__isgt_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__isgt_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__isgt_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_uint64) // A*D function (colscale): GB (_AxD__isgt_uint64) // D*A function (rowscale): GB (_DxB__isgt_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__isgt_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__isgt_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_uint64) // C=scalar+B GB (_bind1st__isgt_uint64) // C=scalar+B' GB (_bind1st_tran__isgt_uint64) // C=A+scalar GB (_bind2nd__isgt_uint64) // C=A'+scalar GB (_bind2nd_tran__isgt_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_UINT64 || GxB_NO_ISGT_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isgt_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isgt_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isgt_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isgt_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isgt_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isgt_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__isgt_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__isgt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
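The variants of GB_binop__isgt_uint64.c above differ only in whether the scalar-bind loops carry "#pragma omp parallel for"; all of them implement the built-in GxB_ISGT_UINT64 operator, cij = (aij > bij). A minimal usage sketch of that operator through the public eWiseMult API follows; it assumes a SuiteSparse:GraphBLAS installation and omits error checking for brevity.

#include <GraphBLAS.h>
#include <stdio.h>

int main(void)
{
  GrB_init(GrB_NONBLOCKING);
  GrB_Matrix A, B, C;
  GrB_Matrix_new(&A, GrB_UINT64, 2, 2);
  GrB_Matrix_new(&B, GrB_UINT64, 2, 2);
  GrB_Matrix_new(&C, GrB_UINT64, 2, 2);
  GrB_Matrix_setElement_UINT64(A, 7, 0, 0);  /* A(0,0) = 7 */
  GrB_Matrix_setElement_UINT64(B, 3, 0, 0);  /* B(0,0) = 3 */
  /* C = A .* B with the ISGT operator: C(0,0) = (7 > 3) = 1 */
  GrB_eWiseMult(C, NULL, NULL, GxB_ISGT_UINT64, A, B, NULL);
  uint64_t cij = 0;
  GrB_Matrix_extractElement_UINT64(&cij, C, 0, 0);
  printf("C(0,0) = %llu\n", (unsigned long long) cij);
  GrB_Matrix_free(&A);
  GrB_Matrix_free(&B);
  GrB_Matrix_free(&C);
  GrB_finalize();
  return 0;
}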
scheduled-clauseModificado4.c
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* Fallbacks so the program also builds without OpenMP. */
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#define omp_get_num_procs() 1
#define omp_in_parallel() 0
#endif

int main(int argc, char **argv)
{
  int i, n = 16, chunk, a[n], suma = 0;
  if (argc < 2) {
    fprintf(stderr, "\nMissing chunk argument\n");
    exit(-1);
  }
  chunk = atoi(argv[1]);
  for (i = 0; i < n; i++)
    a[i] = i;
#pragma omp parallel
  {
#pragma omp for firstprivate(suma) \
    lastprivate(suma) schedule(dynamic, chunk)
    for (i = 0; i < n; i++) {
      suma = suma + a[i];
      printf(" thread %d adds a[%d], suma=%d\n", omp_get_thread_num(), i, suma);
    }
#pragma omp single
    {
      printf("omp_get_num_threads: %d\n", omp_get_num_threads());
      printf("omp_get_num_procs: %d\n", omp_get_num_procs());
      printf("omp_in_parallel: %d\n", omp_in_parallel());
    }
  }
  printf("Outside 'parallel for' suma=%d\n", suma);
  printf("Outside the parallel region:\n");
  printf("omp_get_num_threads: %d\n", omp_get_num_threads());
  printf("omp_get_num_procs: %d\n", omp_get_num_procs());
  printf("omp_in_parallel: %d\n", omp_in_parallel());
  return 0;
}
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* Fallbacks so the program also builds without OpenMP. */
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#define omp_get_num_procs() 1
#define omp_in_parallel() 0
#endif

int main(int argc, char **argv)
{
  int i, n = 16, chunk, a[n], suma = 0;
  if (argc < 2) {
    fprintf(stderr, "\nMissing chunk argument\n");
    exit(-1);
  }
  chunk = atoi(argv[1]);
  (void) chunk; /* only used by the scheduling clause in the OpenMP variant */
  for (i = 0; i < n; i++)
    a[i] = i;
  for (i = 0; i < n; i++) {
    suma = suma + a[i];
    printf(" thread %d adds a[%d], suma=%d\n", omp_get_thread_num(), i, suma);
  }
  printf("omp_get_num_threads: %d\n", omp_get_num_threads());
  printf("omp_get_num_procs: %d\n", omp_get_num_procs());
  printf("omp_in_parallel: %d\n", omp_in_parallel());
  printf("Outside 'parallel for' suma=%d\n", suma);
  printf("Outside the parallel region:\n");
  printf("omp_get_num_threads: %d\n", omp_get_num_threads());
  printf("omp_get_num_procs: %d\n", omp_get_num_procs());
  printf("omp_in_parallel: %d\n", omp_in_parallel());
  return 0;
}
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* Fallbacks so the program also builds without OpenMP. */
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#define omp_get_num_procs() 1
#define omp_in_parallel() 0
#endif

int main(int argc, char **argv)
{
  int i, n = 16, chunk, a[n], suma = 0;
  if (argc < 2) {
    fprintf(stderr, "\nMissing chunk argument\n");
    exit(-1);
  }
  chunk = atoi(argv[1]);
  for (i = 0; i < n; i++)
    a[i] = i;
#pragma omp parallel
  {
#pragma omp for firstprivate(suma) \
    lastprivate(suma) schedule(dynamic, chunk)
    for (i = 0; i < n; i++) {
      suma = suma + a[i];
      printf(" thread %d adds a[%d], suma=%d\n", omp_get_thread_num(), i, suma);
    }
#pragma omp single
    {
      printf("omp_get_num_threads: %d\n", omp_get_num_threads());
      printf("omp_get_num_procs: %d\n", omp_get_num_procs());
      printf("omp_in_parallel: %d\n", omp_in_parallel());
    }
  }
  printf("Outside 'parallel for' suma=%d\n", suma);
  printf("Outside the parallel region:\n");
  printf("omp_get_num_threads: %d\n", omp_get_num_threads());
  printf("omp_get_num_procs: %d\n", omp_get_num_procs());
  printf("omp_in_parallel: %d\n", omp_in_parallel());
  return 0;
}
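Note the data-sharing pattern in the loop above: firstprivate(suma) gives each thread a private copy initialized to the pre-loop value (0), and lastprivate(suma) copies out only the value held by the thread that executed the sequentially last iteration, so the "Outside 'parallel for'" line generally prints a partial sum rather than 0+1+...+15 = 120. A minimal sketch of the conventional way to obtain the full total, using a reduction clause (hypothetical standalone program):

#include <stdio.h>

int main(void)
{
  int i, n = 16, a[16], suma = 0;
  for (i = 0; i < n; i++)
    a[i] = i;
  /* Each thread accumulates privately; OpenMP combines the copies at the end. */
#pragma omp parallel for reduction(+ : suma) schedule(dynamic, 4)
  for (i = 0; i < n; i++)
    suma += a[i];
  printf("suma=%d\n", suma); /* always 120, regardless of thread count */
  return 0;
}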
GB_binop__min_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__min_fp64 // A.*B function (eWiseMult): GB_AemultB__min_fp64 // A*D function (colscale): GB_AxD__min_fp64 // D*A function (rowscale): GB_DxB__min_fp64 // C+=B function (dense accum): GB_Cdense_accumB__min_fp64 // C+=b function (dense accum): GB_Cdense_accumb__min_fp64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__min_fp64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__min_fp64 // C=scalar+B GB_bind1st__min_fp64 // C=scalar+B' GB_bind1st_tran__min_fp64 // C=A+scalar GB_bind2nd__min_fp64 // C=A'+scalar GB_bind2nd_tran__min_fp64 // C type: double // A type: double // B,b type: double // BinaryOp: cij = fmin (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = fmin (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_FP64 || GxB_NO_MIN_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__min_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__min_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__min_fp64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__min_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__min_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__min_fp64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; double bij = Bx [p] ; Cx [p] = fmin (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__min_fp64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = Ax [p] ; Cx [p] = fmin (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmin (x, aij) ; \ } GrB_Info GB_bind1st_tran__min_fp64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, 
int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmin (aij, y) ; \ } GrB_Info GB_bind2nd_tran__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__min_fp64 // A.*B function (eWiseMult): GB_AemultB__min_fp64 // A*D function (colscale): GB_AxD__min_fp64 // D*A function (rowscale): GB_DxB__min_fp64 // C+=B function (dense accum): GB_Cdense_accumB__min_fp64 // C+=b function (dense accum): GB_Cdense_accumb__min_fp64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__min_fp64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__min_fp64 // C=scalar+B GB_bind1st__min_fp64 // C=scalar+B' GB_bind1st_tran__min_fp64 // C=A+scalar GB_bind2nd__min_fp64 // C=A'+scalar GB_bind2nd_tran__min_fp64 // C type: double // A type: double // B,b type: double // BinaryOp: cij = fmin (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = fmin (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_FP64 || GxB_NO_MIN_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__min_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__min_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__min_fp64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__min_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__min_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__min_fp64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; double bij = Bx [p] ; Cx [p] = fmin (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__min_fp64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = Ax [p] ; Cx [p] = fmin (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmin (x, aij) ; \ } GrB_Info GB_bind1st_tran__min_fp64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c 
uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmin (aij, y) ; \ } GrB_Info GB_bind2nd_tran__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__min_fp64 // A.*B function (eWiseMult): GB_AemultB__min_fp64 // A*D function (colscale): GB_AxD__min_fp64 // D*A function (rowscale): GB_DxB__min_fp64 // C+=B function (dense accum): GB_Cdense_accumB__min_fp64 // C+=b function (dense accum): GB_Cdense_accumb__min_fp64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__min_fp64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__min_fp64 // C=scalar+B GB_bind1st__min_fp64 // C=scalar+B' GB_bind1st_tran__min_fp64 // C=A+scalar GB_bind2nd__min_fp64 // C=A'+scalar GB_bind2nd_tran__min_fp64 // C type: double // A type: double // B,b type: double // BinaryOp: cij = fmin (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = fmin (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_FP64 || GxB_NO_MIN_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__min_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__min_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__min_fp64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__min_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__min_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__min_fp64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; double bij = Bx [p] ; Cx [p] = fmin (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__min_fp64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = Ax [p] ; Cx [p] = fmin (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmin (x, aij) ; \ } GrB_Info GB_bind1st_tran__min_fp64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, 
int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmin (aij, y) ; \ } GrB_Info GB_bind2nd_tran__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
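In the GB_binop__min_fp64.c variants above, bind1st and bind2nd fix a scalar as the first or second argument of fmin and apply the operator to every stored entry. A plain-C sketch of the bind2nd semantics follows (the helper name is hypothetical, not the library API; the generated GB_bind2nd__min_fp64 is the same loop, specialized and parallelized):

#include <math.h>
#include <stdio.h>

/* Cx [p] = fmin (Ax [p], y) for every entry, with the scalar y bound
   as the second operand of the binary op. */
static void bind2nd_min_fp64(double *Cx, const double *Ax, double y, long anz)
{
  for (long p = 0; p < anz; p++)
    Cx[p] = fmin(Ax[p], y);
}

int main(void)
{
  double Ax[4] = { 3.0, -1.5, 8.0, 2.5 }, Cx[4];
  bind2nd_min_fp64(Cx, Ax, 2.0, 4);
  for (int p = 0; p < 4; p++)
    printf("%g ", Cx[p]); /* prints: 2 -1.5 2 2 */
  printf("\n");
  return 0;
}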
GB_unop__ainv_int8_int8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__ainv_int8_int8 // op(A') function: GB_unop_tran__ainv_int8_int8 // C type: int8_t // A type: int8_t // cast: int8_t cij = aij // unaryop: cij = -aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CAST(z, aij) \ int8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = aij ; \ Cx [pC] = -z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__ainv_int8_int8 ( int8_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = -z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__ainv_int8_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__ainv_int8_int8 // op(A') function: GB_unop_tran__ainv_int8_int8 // C type: int8_t // A type: int8_t // cast: int8_t cij = aij // unaryop: cij = -aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CAST(z, aij) \ int8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = aij ; \ Cx [pC] = -z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__ainv_int8_int8 ( int8_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = -z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__ainv_int8_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__ainv_int8_int8 // op(A') function: GB_unop_tran__ainv_int8_int8 // C type: int8_t // A type: int8_t // cast: int8_t cij = aij // unaryop: cij = -aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CAST(z, aij) \ int8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = aij ; \ Cx [pC] = -z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__ainv_int8_int8 ( int8_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = -z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__ainv_int8_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
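The GB_unop__ainv_int8_int8.c variants above implement the additive-inverse operator, cij = -aij, with no typecast. A plain-C sketch of the kernel's semantics (hypothetical helper name), including the INT8 edge case: negating INT8_MIN (-128) truncates back to -128 under the two's-complement wraparound that common platforms apply, and the generated kernel inherits that behavior.

#include <stdint.h>
#include <stdio.h>

/* Cx [p] = -Ax [p] for every entry, stored back as int8_t. */
static void ainv_int8(int8_t *Cx, const int8_t *Ax, long anz)
{
  for (long p = 0; p < anz; p++)
    Cx[p] = (int8_t)(-Ax[p]);
}

int main(void)
{
  int8_t Ax[3] = { 5, -7, INT8_MIN }, Cx[3];
  ainv_int8(Cx, Ax, 3);
  printf("%d %d %d\n", Cx[0], Cx[1], Cx[2]); /* -5 7 -128 on typical targets */
  return 0;
}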
matmult.c
/****************************************************************************** * OpenMp Example - Matrix Multiply - C Version * Demonstrates a matrix multiply using OpenMP. * * Modified from here: * https://computing.llnl.gov/tutorials/openMP/samples/C/omp_mm.c * * For PAPI_FP_INS, the exclusive count for the event: * for (null) [OpenMP location: file:matmult.c ] * should be 2E+06 / Number of Threads ******************************************************************************/ #include <stdio.h> #include <stdlib.h> #ifdef TAU_MPI int provided; #include <mpi.h> /* NOTE: MPI is just used to spawn multiple copies of the kernel to different ranks. This is not a parallel implementation */ #endif /* TAU_MPI */ #ifdef PTHREADS #include <pthread.h> #include <unistd.h> #include <errno.h> /*** NOTE THE ATTR INITIALIZER HERE! ***/ pthread_mutex_t mutexsum; #endif /* PTHREADS */ #define APP_USE_INLINE_MULTIPLY 1 #ifndef MATRIX_SIZE #define MATRIX_SIZE 512 #endif #define NRA MATRIX_SIZE /* number of rows in matrix A */ #define NCA MATRIX_SIZE /* number of columns in matrix A */ #define NCB MATRIX_SIZE /* number of columns in matrix B */ void initialize(double **matrix, int rows, int cols) { int i,j; #pragma omp parallel private(i,j) shared(matrix) { //set_num_threads(); /*** Initialize matrices ***/ #pragma omp for nowait for (i=0; i<rows; i++) { for (j=0; j<cols; j++) { matrix[i][j]= i+j; } } } } double** allocateMatrix(int rows, int cols) { int i; double **matrix = (double**)malloc((sizeof(double*)) * rows); for (i=0; i<rows; i++) { matrix[i] = (double*)malloc((sizeof(double)) * cols); } return matrix; } void freeMatrix(double** matrix, int rows, int cols) { int i; for (i=0; i<rows; i++) { free(matrix[i]); } free(matrix); } double multiply(double a, double b) { return a * b; } #ifdef TAU_OPENMP // cols_a and rows_b are the same value void compute_nested(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; double tmp = 0.0; /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp parallel for private(i,j,k) shared(a,b,c) for (i=0; i<rows_a; i++) { { for (k=0; k<cols_a; k++) { for(j=0; j<cols_b; j++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else tmp = a[i][k]; tmp = tmp * b[k][j]; c[i][j] += tmp; #endif } } } } } #endif // cols_a and rows_b are the same value void compute(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; #pragma omp parallel private(i,j,k) shared(a,b,c) { /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp for nowait for (i=0; i<rows_a; i++) { for(j=0; j<cols_b; j++) { for (k=0; k<cols_a; k++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else /* APP_USE_INLINE_MULTIPLY */ c[i][j] += a[i][k] * b[k][j]; #endif /* APP_USE_INLINE_MULTIPLY */ } } } } /*** End of parallel region ***/ } void compute_interchange(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; #pragma omp parallel private(i,j,k) shared(a,b,c) { /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp for nowait for (i=0; i<rows_a; i++) { for (k=0; k<cols_a; k++) { for(j=0; j<cols_b; j++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else /* APP_USE_INLINE_MULTIPLY */ 
c[i][j] += a[i][k] * b[k][j]; #endif /* APP_USE_INLINE_MULTIPLY */ } } } } /*** End of parallel region ***/ } double do_work(void) { double **a, /* matrix A to be multiplied */ **b, /* matrix B to be multiplied */ **c; /* result matrix C */ a = allocateMatrix(NRA, NCA); b = allocateMatrix(NCA, NCB); c = allocateMatrix(NRA, NCB); /*** Spawn a parallel region explicitly scoping all variables ***/ initialize(a, NRA, NCA); initialize(b, NCA, NCB); initialize(c, NRA, NCB); compute(a, b, c, NRA, NCA, NCB); #if defined(TAU_OPENMP) //if (omp_get_nested()) { compute_nested(a, b, c, NRA, NCA, NCB); //} #endif #ifdef TAU_MPI if (provided == MPI_THREAD_MULTIPLE) { printf("provided is MPI_THREAD_MULTIPLE\n"); } else if (provided == MPI_THREAD_FUNNELED) { printf("provided is MPI_THREAD_FUNNELED\n"); } #endif /* TAU_MPI */ compute_interchange(a, b, c, NRA, NCA, NCB); double result = c[0][1]; freeMatrix(a, NRA, NCA); freeMatrix(b, NCA, NCB); freeMatrix(c, NCA, NCB); return result; } #ifdef PTHREADS int busy_sleep() { int i, sum = 0; for (i = 0 ; i < 100000000 ; i++) { sum = sum+i; } return sum; } void * threaded_func(void *data) { int rc; int sum = 0; // compute do_work(); #ifdef APP_DO_LOCK_TEST // test locking - sampling should catch this if ((rc = pthread_mutex_lock(&mutexsum)) != 0) { errno = rc; perror("thread lock error"); exit(1); } fprintf(stderr,"Thread 'sleeping'...\n"); fflush(stderr); sum += busy_sleep(); fprintf(stderr,"Thread 'awake'...\n"); fflush(stderr); if ((rc = pthread_mutex_unlock(&mutexsum)) != 0) { errno = rc; perror("thread unlock error"); exit(1); } pthread_exit((void*) 0); //return NULL; #endif // APP_DO_LOCK_TEST } #endif // PTHREADS int main (int argc, char *argv[]) { #ifdef PTHREADS int ret; pthread_attr_t attr; pthread_t tid1, tid2, tid3; pthread_mutexattr_t Attr; pthread_mutexattr_init(&Attr); pthread_mutexattr_settype(&Attr, PTHREAD_MUTEX_ERRORCHECK); if (pthread_mutex_init(&mutexsum, &Attr)) { printf("Error while using pthread_mutex_init\n"); } #endif /* PTHREADS */ #ifdef TAU_MPI int rc = MPI_SUCCESS; #if defined(PTHREADS) rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); printf("MPI_Init_thread: provided = %d, MPI_THREAD_MULTIPLE=%d\n", provided, MPI_THREAD_MULTIPLE); #elif defined(TAU_OPENMP) rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided); printf("MPI_Init_thread: provided = %d, MPI_THREAD_FUNNELED=%d\n", provided, MPI_THREAD_FUNNELED); #else rc = MPI_Init(&argc, &argv); #endif /* THREADS */ if (rc != MPI_SUCCESS) { char *errorstring; int length = 0; MPI_Error_string(rc, errorstring, &length); printf("Error: MPI_Init failed, rc = %d\n%s\n", rc, errorstring); exit(1); } #endif /* TAU_MPI */ #ifdef PTHREADS if (ret = pthread_create(&tid1, NULL, threaded_func, NULL) ) { printf("Error: pthread_create (1) fails ret = %d\n", ret); exit(1); } if (ret = pthread_create(&tid2, NULL, threaded_func, NULL) ) { printf("Error: pthread_create (2) fails ret = %d\n", ret); exit(1); } if (ret = pthread_create(&tid3, NULL, threaded_func, NULL) ) { printf("Error: pthread_create (3) fails ret = %d\n", ret); exit(1); } #endif /* PTHREADS */ /* On thread 0: */ int i; //for (i = 0 ; i < 100 ; i++) { do_work(); //} #ifdef PTHREADS if (ret = pthread_join(tid1, NULL) ) { printf("Error: pthread_join (1) fails ret = %d\n", ret); exit(1); } if (ret = pthread_join(tid2, NULL) ) { printf("Error: pthread_join (2) fails ret = %d\n", ret); exit(1); } if (ret = pthread_join(tid3, NULL) ) { printf("Error: pthread_join (3) fails ret = %d\n", ret); exit(1); } 
pthread_mutex_destroy(&mutexsum); #endif /* PTHREADS */ #ifdef TAU_MPI MPI_Finalize(); #endif /* TAU_MPI */ printf("Done.\n"); return 0; }
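/*
 * [Illustrative sketch] initialize() and compute() above use the
 * parallel-region + worksharing pattern: a `#pragma omp parallel` region with
 * a `#pragma omp for nowait` loop inside. `nowait` removes the barrier at the
 * end of the for construct; that is safe here because the loop is the last
 * statement in the region, and the region's own implicit closing barrier
 * still synchronizes all threads. Hypothetical standalone demo:
 */
#include <stdio.h>
#define N 4

int main(void)
{
    static double m[N][N];
    int i, j;
#pragma omp parallel private(i,j) shared(m)
    {
#pragma omp for nowait
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                m[i][j] = i + j;
            }
        }
    } /* implicit barrier of the parallel region: m is fully written here */
    printf("m[%d][%d] = %g\n", N - 1, N - 1, m[N - 1][N - 1]);
    return 0;
}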
/****************************************************************************** * OpenMp Example - Matrix Multiply - C Version * Demonstrates a matrix multiply using OpenMP. * * Modified from here: * https://computing.llnl.gov/tutorials/openMP/samples/C/omp_mm.c * * For PAPI_FP_INS, the exclusive count for the event: * for (null) [OpenMP location: file:matmult.c ] * should be 2E+06 / Number of Threads ******************************************************************************/ #include <stdio.h> #include <stdlib.h> #ifdef TAU_MPI int provided; #include <mpi.h> /* * NOTE: MPI is just used to spawn multiple copies of the kernel to different * ranks. This is not a parallel implementation */ #endif /* TAU_MPI */ #ifdef PTHREADS #include <pthread.h> #include <unistd.h> #include <errno.h> /*** NOTE THE ATTR INITIALIZER HERE! ***/ pthread_mutex_t mutexsum; #endif /* PTHREADS */ #define APP_USE_INLINE_MULTIPLY 1 #ifndef MATRIX_SIZE #define MATRIX_SIZE 512 #endif #define NRA MATRIX_SIZE /* number of rows in matrix A */ #define NCA MATRIX_SIZE /* number of columns in matrix A */ #define NCB MATRIX_SIZE /* number of columns in matrix B */ void initialize(double **matrix, int rows, int cols) { int i, j; //set_num_threads(); /*** Initialize matrices ***/ for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { matrix[i][j] = i + j; } } } double ** allocateMatrix(int rows, int cols) { int i; double **matrix = (double **)malloc((sizeof(double *)) * rows); for (i = 0; i < rows; i++) { matrix[i] = (double *)malloc((sizeof(double)) * cols); } return matrix; } void freeMatrix(double **matrix, int rows, int cols) { int i; for (i = 0; i < rows; i++) { free(matrix[i]); } free(matrix); } double multiply(double a, double b) { return a * b; } //cols_a and rows_b are the same value void compute(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i, j, k; /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ for (i = 0; i < rows_a; i++) { for (j = 0; j < cols_b; j++) { for (k = 0; k < cols_a; k++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else /* APP_USE_INLINE_MULTIPLY */ c[i][j] += a[i][k] * b[k][j]; #endif /* APP_USE_INLINE_MULTIPLY */ } } } /*** End of parallel region ***/ } void compute_interchange(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i, j, k; /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ for (i = 0; i < rows_a; i++) { for (k = 0; k < cols_a; k++) { for (j = 0; j < cols_b; j++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else /* APP_USE_INLINE_MULTIPLY */ c[i][j] += a[i][k] * b[k][j]; #endif /* APP_USE_INLINE_MULTIPLY */ } } } /*** End of parallel region ***/ } double do_work(void) { double **a, /* matrix A to be multiplied */ **b, /* matrix B to be multiplied */ **c; /* result matrix C */ a = allocateMatrix(NRA, NCA); b = allocateMatrix(NCA, NCB); c = allocateMatrix(NRA, NCB); /*** Spawn a parallel region explicitly scoping all variables ***/ initialize(a, NRA, NCA); initialize(b, NCA, NCB); initialize(c, NRA, NCB); compute(a, b, c, NRA, NCA, NCB); #ifdef TAU_MPI if (provided == MPI_THREAD_MULTIPLE) { printf("provided is MPI_THREAD_MULTIPLE\n"); } else if (provided == MPI_THREAD_FUNNELED) { printf("provided is MPI_THREAD_FUNNELED\n"); } #endif /* TAU_MPI */ compute_interchange(a, b, c, NRA, NCA, NCB); double result = 
c[0][1]; freeMatrix(a, NRA, NCA); freeMatrix(b, NCA, NCB); freeMatrix(c, NCA, NCB); return result; } #ifdef PTHREADS int busy_sleep() { int i, sum = 0; for (i = 0; i < 100000000; i++) { sum = sum + i; } return sum; } void * threaded_func(void *data) { int rc; int sum = 0; //compute do_work(); #ifdef APP_DO_LOCK_TEST //test locking - sampling should catch this if ((rc = pthread_mutex_lock(&mutexsum)) != 0) { errno = rc; perror("thread lock error"); exit(1); } fprintf(stderr, "Thread 'sleeping'...\n"); fflush(stderr); sum += busy_sleep(); fprintf(stderr, "Thread 'awake'...\n"); fflush(stderr); if ((rc = pthread_mutex_unlock(&mutexsum)) != 0) { errno = rc; perror("thread unlock error"); exit(1); } pthread_exit((void *)0); //return NULL; #endif /* // APP_DO_LOCK_TEST */ } #endif /* // PTHREADS */ int main(int argc, char *argv[]) { #ifdef PTHREADS int ret; pthread_attr_t attr; pthread_t tid1, tid2, tid3; pthread_mutexattr_t Attr; pthread_mutexattr_init(&Attr); pthread_mutexattr_settype(&Attr, PTHREAD_MUTEX_ERRORCHECK); if (pthread_mutex_init(&mutexsum, &Attr)) { printf("Error while using pthread_mutex_init\n"); } #endif /* PTHREADS */ #ifdef TAU_MPI int rc = MPI_SUCCESS; #if defined(PTHREADS) rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); printf("MPI_Init_thread: provided = %d, MPI_THREAD_MULTIPLE=%d\n", provided, MPI_THREAD_MULTIPLE); #elif defined(TAU_OPENMP) rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided); printf("MPI_Init_thread: provided = %d, MPI_THREAD_FUNNELED=%d\n", provided, MPI_THREAD_FUNNELED); #else rc = MPI_Init(&argc, &argv); #endif /* THREADS */ if (rc != MPI_SUCCESS) { char *errorstring; int length = 0; MPI_Error_string(rc, errorstring, &length); printf("Error: MPI_Init failed, rc = %d\n%s\n", rc, errorstring); exit(1); } #endif /* TAU_MPI */ #ifdef PTHREADS if (ret = pthread_create(&tid1, NULL, threaded_func, NULL)) { printf("Error: pthread_create (1) fails ret = %d\n", ret); exit(1); } if (ret = pthread_create(&tid2, NULL, threaded_func, NULL)) { printf("Error: pthread_create (2) fails ret = %d\n", ret); exit(1); } if (ret = pthread_create(&tid3, NULL, threaded_func, NULL)) { printf("Error: pthread_create (3) fails ret = %d\n", ret); exit(1); } #endif /* PTHREADS */ /* On thread 0: */ int i; //for (i = 0; i < 100; i++) { do_work(); // } #ifdef PTHREADS if (ret = pthread_join(tid1, NULL)) { printf("Error: pthread_join (1) fails ret = %d\n", ret); exit(1); } if (ret = pthread_join(tid2, NULL)) { printf("Error: pthread_join (2) fails ret = %d\n", ret); exit(1); } if (ret = pthread_join(tid3, NULL)) { printf("Error: pthread_join (3) fails ret = %d\n", ret); exit(1); } pthread_mutex_destroy(&mutexsum); #endif /* PTHREADS */ #ifdef TAU_MPI MPI_Finalize(); #endif /* TAU_MPI */ printf("Done.\n"); return 0; }
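/*
 * [Illustrative sketch] The driver above requests a threading level from MPI
 * and prints what was granted. Minimal hypothetical demo of that handshake:
 * `provided` may be lower than requested, so it must be checked before mixing
 * threads and MPI calls. (Under MPI_THREAD_FUNNELED only the main thread may
 * call MPI.)
 */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int provided = 0;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
    if (provided < MPI_THREAD_FUNNELED) {
        printf("warning: MPI granted only level %d\n", provided);
    }
    /* ... compute kernels run here; MPI calls stay on the main thread ... */
    MPI_Finalize();
    return 0;
}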
/****************************************************************************** * OpenMp Example - Matrix Multiply - C Version * Demonstrates a matrix multiply using OpenMP. * * Modified from here: * https://computing.llnl.gov/tutorials/openMP/samples/C/omp_mm.c * * For PAPI_FP_INS, the exclusive count for the event: * for (null) [OpenMP location: file:matmult.c ] * should be 2E+06 / Number of Threads ******************************************************************************/ #include <stdio.h> #include <stdlib.h> #ifdef TAU_MPI int provided; #include <mpi.h> /* * NOTE: MPI is just used to spawn multiple copies of the kernel to different * ranks. This is not a parallel implementation */ #endif /* TAU_MPI */ #ifdef PTHREADS #include <pthread.h> #include <unistd.h> #include <errno.h> /*** NOTE THE ATTR INITIALIZER HERE! ***/ pthread_mutex_t mutexsum; #endif /* PTHREADS */ #define APP_USE_INLINE_MULTIPLY 1 #ifndef MATRIX_SIZE #define MATRIX_SIZE 512 #endif #define NRA MATRIX_SIZE /* number of rows in matrix A */ #define NCA MATRIX_SIZE /* number of columns in matrix A */ #define NCB MATRIX_SIZE /* number of columns in matrix B */ void initialize(double **matrix, int rows, int cols) { int i, j; #pragma omp parallel private(i,j) shared(matrix) { //set_num_threads(); /*** Initialize matrices ***/ #pragma omp for nowait for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { matrix[i][j] = i + j; } } } } double ** allocateMatrix(int rows, int cols) { int i; double **matrix = (double **)malloc((sizeof(double *)) * rows); for (i = 0; i < rows; i++) { matrix[i] = (double *)malloc((sizeof(double)) * cols); } return matrix; } void freeMatrix(double **matrix, int rows, int cols) { int i; for (i = 0; i < rows; i++) { free(matrix[i]); } free(matrix); } double multiply(double a, double b) { return a * b; } #ifdef TAU_OPENMP //cols_a and rows_b are the same value void compute_nested(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i, j, k; double tmp = 0.0; /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp parallel for private(i,j,k) shared(a,b,c) for (i = 0; i < rows_a; i++) { { for (k = 0; k < cols_a; k++) { for (j = 0; j < cols_b; j++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else tmp = a[i][k]; tmp = tmp * b[k][j]; c[i][j] += tmp; #endif } } } } } #endif //cols_a and rows_b are the same value void compute(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i, j, k; #pragma omp parallel private(i,j,k) shared(a,b,c) { /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp for nowait for (i = 0; i < rows_a; i++) { for (j = 0; j < cols_b; j++) { for (k = 0; k < cols_a; k++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else /* APP_USE_INLINE_MULTIPLY */ c[i][j] += a[i][k] * b[k][j]; #endif /* APP_USE_INLINE_MULTIPLY */ } } } } /*** End of parallel region ***/ } void compute_interchange(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i, j, k; #pragma omp parallel private(i,j,k) shared(a,b,c) { /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp for nowait for (i = 0; i < rows_a; i++) { for (k = 0; k < cols_a; k++) { for (j = 0; j < cols_b; j++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += 
multiply(a[i][k], b[k][j]); #else /* APP_USE_INLINE_MULTIPLY */ c[i][j] += a[i][k] * b[k][j]; #endif /* APP_USE_INLINE_MULTIPLY */ } } } } /*** End of parallel region ***/ } double do_work(void) { double **a, /* matrix A to be multiplied */ **b, /* matrix B to be multiplied */ **c; /* result matrix C */ a = allocateMatrix(NRA, NCA); b = allocateMatrix(NCA, NCB); c = allocateMatrix(NRA, NCB); /*** Spawn a parallel region explicitly scoping all variables ***/ initialize(a, NRA, NCA); initialize(b, NCA, NCB); initialize(c, NRA, NCB); compute(a, b, c, NRA, NCA, NCB); #if defined(TAU_OPENMP) //if (omp_get_nested()) { compute_nested(a, b, c, NRA, NCA, NCB); // } #endif #ifdef TAU_MPI if (provided == MPI_THREAD_MULTIPLE) { printf("provided is MPI_THREAD_MULTIPLE\n"); } else if (provided == MPI_THREAD_FUNNELED) { printf("provided is MPI_THREAD_FUNNELED\n"); } #endif /* TAU_MPI */ compute_interchange(a, b, c, NRA, NCA, NCB); double result = c[0][1]; freeMatrix(a, NRA, NCA); freeMatrix(b, NCA, NCB); freeMatrix(c, NCA, NCB); return result; } #ifdef PTHREADS int busy_sleep() { int i, sum = 0; for (i = 0; i < 100000000; i++) { sum = sum + i; } return sum; } void * threaded_func(void *data) { int rc; int sum = 0; //compute do_work(); #ifdef APP_DO_LOCK_TEST //test locking - sampling should catch this if ((rc = pthread_mutex_lock(&mutexsum)) != 0) { errno = rc; perror("thread lock error"); exit(1); } fprintf(stderr, "Thread 'sleeping'...\n"); fflush(stderr); sum += busy_sleep(); fprintf(stderr, "Thread 'awake'...\n"); fflush(stderr); if ((rc = pthread_mutex_unlock(&mutexsum)) != 0) { errno = rc; perror("thread unlock error"); exit(1); } pthread_exit((void *)0); //return NULL; #endif /* // APP_DO_LOCK_TEST */ } #endif /* // PTHREADS */ int main(int argc, char *argv[]) { #ifdef PTHREADS int ret; pthread_attr_t attr; pthread_t tid1, tid2, tid3; pthread_mutexattr_t Attr; pthread_mutexattr_init(&Attr); pthread_mutexattr_settype(&Attr, PTHREAD_MUTEX_ERRORCHECK); if (pthread_mutex_init(&mutexsum, &Attr)) { printf("Error while using pthread_mutex_init\n"); } #endif /* PTHREADS */ #ifdef TAU_MPI int rc = MPI_SUCCESS; #if defined(PTHREADS) rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); printf("MPI_Init_thread: provided = %d, MPI_THREAD_MULTIPLE=%d\n", provided, MPI_THREAD_MULTIPLE); #elif defined(TAU_OPENMP) rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided); printf("MPI_Init_thread: provided = %d, MPI_THREAD_FUNNELED=%d\n", provided, MPI_THREAD_FUNNELED); #else rc = MPI_Init(&argc, &argv); #endif /* THREADS */ if (rc != MPI_SUCCESS) { char *errorstring; int length = 0; MPI_Error_string(rc, errorstring, &length); printf("Error: MPI_Init failed, rc = %d\n%s\n", rc, errorstring); exit(1); } #endif /* TAU_MPI */ #ifdef PTHREADS if (ret = pthread_create(&tid1, NULL, threaded_func, NULL)) { printf("Error: pthread_create (1) fails ret = %d\n", ret); exit(1); } if (ret = pthread_create(&tid2, NULL, threaded_func, NULL)) { printf("Error: pthread_create (2) fails ret = %d\n", ret); exit(1); } if (ret = pthread_create(&tid3, NULL, threaded_func, NULL)) { printf("Error: pthread_create (3) fails ret = %d\n", ret); exit(1); } #endif /* PTHREADS */ /* On thread 0: */ int i; //for (i = 0; i < 100; i++) { do_work(); // } #ifdef PTHREADS if (ret = pthread_join(tid1, NULL)) { printf("Error: pthread_join (1) fails ret = %d\n", ret); exit(1); } if (ret = pthread_join(tid2, NULL)) { printf("Error: pthread_join (2) fails ret = %d\n", ret); exit(1); } if (ret = pthread_join(tid3, NULL)) { 
printf("Error: pthread_join (3) fails ret = %d\n", ret); exit(1); } pthread_mutex_destroy(&mutexsum); #endif /* PTHREADS */ #ifdef TAU_MPI MPI_Finalize(); #endif /* TAU_MPI */ printf("Done.\n"); return 0; }
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) 
+ coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
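/*
 * [Illustrative sketch] The stencil above keeps only two time levels:
 * A[(t+1)%2] is written while A[t%2] is read, so the two buffers swap roles
 * as t advances and no copying is needed. One-dimensional, order-1 analogue
 * of the same ping-pong scheme (hypothetical demo; `static` keeps the
 * untouched boundary entries zero-initialized):
 */
#include <stdio.h>
#define N  16
#define NT 4

int main(void)
{
    static double A[2][N];
    for (int k = 0; k < N; k++) A[0][k] = k;

    for (int t = 0; t < NT; t++) {
        for (int k = 1; k < N - 1; k++) {
            A[(t + 1) % 2][k] = 0.5  *  A[t % 2][k]
                              + 0.25 * (A[t % 2][k - 1] + A[t % 2][k + 1]);
        }
    }
    printf("A[%d][%d] = %f\n", NT % 2, N / 2, A[NT % 2][N / 2]);
    return 0;
}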
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 1024; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz - 4; i++) { for (j = 4; j < Ny - 4; j++) { for (k = 4; k < Nx - 4; k++) { A[(t + 1) % 2][i][j][k] = coef[0][i][j][k] * A[(t) % 2][i][j][k] + coef[1][i][j][k] * (A[(t) % 2][i - 1][j][k] + A[(t) % 2][i + 1][j][k]) + coef[2][i][j][k] * (A[(t) % 2][i][j - 1][k] + A[(t) % 2][i][j + 1][k]) + coef[3][i][j][k] * (A[(t) % 2][i][j][k - 1] + A[(t) % 2][i][j][k + 1]) + coef[4][i][j][k] * (A[(t) % 2][i - 2][j][k] + A[(t) % 2][i + 
2][j][k]) + coef[5][i][j][k] * (A[(t) % 2][i][j - 2][k] + A[(t) % 2][i][j + 2][k]) + coef[6][i][j][k] * (A[(t) % 2][i][j][k - 2] + A[(t) % 2][i][j][k + 2]) + coef[7][i][j][k] * (A[(t) % 2][i - 3][j][k] + A[(t) % 2][i + 3][j][k]) + coef[8][i][j][k] * (A[(t) % 2][i][j - 3][k] + A[(t) % 2][i][j + 3][k]) + coef[9][i][j][k] * (A[(t) % 2][i][j][k - 3] + A[(t) % 2][i][j][k + 3]) + coef[10][i][j][k] * (A[(t) % 2][i - 4][j][k] + A[(t) % 2][i + 4][j][k]) + coef[11][i][j][k] * (A[(t) % 2][i][j - 4][k] + A[(t) % 2][i][j + 4][k]) + coef[12][i][j][k] * (A[(t) % 2][i][j][k - 4] + A[(t) % 2][i][j][k + 4]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
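/*
 * [Illustrative sketch] How the timeval_subtract() helper above is meant to
 * be used: bracket the region of interest with gettimeofday() and convert the
 * resulting struct timeval to seconds. Hypothetical harness; it assumes it is
 * linked with a file providing timeval_subtract() as defined above.
 */
#include <stdio.h>
#include <sys/time.h>

int timeval_subtract(struct timeval *result, struct timeval *x,
                     struct timeval *y); /* defined above */

int main(void)
{
    struct timeval start, end, result;
    volatile double s = 0.0;

    gettimeofday(&start, 0);
    for (int i = 0; i < 10000000; i++) s += i;
    gettimeofday(&end, 0);

    timeval_subtract(&result, &end, &start);
    printf("elapsed: %f s\n", result.tv_sec + result.tv_usec * 1.0e-6);
    return 0;
}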
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 1024; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz - 4; i++) { for (j = 4; j < Ny - 4; j++) { for (k = 4; k < Nx - 4; k++) { A[(t + 1) % 2][i][j][k] = coef[0][i][j][k] * A[(t) % 2][i][j][k] + coef[1][i][j][k] * (A[(t) % 2][i - 1][j][k] + A[(t) % 2][i + 1][j][k]) + coef[2][i][j][k] * (A[(t) % 2][i][j - 1][k] + A[(t) % 2][i][j + 1][k]) + coef[3][i][j][k] * (A[(t) % 
2][i][j][k - 1] + A[(t) % 2][i][j][k + 1]) + coef[4][i][j][k] * (A[(t) % 2][i - 2][j][k] + A[(t) % 2][i + 2][j][k]) + coef[5][i][j][k] * (A[(t) % 2][i][j - 2][k] + A[(t) % 2][i][j + 2][k]) + coef[6][i][j][k] * (A[(t) % 2][i][j][k - 2] + A[(t) % 2][i][j][k + 2]) + coef[7][i][j][k] * (A[(t) % 2][i - 3][j][k] + A[(t) % 2][i + 3][j][k]) + coef[8][i][j][k] * (A[(t) % 2][i][j - 3][k] + A[(t) % 2][i][j + 3][k]) + coef[9][i][j][k] * (A[(t) % 2][i][j][k - 3] + A[(t) % 2][i][j][k + 3]) + coef[10][i][j][k] * (A[(t) % 2][i - 4][j][k] + A[(t) % 2][i + 4][j][k]) + coef[11][i][j][k] * (A[(t) % 2][i][j - 4][k] + A[(t) % 2][i][j + 4][k]) + coef[12][i][j][k] * (A[(t) % 2][i][j][k - 4] + A[(t) % 2][i][j][k + 4]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
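/*
 * [Illustrative sketch] The main difference between the serial and OpenMP
 * builds above is the LIKWID instrumentation: THREADINIT/START/STOP are
 * issued inside `#pragma omp parallel` regions so every worker thread is
 * registered with the marker API. Minimal hypothetical demo; compile with
 * -DLIKWID_PERFMON and link liblikwid for real counts, otherwise the stubs
 * below make the markers vanish.
 */
#include <stdio.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#else
#define LIKWID_MARKER_INIT
#define LIKWID_MARKER_THREADINIT
#define LIKWID_MARKER_START(tag)
#define LIKWID_MARKER_STOP(tag)
#define LIKWID_MARKER_CLOSE
#endif

int main(void)
{
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }

    double s = 0.0; /* region of interest */
#pragma omp parallel for reduction(+:s)
    for (int i = 0; i < 1000000; i++) s += i * 1.0e-6;

#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;

    printf("s = %f\n", s);
    return 0;
}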
Query3.h
#pragma once #include <queue> #include <algorithm> #include <cassert> #include <numeric> #include <memory> #include <set> #include <cstdio> #include <utility> #include "utils.h" #include "Query.h" #include <cstdio> class Query3 : public Query<int, int, std::string> { int topKLimit, maximumHopCount; std::string placeName; GBxx_Object<GrB_Matrix> hasInterest; GBxx_Object<GrB_Vector> getRelevantPersons() { GrB_Index place_index = input.places.findIndexByName(placeName); // set starting place auto selected_places = GB(GrB_Vector_new, GrB_BOOL, input.places.size()); ok(GrB_Vector_setElement_BOOL(selected_places.get(), true, place_index)); auto selected_organizations = GB(GrB_Vector_new, GrB_BOOL, input.organizations.size()); auto relevant_persons = GB(GrB_Vector_new, GrB_BOOL, input.persons.size()); Places::Type place_type = input.places.types[place_index]; while (true) { switch (place_type) { case Places::Continent: // no person to reach from the continent break; case Places::Country: // get companies ok(GrB_vxm(selected_organizations.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, selected_places.get(), input.organizationIsLocatedInPlaceTran.matrix.get(), GrB_NULL)); // get persons working at companies ok(GrB_vxm(relevant_persons.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, selected_organizations.get(), input.workAtTran.matrix.get(), GrB_NULL)); break; case Places::City: // get universities ok(GrB_vxm(selected_organizations.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, selected_places.get(), input.organizationIsLocatedInPlaceTran.matrix.get(), GrB_NULL)); // add persons studying at universities ok(GrB_vxm(relevant_persons.get(), GrB_NULL, GxB_PAIR_BOOL, GxB_ANY_PAIR_BOOL, selected_organizations.get(), input.studyAtTran.matrix.get(), GrB_NULL)); // add persons at cities ok(GrB_vxm(relevant_persons.get(), GrB_NULL, GxB_PAIR_BOOL, GxB_ANY_PAIR_BOOL, selected_places.get(), input.personIsLocatedInCityTran.matrix.get(), GrB_NULL)); break; default: throw std::runtime_error("Unknown Place.type"); } // get parts of current places (overwriting current ones) if (place_type < Places::City) { ok(GrB_vxm(selected_places.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, selected_places.get(), input.isPartOfTran.matrix.get(), GrB_NULL)); ++place_type; } else break; } return relevant_persons; } using score_type = std::tuple<int64_t, uint64_t, uint64_t>; void tagCount_filtered_reachable_count_tags_strategy(GrB_Vector const local_persons, SmallestElementsContainer<score_type, std::less<score_type>> &person_scores) { // maximum value: 10 -> UINT8 auto tag_count_per_person = GB(GrB_Vector_new, GrB_UINT8, input.persons.size()); ok(GrB_Vector_assign_UINT8(tag_count_per_person.get(), local_persons, GrB_NULL, 0, GrB_ALL, 0, GrB_NULL)); ok(GrB_Matrix_reduce_Monoid(tag_count_per_person.get(), local_persons, GrB_NULL, GrB_PLUS_MONOID_UINT8, hasInterest.get(), GrB_NULL)); uint8_t max_tag_count; ok(GrB_Vector_reduce_UINT8(&max_tag_count, GrB_NULL, GrB_MAX_MONOID_UINT8, tag_count_per_person.get(), GrB_NULL)); #ifndef NDEBUG std::cerr << "max_tag_count: " << (unsigned) max_tag_count << std::endl; #endif GrB_Index common_interests_nvals; auto relevant_persons = GB(GrB_Vector_new, GrB_BOOL, input.persons.size()); GBxx_Object<GrB_Matrix> common_interests; for (int lower_tag_count = max_tag_count;;) { // add persons with less tags auto limit = GB(GxB_Scalar_new, GrB_UINT8); ok(GxB_Scalar_setElement_INT32(limit.get(), lower_tag_count)); ok(GxB_Vector_select(relevant_persons.get(), GrB_NULL, GxB_PAIR_BOOL, GxB_EQ_THUNK, 
tag_count_per_person.get(), limit.get(), GrB_NULL)); GrB_Index relevant_persons_nvals; ok(GrB_Vector_nvals(&relevant_persons_nvals, relevant_persons.get())); std::vector<GrB_Index> relevant_persons_indices(relevant_persons_nvals); { GrB_Index nvals = relevant_persons_nvals; ok(GrB_Vector_extractTuples_BOOL(relevant_persons_indices.data(), GrB_NULL, &nvals, relevant_persons.get())); assert(relevant_persons_nvals == nvals); } // build diagonal matrix of relevant persons auto persons_diag_mx = GB(GrB_Matrix_new, GrB_BOOL, input.persons.size(), input.persons.size()); ok(GrB_Matrix_build_BOOL(persons_diag_mx.get(), relevant_persons_indices.data(), relevant_persons_indices.data(), array_of_true(relevant_persons_nvals).get(), relevant_persons_nvals, GxB_PAIR_BOOL)); auto next_mx = GB(GrB_Matrix_dup, persons_diag_mx.get()); auto seen_mx = GB(GrB_Matrix_dup, next_mx.get()); // MSBFS from relevant persons for (int i = 0; i < maximumHopCount; ++i) { ok(GrB_mxm(next_mx.get(), seen_mx.get(), GrB_NULL, GxB_ANY_PAIR_BOOL, next_mx.get(), input.knows.matrix.get(), GrB_DESC_RSC)); GrB_Index next_mx_nvals; ok(GrB_Matrix_nvals(&next_mx_nvals, next_mx.get())); // if emptied the component if (next_mx_nvals == 0) break; ok(GrB_Matrix_eWiseAdd_BinaryOp(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_PAIR_BOOL, seen_mx.get(), next_mx.get(), GrB_NULL)); } // strictly lower triangular matrix is enough for reachable persons // source persons were filtered at the beginning // drop friends in different place ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_OFFDIAG, seen_mx.get(), GrB_NULL, GrB_NULL)); ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_TRIL, seen_mx.get(), GrB_NULL, GrB_NULL)); ok(GrB_mxm(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, seen_mx.get(), persons_diag_mx.get(), GrB_NULL)); auto h_reachable_knows_tril = std::move(seen_mx); // calculate common interests between persons in h hop distance common_interests = GB(GrB_Matrix_new, GrB_INT64, input.persons.size(), input.persons.size()); ok(GrB_mxm(common_interests.get(), h_reachable_knows_tril.get(), GrB_NULL, GxB_PLUS_TIMES_INT64, hasInterest.get(), input.hasInterestTran.matrix.get(), GrB_DESC_S)); // count tag scores per person pairs ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests.get())); if (common_interests_nvals < topKLimit) { // there are not enough non-zero scores // add reachable persons with zero common tags // assign 0 to every reachable person pair, but select non-zero score (first operand) if present // common_interests <h_reachable_knows_tril> 1ST= 0 ok(GrB_Matrix_assign_INT64(common_interests.get(), h_reachable_knows_tril.get(), GrB_FIRST_INT64, 0, GrB_ALL, 0, GrB_ALL, 0, GrB_DESC_S)); // recount nvals ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests.get())); } // extract result from matrix std::vector<GrB_Index> common_interests_rows(common_interests_nvals), common_interests_cols(common_interests_nvals); std::vector<int64_t> common_interests_vals(common_interests_nvals); { GrB_Index nvals = common_interests_nvals; ok(GrB_Matrix_extractTuples_INT64(common_interests_rows.data(), common_interests_cols.data(), common_interests_vals.data(), &nvals, common_interests.get())); assert(common_interests_nvals == nvals); } person_scores.removeElements(); // collect top scores for (size_t i = 0; i < common_interests_vals.size(); ++i) { GrB_Index p1_index = common_interests_rows[i], p2_index = common_interests_cols[i]; int64_t score = common_interests_vals[i]; uint64_t p1_id = 
input.persons.vertexIds[p1_index]; uint64_t p2_id = input.persons.vertexIds[p2_index]; // put the smallest ID first if (p1_id > p2_id) std::swap(p1_id, p2_id); // DESC score person_scores.add({-score, p1_id, p2_id}); } if (person_scores.size() == topKLimit && std::get<0>(person_scores.max()) == -lower_tag_count) { #ifndef NDEBUG std::cerr << "stopped at min(top scores)=" << lower_tag_count << std::endl; #endif break; } if (lower_tag_count != 0) --lower_tag_count; else break; } } inline __attribute__((always_inline)) void push_next(GrB_Matrix Next, GrB_Matrix Seen, GrB_Matrix Edges) { // next<!seen> = next * A ok(GrB_mxm(Next, Seen, NULL, GxB_ANY_PAIR_UINT8, Next, Edges, GrB_DESC_RSC)); // seen pair= next ok(GrB_transpose(Seen, NULL, GxB_PAIR_UINT8, Next, GrB_DESC_T0)); } void tagCount_msbfs_strategy(GrB_Vector const local_persons, SmallestElementsContainer<score_type, std::less<score_type>> &person_scores) { // maximum value: 10 -> UINT8 auto tag_count_per_person = GB(GrB_Vector_new, GrB_UINT8, input.persons.size()); // every local person has at least 0 tags ok(GrB_Vector_assign_UINT8(tag_count_per_person.get(), local_persons, GrB_NULL, 0, GrB_ALL, 0, GrB_NULL)); // count tags per person ok(GrB_Matrix_reduce_Monoid(tag_count_per_person.get(), local_persons, GrB_NULL, GrB_PLUS_MONOID_UINT8, hasInterest.get(), GrB_NULL)); uint8_t max_tag_count; ok(GrB_Vector_reduce_UINT8(&max_tag_count, GrB_NULL, GrB_MAX_MONOID_UINT8, tag_count_per_person.get(), GrB_NULL)); #ifndef NDEBUG std::cerr << "max_tag_count: " << (unsigned) max_tag_count << std::endl; #endif GBxx_Object<GrB_Matrix> last_common_interests_pattern = GB(GrB_Matrix_new, GrB_BOOL, input.persons.size(), input.persons.size()); // persons with 10 tags, persons with 9..10 tags, ... auto relevant_persons = GB(GrB_Vector_new, GrB_BOOL, input.persons.size()); for (int lower_tag_count = max_tag_count;;) { #ifndef NDEBUG std::cerr << "Loop:" << lower_tag_count << std::endl; #endif // add persons with less tags auto limit = GB(GxB_Scalar_new, GrB_UINT8); ok(GxB_Scalar_setElement_INT32(limit.get(), lower_tag_count)); ok(GxB_Vector_select(relevant_persons.get(), GrB_NULL, GxB_PAIR_BOOL, GxB_EQ_THUNK, tag_count_per_person.get(), limit.get(), GrB_NULL)); GrB_Index relevant_persons_nvals; ok(GrB_Vector_nvals(&relevant_persons_nvals, relevant_persons.get())); // extract relevant person indices std::vector<GrB_Index> relevant_persons_indices(relevant_persons_nvals); { GrB_Index nvals = relevant_persons_nvals; ok(GrB_Vector_extractTuples_BOOL(relevant_persons_indices.data(), GrB_NULL, &nvals, relevant_persons.get())); assert(relevant_persons_nvals == nvals); } // build diagonal matrix of relevant persons auto persons_diag_mx = GB(GrB_Matrix_new, GrB_UINT8, input.persons.size(), input.persons.size()); ok(GrB_Matrix_build_UINT8(persons_diag_mx.get(), relevant_persons_indices.data(), relevant_persons_indices.data(), (uint8_t *) array_of_true(relevant_persons_nvals).get(), relevant_persons_nvals, GxB_PAIR_BOOL)); auto next_mx = GB(GrB_Matrix_dup, persons_diag_mx.get()); auto seen_mx = GB(GrB_Matrix_dup, next_mx.get()); // MSBFS from relevant persons for (int i = 0; i < maximumHopCount / 2; ++i) { push_next(next_mx.get(), seen_mx.get(), input.knows.matrix.get()); } // persons reached in the first (maximumHopCount / 2) steps are marked with 2 ok(GrB_Matrix_assign_UINT8(seen_mx.get(), seen_mx.get(), GrB_NULL, 2, GrB_ALL, 0, GrB_ALL, 0, GrB_NULL)); // one more "half" step is needed for odd distances: nodes reached now are marked with 1 if (maximumHopCount % 2 == 
1) { // next<!seen> = next * A ok(GrB_mxm(next_mx.get(), seen_mx.get(), NULL, GxB_ANY_PAIR_BOOL, next_mx.get(), input.knows.matrix.get(), GrB_DESC_RSC)); ok(GrB_Matrix_assign_UINT8(seen_mx.get(), next_mx.get(), GrB_NULL, 1, GrB_ALL, 0, GrB_ALL, 0, GrB_NULL)); } // TODO: offdiag? tril? // ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_OFFDIAG, seen_mx.get(), GrB_NULL, GrB_NULL)); auto half_reachable = std::move(seen_mx); // find vertices where relevant persons meet: reduce to row vector auto columns_where_vertices_meet = GB(GrB_Vector_new, GrB_UINT64, input.persons.size()); ok(GrB_Matrix_reduce_Monoid(columns_where_vertices_meet.get(), GrB_NULL, GrB_NULL, GrB_PLUS_MONOID_UINT64, half_reachable.get(), GrB_DESC_T0)); #ifndef NDEBUG { GrB_Index nvals; ok(GrB_Vector_nvals(&nvals, columns_where_vertices_meet.get())); std::cerr << "columns_where_vertices_meet nvals:" << nvals << std::endl; } #endif // prune: goal: keep vertices where at least 2 vertices meet // invalid values: // - 1: single person reached (in the last "half" step) // - 2: single person reached in the first (maximumHopCount / 2) steps, // or for odd maximumHopCount (2*e+1): 2 persons in the last "half" step with 1+1 values: // two 1 values mean the distance is (floor((2*e+1) / 2) + 1)*2 = (2*e+1)+1 > maximumHopCount // valid values: // - 3 (odd only): 1 + 2 values: floor((2*e+1) / 2) + (floor((2*e+1) / 2) + 1) = 2*e+1 = maximumHopCount // - 4: 2 + 2 values: two persons reached each other in the first (maximumHopCount / 2) steps // false positives: // - >= 3: 1+1+1+... without value 2 auto scalar3 = GB(GxB_Scalar_new, GrB_UINT64); ok(GxB_Scalar_setElement_UINT64(scalar3.get(), 3)); ok(GxB_Vector_select(columns_where_vertices_meet.get(), GrB_NULL, GrB_NULL, GxB_GE_THUNK, columns_where_vertices_meet.get(), scalar3.get(), GrB_NULL)); // extract columns_where_vertices_meet GrB_Index columns_where_vertices_meet_nvals; ok(GrB_Vector_nvals(&columns_where_vertices_meet_nvals, columns_where_vertices_meet.get())); std::vector<GrB_Index> columns_where_vertices_meet_indices(columns_where_vertices_meet_nvals); { GrB_Index nvals = columns_where_vertices_meet_nvals; ok(GrB_Vector_extractTuples_UINT64(columns_where_vertices_meet_indices.data(), nullptr, &nvals, columns_where_vertices_meet.get())); assert(columns_where_vertices_meet_nvals == nvals); } #ifndef NDEBUG std::cerr << "columns_where_vertices_meet_nvals after select:" << columns_where_vertices_meet_nvals << std::endl; #endif // calculate common interests between persons in h hop distance GBxx_Object<GrB_Matrix> common_interests_global = GB(GrB_Matrix_new, GrB_UINT64, input.persons.size(), input.persons.size()); #pragma omp parallel num_threads(GlobalNThreads) { // thread-local GBxx_Object<GrB_Matrix> common_interests = GB(GrB_Matrix_new, GrB_UINT64, input.persons.size(), input.persons.size()); #pragma omp for schedule(static) for (GrB_Index i = 0; i < columns_where_vertices_meet_nvals; ++i) { GrB_Index meet_column = columns_where_vertices_meet_indices[i]; // get persons who meet at vertex meet_column auto meeting_vertices = GB(GrB_Vector_new, GrB_UINT8, input.persons.size()); ok(GrB_Col_extract(meeting_vertices.get(), GrB_NULL, GrB_NULL, half_reachable.get(), GrB_ALL, 0, meet_column, GrB_NULL)); GrB_Index meeting_vertices_nvals; ok(GrB_Vector_nvals(&meeting_vertices_nvals, meeting_vertices.get())); std::vector<GrB_Index> meeting_vertices_indices(meeting_vertices_nvals); std::vector<uint8_t> meeting_vertices_vals(meeting_vertices_nvals); { GrB_Index nvals = 
meeting_vertices_nvals; ok(GrB_Vector_extractTuples_UINT8(meeting_vertices_indices.data(), meeting_vertices_vals.data(), &nvals, meeting_vertices.get())); assert(meeting_vertices_nvals == nvals); } for (GrB_Index p1_iter = 0; p1_iter < meeting_vertices_nvals; ++p1_iter) { auto val1 = meeting_vertices_vals[p1_iter]; bool is_from_next1 = val1 == 1; for (GrB_Index p2_iter = 0; p2_iter < p1_iter; ++p2_iter) { auto val2 = meeting_vertices_vals[p2_iter]; // 1 & 1 values means the persons meet after maximumHopCount + 1, which is invalid if (is_from_next1 && val2 == 1) continue; GrB_Index p1 = meeting_vertices_indices[p1_iter]; GrB_Index p2 = meeting_vertices_indices[p2_iter]; ok(GrB_Matrix_setElement_UINT64(common_interests.get(), 0, p1, p2)); } } } #pragma omp critical(Q3_merge_thread_local_matrices) { ok(GrB_transpose(common_interests_global.get(), last_common_interests_pattern.get(), GxB_PAIR_UINT64, common_interests.get(), GrB_DESC_SCT0)); auto ptr = common_interests_global.release(); ok(GrB_Matrix_wait(&ptr)); common_interests_global.reset(ptr); } } // store current pattern to evaluate only once ok(GrB_transpose(last_common_interests_pattern.get(), GrB_NULL, GxB_PAIR_BOOL, common_interests_global.get(), GrB_DESC_T0)); // keep offdiag tril ok(GxB_Matrix_select(common_interests_global.get(), GrB_NULL, GrB_NULL, GxB_OFFDIAG, common_interests_global.get(), GrB_NULL, GrB_NULL)); ok(GxB_Matrix_select(common_interests_global.get(), GrB_NULL, GrB_NULL, GxB_TRIL, common_interests_global.get(), GrB_NULL, GrB_NULL)); auto common_interests_pattern = GB(GrB_Matrix_dup, common_interests_global.get()); ok(GrB_mxm(common_interests_global.get(), common_interests_global.get(), GrB_NULL, GxB_PLUS_TIMES_UINT64, hasInterest.get(), input.hasInterestTran.matrix.get(), GrB_DESC_S)); #ifndef NDEBUG ok(GxB_Matrix_fprint(common_interests_global.get(), "common_interests", GxB_SUMMARY, stdout)); #endif // count tag scores per person pairs GrB_Index common_interests_nvals; ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests_global.get())); bool might_contain_duplicates = false; if (lower_tag_count == 0 && person_scores.size() < topKLimit) { might_contain_duplicates = true; // there are not enough non-zero scores // add reachable persons with zero common tags // assign 0 to every reachable person pair, but select non-zero score (first operand) if present // common_interests <h_reachable_knows_tril> 1ST= 0 ok(GrB_Matrix_assign_INT64(common_interests_global.get(), last_common_interests_pattern.get(), GrB_FIRST_INT64, 0, GrB_ALL, 0, GrB_ALL, 0, GrB_DESC_S)); // recount nvals ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests_global.get())); } // extract result from matrix std::vector<GrB_Index> common_interests_rows(common_interests_nvals), common_interests_cols(common_interests_nvals); std::vector<int64_t> common_interests_vals(common_interests_nvals); { GrB_Index nvals = common_interests_nvals; ok(GrB_Matrix_extractTuples_INT64(common_interests_rows.data(), common_interests_cols.data(), common_interests_vals.data(), &nvals, common_interests_global.get())); assert(common_interests_nvals == nvals); } // collect top scores for (size_t i = 0; i < common_interests_vals.size(); ++i) { GrB_Index p1_index = common_interests_rows[i], p2_index = common_interests_cols[i]; int64_t score = common_interests_vals[i]; uint64_t p1_id = input.persons.vertexIds[p1_index]; uint64_t p2_id = input.persons.vertexIds[p2_index]; // put the smallest ID first if (p1_id > p2_id) std::swap(p1_id, p2_id); // do not add duplicates 
if (!might_contain_duplicates || std::find_if(person_scores.elements.begin(), person_scores.elements.end(), [&](auto &t) { return std::get<1>(t) == p1_id && std::get<2>(t) == p2_id; }) == person_scores.elements.end()) { // DESC score person_scores.add({-score, p1_id, p2_id}); } } if (person_scores.size() == topKLimit && std::get<0>(person_scores.max()) == -lower_tag_count) { #ifndef NDEBUG std::cerr << "stopped at min(top scores)=" << lower_tag_count << std::endl; #endif break; } if (lower_tag_count != 0) --lower_tag_count; else break; } } void reachable_count_tags_strategy(GrB_Vector const local_persons, GrB_Index const local_persons_nvals, SmallestElementsContainer<score_type, std::less<score_type>> &person_scores) { std::vector<GrB_Index> local_persons_indices(local_persons_nvals); { GrB_Index nvals = local_persons_nvals; ok(GrB_Vector_extractTuples_BOOL(local_persons_indices.data(), GrB_NULL, &nvals, local_persons)); assert(local_persons_nvals == nvals); } // build diagonal matrix of local persons auto persons_diag_mx = GB(GrB_Matrix_new, GrB_BOOL, input.persons.size(), input.persons.size()); ok(GrB_Matrix_build_BOOL(persons_diag_mx.get(), local_persons_indices.data(), local_persons_indices.data(), array_of_true(local_persons_nvals).get(), local_persons_nvals, GxB_PAIR_BOOL)); auto next_mx = GB(GrB_Matrix_dup, persons_diag_mx.get()); auto seen_mx = GB(GrB_Matrix_dup, next_mx.get()); // MSBFS from relevant persons for (int i = 0; i < maximumHopCount; ++i) { ok(GrB_mxm(next_mx.get(), seen_mx.get(), GrB_NULL, GxB_ANY_PAIR_BOOL, next_mx.get(), input.knows.matrix.get(), GrB_DESC_RSC)); GrB_Index next_mx_nvals; ok(GrB_Matrix_nvals(&next_mx_nvals, next_mx.get())); // if emptied the component if (next_mx_nvals == 0) break; ok(GrB_Matrix_eWiseAdd_BinaryOp(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_PAIR_BOOL, seen_mx.get(), next_mx.get(), GrB_NULL)); } // strictly lower triangular matrix is enough for reachable persons // source persons were filtered at the beginning // drop friends in different place ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_OFFDIAG, seen_mx.get(), GrB_NULL, GrB_NULL)); ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_TRIL, seen_mx.get(), GrB_NULL, GrB_NULL)); ok(GrB_mxm(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, seen_mx.get(), persons_diag_mx.get(), GrB_NULL)); auto h_reachable_knows_tril = std::move(seen_mx); // calculate common interests between persons in h hop distance auto common_interests = GB(GrB_Matrix_new, GrB_INT64, input.persons.size(), input.persons.size()); ok(GrB_mxm(common_interests.get(), h_reachable_knows_tril.get(), GrB_NULL, GxB_PLUS_TIMES_INT64, hasInterest.get(), input.hasInterestTran.matrix.get(), GrB_DESC_S)); // count tag scores per person pairs GrB_Index common_interests_nvals; ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests.get())); if (common_interests_nvals < topKLimit) { // there are not enough non-zero scores // add reachable persons with zero common tags // assign 0 to every reachable person pair, but select non-zero score (first operand) if present // common_interests <h_reachable_knows_tril> 1ST= 0 ok(GrB_Matrix_assign_INT64(common_interests.get(), h_reachable_knows_tril.get(), GrB_FIRST_INT64, 0, GrB_ALL, 0, GrB_ALL, 0, GrB_DESC_S)); // recount nvals ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests.get())); } // extract result from matrix std::vector<GrB_Index> common_interests_rows(common_interests_nvals), common_interests_cols(common_interests_nvals); std::vector<int64_t> 
common_interests_vals(common_interests_nvals); { GrB_Index nvals = common_interests_nvals; ok(GrB_Matrix_extractTuples_INT64(common_interests_rows.data(), common_interests_cols.data(), common_interests_vals.data(), &nvals, common_interests.get())); assert(common_interests_nvals == nvals); } // collect top scores for (size_t i = 0; i < common_interests_vals.size(); ++i) { GrB_Index p1_index = common_interests_rows[i], p2_index = common_interests_cols[i]; int64_t score = common_interests_vals[i]; uint64_t p1_id = input.persons.vertexIds[p1_index]; uint64_t p2_id = input.persons.vertexIds[p2_index]; // put the smallest ID first if (p1_id > p2_id) std::swap(p1_id, p2_id); // DESC score person_scores.add({-score, p1_id, p2_id}); } } std::tuple<std::string, std::string> initial_calculation() override { hasInterest = GB(GrB_Matrix_new, GrB_BOOL, input.hasInterestTran.trg->size(), input.hasInterestTran.src->size()); ok(GrB_transpose(hasInterest.get(), GrB_NULL, GrB_NULL, input.hasInterestTran.matrix.get(), GrB_NULL)); auto local_persons = getRelevantPersons(); // extract person indices GrB_Index relevant_persons_nvals; ok(GrB_Vector_nvals(&relevant_persons_nvals, local_persons.get())); if (relevant_persons_nvals == 0) return {"", "Nobody lives/studies/works there."}; auto person_scores = makeSmallestElementsContainer<score_type>(topKLimit); // reachable_count_tags_strategy(local_persons.get(), relevant_persons_nvals, person_scores); // tagCount_filtered_reachable_count_tags_strategy(local_persons.get(), person_scores); tagCount_msbfs_strategy(local_persons.get(), person_scores); std::string result, comment; bool firstIter = true; for (auto[neg_score, p1_id, p2_id]: person_scores.removeElements()) { if (firstIter) firstIter = false; else { result += ' '; comment += ' '; } result += std::to_string(p1_id); result += '|'; result += std::to_string(p2_id); comment += std::to_string(-neg_score); } hasInterest.reset(); return {result, comment}; } public: int getQueryId() const override { return 3; } Query3(BenchmarkParameters const &benchmark_parameters, ParameterType query_params, QueryInput const &input) : Query(benchmark_parameters, std::move(query_params), input) { std::tie(topKLimit, maximumHopCount, placeName) = queryParams; } };
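/*
 * [Illustrative sketch] The core step of the MSBFS loops above, written
 * against the SuiteSparse:GraphBLAS C API (assumed context; error handling
 * and the ok() wrapper are omitted). Each call advances every BFS frontier
 * one hop: the complemented structural mask with replace (GrB_DESC_RSC)
 * keeps already-seen vertices out of Next, and the PAIR operator then folds
 * the new frontier into Seen, i.e. "seen |= next".
 */
#include <GraphBLAS.h>

void msbfs_step(GrB_Matrix Next, GrB_Matrix Seen, GrB_Matrix Edges)
{
    /* Next<!Seen> = Next * Edges over the ANY_PAIR boolean semiring */
    GrB_mxm(Next, Seen, GrB_NULL, GxB_ANY_PAIR_BOOL, Next, Edges, GrB_DESC_RSC);

    /* Seen pair= Next (structure-only union; stored values are irrelevant) */
    GrB_Matrix_eWiseAdd_BinaryOp(Seen, GrB_NULL, GrB_NULL, GxB_PAIR_BOOL,
                                 Seen, Next, GrB_NULL);
}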
#pragma once #include <queue> #include <algorithm> #include <cassert> #include <numeric> #include <memory> #include <set> #include <cstdio> #include <utility> #include "utils.h" #include "Query.h" #include <cstdio> class Query3 : public Query<int, int, std::string> { int topKLimit, maximumHopCount; std::string placeName; GBxx_Object<GrB_Matrix> hasInterest; GBxx_Object<GrB_Vector> getRelevantPersons() { GrB_Index place_index = input.places.findIndexByName(placeName); // set starting place auto selected_places = GB(GrB_Vector_new, GrB_BOOL, input.places.size()); ok(GrB_Vector_setElement_BOOL(selected_places.get(), true, place_index)); auto selected_organizations = GB(GrB_Vector_new, GrB_BOOL, input.organizations.size()); auto relevant_persons = GB(GrB_Vector_new, GrB_BOOL, input.persons.size()); Places::Type place_type = input.places.types[place_index]; while (true) { switch (place_type) { case Places::Continent: // no person to reach from the continent break; case Places::Country: // get companies ok(GrB_vxm(selected_organizations.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, selected_places.get(), input.organizationIsLocatedInPlaceTran.matrix.get(), GrB_NULL)); // get persons working at companies ok(GrB_vxm(relevant_persons.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, selected_organizations.get(), input.workAtTran.matrix.get(), GrB_NULL)); break; case Places::City: // get universities ok(GrB_vxm(selected_organizations.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, selected_places.get(), input.organizationIsLocatedInPlaceTran.matrix.get(), GrB_NULL)); // add persons studying at universities ok(GrB_vxm(relevant_persons.get(), GrB_NULL, GxB_PAIR_BOOL, GxB_ANY_PAIR_BOOL, selected_organizations.get(), input.studyAtTran.matrix.get(), GrB_NULL)); // add persons at cities ok(GrB_vxm(relevant_persons.get(), GrB_NULL, GxB_PAIR_BOOL, GxB_ANY_PAIR_BOOL, selected_places.get(), input.personIsLocatedInCityTran.matrix.get(), GrB_NULL)); break; default: throw std::runtime_error("Unknown Place.type"); } // get parts of current places (overwriting current ones) if (place_type < Places::City) { ok(GrB_vxm(selected_places.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, selected_places.get(), input.isPartOfTran.matrix.get(), GrB_NULL)); ++place_type; } else break; } return relevant_persons; } using score_type = std::tuple<int64_t, uint64_t, uint64_t>; void tagCount_filtered_reachable_count_tags_strategy(GrB_Vector const local_persons, SmallestElementsContainer<score_type, std::less<score_type>> &person_scores) { // maximum value: 10 -> UINT8 auto tag_count_per_person = GB(GrB_Vector_new, GrB_UINT8, input.persons.size()); ok(GrB_Vector_assign_UINT8(tag_count_per_person.get(), local_persons, GrB_NULL, 0, GrB_ALL, 0, GrB_NULL)); ok(GrB_Matrix_reduce_Monoid(tag_count_per_person.get(), local_persons, GrB_NULL, GrB_PLUS_MONOID_UINT8, hasInterest.get(), GrB_NULL)); uint8_t max_tag_count; ok(GrB_Vector_reduce_UINT8(&max_tag_count, GrB_NULL, GrB_MAX_MONOID_UINT8, tag_count_per_person.get(), GrB_NULL)); #ifndef NDEBUG std::cerr << "max_tag_count: " << (unsigned) max_tag_count << std::endl; #endif GrB_Index common_interests_nvals; auto relevant_persons = GB(GrB_Vector_new, GrB_BOOL, input.persons.size()); GBxx_Object<GrB_Matrix> common_interests; for (int lower_tag_count = max_tag_count;;) { // add persons with less tags auto limit = GB(GxB_Scalar_new, GrB_UINT8); ok(GxB_Scalar_setElement_INT32(limit.get(), lower_tag_count)); ok(GxB_Vector_select(relevant_persons.get(), GrB_NULL, GxB_PAIR_BOOL, GxB_EQ_THUNK, 
tag_count_per_person.get(), limit.get(), GrB_NULL)); GrB_Index relevant_persons_nvals; ok(GrB_Vector_nvals(&relevant_persons_nvals, relevant_persons.get())); std::vector<GrB_Index> relevant_persons_indices(relevant_persons_nvals); { GrB_Index nvals = relevant_persons_nvals; ok(GrB_Vector_extractTuples_BOOL(relevant_persons_indices.data(), GrB_NULL, &nvals, relevant_persons.get())); assert(relevant_persons_nvals == nvals); } // build diagonal matrix of relevant persons auto persons_diag_mx = GB(GrB_Matrix_new, GrB_BOOL, input.persons.size(), input.persons.size()); ok(GrB_Matrix_build_BOOL(persons_diag_mx.get(), relevant_persons_indices.data(), relevant_persons_indices.data(), array_of_true(relevant_persons_nvals).get(), relevant_persons_nvals, GxB_PAIR_BOOL)); auto next_mx = GB(GrB_Matrix_dup, persons_diag_mx.get()); auto seen_mx = GB(GrB_Matrix_dup, next_mx.get()); // MSBFS from relevant persons for (int i = 0; i < maximumHopCount; ++i) { ok(GrB_mxm(next_mx.get(), seen_mx.get(), GrB_NULL, GxB_ANY_PAIR_BOOL, next_mx.get(), input.knows.matrix.get(), GrB_DESC_RSC)); GrB_Index next_mx_nvals; ok(GrB_Matrix_nvals(&next_mx_nvals, next_mx.get())); // if emptied the component if (next_mx_nvals == 0) break; ok(GrB_Matrix_eWiseAdd_BinaryOp(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_PAIR_BOOL, seen_mx.get(), next_mx.get(), GrB_NULL)); } // strictly lower triangular matrix is enough for reachable persons // source persons were filtered at the beginning // drop friends in different place ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_OFFDIAG, seen_mx.get(), GrB_NULL, GrB_NULL)); ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_TRIL, seen_mx.get(), GrB_NULL, GrB_NULL)); ok(GrB_mxm(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, seen_mx.get(), persons_diag_mx.get(), GrB_NULL)); auto h_reachable_knows_tril = std::move(seen_mx); // calculate common interests between persons in h hop distance common_interests = GB(GrB_Matrix_new, GrB_INT64, input.persons.size(), input.persons.size()); ok(GrB_mxm(common_interests.get(), h_reachable_knows_tril.get(), GrB_NULL, GxB_PLUS_TIMES_INT64, hasInterest.get(), input.hasInterestTran.matrix.get(), GrB_DESC_S)); // count tag scores per person pairs ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests.get())); if (common_interests_nvals < topKLimit) { // there are not enough non-zero scores // add reachable persons with zero common tags // assign 0 to every reachable person pair, but select non-zero score (first operand) if present // common_interests <h_reachable_knows_tril> 1ST= 0 ok(GrB_Matrix_assign_INT64(common_interests.get(), h_reachable_knows_tril.get(), GrB_FIRST_INT64, 0, GrB_ALL, 0, GrB_ALL, 0, GrB_DESC_S)); // recount nvals ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests.get())); } // extract result from matrix std::vector<GrB_Index> common_interests_rows(common_interests_nvals), common_interests_cols(common_interests_nvals); std::vector<int64_t> common_interests_vals(common_interests_nvals); { GrB_Index nvals = common_interests_nvals; ok(GrB_Matrix_extractTuples_INT64(common_interests_rows.data(), common_interests_cols.data(), common_interests_vals.data(), &nvals, common_interests.get())); assert(common_interests_nvals == nvals); } person_scores.removeElements(); // collect top scores for (size_t i = 0; i < common_interests_vals.size(); ++i) { GrB_Index p1_index = common_interests_rows[i], p2_index = common_interests_cols[i]; int64_t score = common_interests_vals[i]; uint64_t p1_id = 
input.persons.vertexIds[p1_index]; uint64_t p2_id = input.persons.vertexIds[p2_index]; // put the smallest ID first if (p1_id > p2_id) std::swap(p1_id, p2_id); // DESC score person_scores.add({-score, p1_id, p2_id}); } if (person_scores.size() == topKLimit && std::get<0>(person_scores.max()) == -lower_tag_count) { #ifndef NDEBUG std::cerr << "stopped at min(top scores)=" << lower_tag_count << std::endl; #endif break; } if (lower_tag_count != 0) --lower_tag_count; else break; } } inline __attribute__((always_inline)) void push_next(GrB_Matrix Next, GrB_Matrix Seen, GrB_Matrix Edges) { // next<!seen> = next * A ok(GrB_mxm(Next, Seen, NULL, GxB_ANY_PAIR_UINT8, Next, Edges, GrB_DESC_RSC)); // seen pair= next ok(GrB_transpose(Seen, NULL, GxB_PAIR_UINT8, Next, GrB_DESC_T0)); } void tagCount_msbfs_strategy(GrB_Vector const local_persons, SmallestElementsContainer<score_type, std::less<score_type>> &person_scores) { // maximum value: 10 -> UINT8 auto tag_count_per_person = GB(GrB_Vector_new, GrB_UINT8, input.persons.size()); // every local person has at least 0 tags ok(GrB_Vector_assign_UINT8(tag_count_per_person.get(), local_persons, GrB_NULL, 0, GrB_ALL, 0, GrB_NULL)); // count tags per person ok(GrB_Matrix_reduce_Monoid(tag_count_per_person.get(), local_persons, GrB_NULL, GrB_PLUS_MONOID_UINT8, hasInterest.get(), GrB_NULL)); uint8_t max_tag_count; ok(GrB_Vector_reduce_UINT8(&max_tag_count, GrB_NULL, GrB_MAX_MONOID_UINT8, tag_count_per_person.get(), GrB_NULL)); #ifndef NDEBUG std::cerr << "max_tag_count: " << (unsigned) max_tag_count << std::endl; #endif GBxx_Object<GrB_Matrix> last_common_interests_pattern = GB(GrB_Matrix_new, GrB_BOOL, input.persons.size(), input.persons.size()); // persons with 10 tags, persons with 9..10 tags, ... auto relevant_persons = GB(GrB_Vector_new, GrB_BOOL, input.persons.size()); for (int lower_tag_count = max_tag_count;;) { #ifndef NDEBUG std::cerr << "Loop:" << lower_tag_count << std::endl; #endif // add persons with less tags auto limit = GB(GxB_Scalar_new, GrB_UINT8); ok(GxB_Scalar_setElement_INT32(limit.get(), lower_tag_count)); ok(GxB_Vector_select(relevant_persons.get(), GrB_NULL, GxB_PAIR_BOOL, GxB_EQ_THUNK, tag_count_per_person.get(), limit.get(), GrB_NULL)); GrB_Index relevant_persons_nvals; ok(GrB_Vector_nvals(&relevant_persons_nvals, relevant_persons.get())); // extract relevant person indices std::vector<GrB_Index> relevant_persons_indices(relevant_persons_nvals); { GrB_Index nvals = relevant_persons_nvals; ok(GrB_Vector_extractTuples_BOOL(relevant_persons_indices.data(), GrB_NULL, &nvals, relevant_persons.get())); assert(relevant_persons_nvals == nvals); } // build diagonal matrix of relevant persons auto persons_diag_mx = GB(GrB_Matrix_new, GrB_UINT8, input.persons.size(), input.persons.size()); ok(GrB_Matrix_build_UINT8(persons_diag_mx.get(), relevant_persons_indices.data(), relevant_persons_indices.data(), (uint8_t *) array_of_true(relevant_persons_nvals).get(), relevant_persons_nvals, GxB_PAIR_BOOL)); auto next_mx = GB(GrB_Matrix_dup, persons_diag_mx.get()); auto seen_mx = GB(GrB_Matrix_dup, next_mx.get()); // MSBFS from relevant persons for (int i = 0; i < maximumHopCount / 2; ++i) { push_next(next_mx.get(), seen_mx.get(), input.knows.matrix.get()); } // persons reached in the first (maximumHopCount / 2) steps are marked with 2 ok(GrB_Matrix_assign_UINT8(seen_mx.get(), seen_mx.get(), GrB_NULL, 2, GrB_ALL, 0, GrB_ALL, 0, GrB_NULL)); // one more "half" step is needed for odd distances: nodes reached now are marked with 1 if (maximumHopCount % 2 == 
1) { // next<!seen> = next * A ok(GrB_mxm(next_mx.get(), seen_mx.get(), NULL, GxB_ANY_PAIR_BOOL, next_mx.get(), input.knows.matrix.get(), GrB_DESC_RSC)); ok(GrB_Matrix_assign_UINT8(seen_mx.get(), next_mx.get(), GrB_NULL, 1, GrB_ALL, 0, GrB_ALL, 0, GrB_NULL)); } // TODO: offdiag? tril? // ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_OFFDIAG, seen_mx.get(), GrB_NULL, GrB_NULL)); auto half_reachable = std::move(seen_mx); // find vertices where relevant persons meet: reduce to row vector auto columns_where_vertices_meet = GB(GrB_Vector_new, GrB_UINT64, input.persons.size()); ok(GrB_Matrix_reduce_Monoid(columns_where_vertices_meet.get(), GrB_NULL, GrB_NULL, GrB_PLUS_MONOID_UINT64, half_reachable.get(), GrB_DESC_T0)); #ifndef NDEBUG { GrB_Index nvals; ok(GrB_Vector_nvals(&nvals, columns_where_vertices_meet.get())); std::cerr << "columns_where_vertices_meet nvals:" << nvals << std::endl; } #endif // prune: goal: keep vertices where at least 2 vertices meet // invalid values: // - 1: single person reached (in the last "half" step) // - 2: single person reached in the first (maximumHopCount / 2) steps, // or for odd maximumHopCount (2*e+1): 2 persons in the last "half" step with 1+1 values: // two 1 values mean the distance is (floor((2*e+1) / 2) + 1)*2 = (2*e+1)+1 > maximumHopCount // valid values: // - 3 (odd only): 1 + 2 values: floor((2*e+1) / 2) + (floor((2*e+1) / 2) + 1) = 2*e+1 = maximumHopCount // - 4: 2 + 2 values: two persons reached each other in the first (maximumHopCount / 2) steps // false positives: // - >= 3: 1+1+1+... without value 2 auto scalar3 = GB(GxB_Scalar_new, GrB_UINT64); ok(GxB_Scalar_setElement_UINT64(scalar3.get(), 3)); ok(GxB_Vector_select(columns_where_vertices_meet.get(), GrB_NULL, GrB_NULL, GxB_GE_THUNK, columns_where_vertices_meet.get(), scalar3.get(), GrB_NULL)); // extract columns_where_vertices_meet GrB_Index columns_where_vertices_meet_nvals; ok(GrB_Vector_nvals(&columns_where_vertices_meet_nvals, columns_where_vertices_meet.get())); std::vector<GrB_Index> columns_where_vertices_meet_indices(columns_where_vertices_meet_nvals); { GrB_Index nvals = columns_where_vertices_meet_nvals; ok(GrB_Vector_extractTuples_UINT64(columns_where_vertices_meet_indices.data(), nullptr, &nvals, columns_where_vertices_meet.get())); assert(columns_where_vertices_meet_nvals == nvals); } #ifndef NDEBUG std::cerr << "columns_where_vertices_meet_nvals after select:" << columns_where_vertices_meet_nvals << std::endl; #endif // calculate common interests between persons in h hop distance GBxx_Object<GrB_Matrix> common_interests_global = GB(GrB_Matrix_new, GrB_UINT64, input.persons.size(), input.persons.size()); // thread-local GBxx_Object<GrB_Matrix> common_interests = GB(GrB_Matrix_new, GrB_UINT64, input.persons.size(), input.persons.size()); for (GrB_Index i = 0; i < columns_where_vertices_meet_nvals; ++i) { GrB_Index meet_column = columns_where_vertices_meet_indices[i]; // get persons who meet at vertex meet_column auto meeting_vertices = GB(GrB_Vector_new, GrB_UINT8, input.persons.size()); ok(GrB_Col_extract(meeting_vertices.get(), GrB_NULL, GrB_NULL, half_reachable.get(), GrB_ALL, 0, meet_column, GrB_NULL)); GrB_Index meeting_vertices_nvals; ok(GrB_Vector_nvals(&meeting_vertices_nvals, meeting_vertices.get())); std::vector<GrB_Index> meeting_vertices_indices(meeting_vertices_nvals); std::vector<uint8_t> meeting_vertices_vals(meeting_vertices_nvals); { GrB_Index nvals = meeting_vertices_nvals; ok(GrB_Vector_extractTuples_UINT8(meeting_vertices_indices.data(), 
meeting_vertices_vals.data(), &nvals, meeting_vertices.get())); assert(meeting_vertices_nvals == nvals); } for (GrB_Index p1_iter = 0; p1_iter < meeting_vertices_nvals; ++p1_iter) { auto val1 = meeting_vertices_vals[p1_iter]; bool is_from_next1 = val1 == 1; for (GrB_Index p2_iter = 0; p2_iter < p1_iter; ++p2_iter) { auto val2 = meeting_vertices_vals[p2_iter]; // 1 & 1 values means the persons meet after maximumHopCount + 1, which is invalid if (is_from_next1 && val2 == 1) continue; GrB_Index p1 = meeting_vertices_indices[p1_iter]; GrB_Index p2 = meeting_vertices_indices[p2_iter]; ok(GrB_Matrix_setElement_UINT64(common_interests.get(), 0, p1, p2)); } } } ok(GrB_transpose(common_interests_global.get(), last_common_interests_pattern.get(), GxB_PAIR_UINT64, common_interests.get(), GrB_DESC_SCT0)); auto ptr = common_interests_global.release(); ok(GrB_Matrix_wait(&ptr)); common_interests_global.reset(ptr); // store current pattern to evaluate only once ok(GrB_transpose(last_common_interests_pattern.get(), GrB_NULL, GxB_PAIR_BOOL, common_interests_global.get(), GrB_DESC_T0)); // keep offdiag tril ok(GxB_Matrix_select(common_interests_global.get(), GrB_NULL, GrB_NULL, GxB_OFFDIAG, common_interests_global.get(), GrB_NULL, GrB_NULL)); ok(GxB_Matrix_select(common_interests_global.get(), GrB_NULL, GrB_NULL, GxB_TRIL, common_interests_global.get(), GrB_NULL, GrB_NULL)); auto common_interests_pattern = GB(GrB_Matrix_dup, common_interests_global.get()); ok(GrB_mxm(common_interests_global.get(), common_interests_global.get(), GrB_NULL, GxB_PLUS_TIMES_UINT64, hasInterest.get(), input.hasInterestTran.matrix.get(), GrB_DESC_S)); #ifndef NDEBUG ok(GxB_Matrix_fprint(common_interests_global.get(), "common_interests", GxB_SUMMARY, stdout)); #endif // count tag scores per person pairs GrB_Index common_interests_nvals; ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests_global.get())); bool might_contain_duplicates = false; if (lower_tag_count == 0 && person_scores.size() < topKLimit) { might_contain_duplicates = true; // there are not enough non-zero scores // add reachable persons with zero common tags // assign 0 to every reachable person pair, but select non-zero score (first operand) if present // common_interests <h_reachable_knows_tril> 1ST= 0 ok(GrB_Matrix_assign_INT64(common_interests_global.get(), last_common_interests_pattern.get(), GrB_FIRST_INT64, 0, GrB_ALL, 0, GrB_ALL, 0, GrB_DESC_S)); // recount nvals ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests_global.get())); } // extract result from matrix std::vector<GrB_Index> common_interests_rows(common_interests_nvals), common_interests_cols(common_interests_nvals); std::vector<int64_t> common_interests_vals(common_interests_nvals); { GrB_Index nvals = common_interests_nvals; ok(GrB_Matrix_extractTuples_INT64(common_interests_rows.data(), common_interests_cols.data(), common_interests_vals.data(), &nvals, common_interests_global.get())); assert(common_interests_nvals == nvals); } // collect top scores for (size_t i = 0; i < common_interests_vals.size(); ++i) { GrB_Index p1_index = common_interests_rows[i], p2_index = common_interests_cols[i]; int64_t score = common_interests_vals[i]; uint64_t p1_id = input.persons.vertexIds[p1_index]; uint64_t p2_id = input.persons.vertexIds[p2_index]; // put the smallest ID first if (p1_id > p2_id) std::swap(p1_id, p2_id); // do not add duplicates if (!might_contain_duplicates || std::find_if(person_scores.elements.begin(), person_scores.elements.end(), [&](auto &t) { return std::get<1>(t) == 
p1_id && std::get<2>(t) == p2_id; }) == person_scores.elements.end()) { // DESC score person_scores.add({-score, p1_id, p2_id}); } } if (person_scores.size() == topKLimit && std::get<0>(person_scores.max()) == -lower_tag_count) { #ifndef NDEBUG std::cerr << "stopped at min(top scores)=" << lower_tag_count << std::endl; #endif break; } if (lower_tag_count != 0) --lower_tag_count; else break; } } void reachable_count_tags_strategy(GrB_Vector const local_persons, GrB_Index const local_persons_nvals, SmallestElementsContainer<score_type, std::less<score_type>> &person_scores) { std::vector<GrB_Index> local_persons_indices(local_persons_nvals); { GrB_Index nvals = local_persons_nvals; ok(GrB_Vector_extractTuples_BOOL(local_persons_indices.data(), GrB_NULL, &nvals, local_persons)); assert(local_persons_nvals == nvals); } // build diagonal matrix of local persons auto persons_diag_mx = GB(GrB_Matrix_new, GrB_BOOL, input.persons.size(), input.persons.size()); ok(GrB_Matrix_build_BOOL(persons_diag_mx.get(), local_persons_indices.data(), local_persons_indices.data(), array_of_true(local_persons_nvals).get(), local_persons_nvals, GxB_PAIR_BOOL)); auto next_mx = GB(GrB_Matrix_dup, persons_diag_mx.get()); auto seen_mx = GB(GrB_Matrix_dup, next_mx.get()); // MSBFS from relevant persons for (int i = 0; i < maximumHopCount; ++i) { ok(GrB_mxm(next_mx.get(), seen_mx.get(), GrB_NULL, GxB_ANY_PAIR_BOOL, next_mx.get(), input.knows.matrix.get(), GrB_DESC_RSC)); GrB_Index next_mx_nvals; ok(GrB_Matrix_nvals(&next_mx_nvals, next_mx.get())); // if emptied the component if (next_mx_nvals == 0) break; ok(GrB_Matrix_eWiseAdd_BinaryOp(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_PAIR_BOOL, seen_mx.get(), next_mx.get(), GrB_NULL)); } // strictly lower triangular matrix is enough for reachable persons // source persons were filtered at the beginning // drop friends in different place ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_OFFDIAG, seen_mx.get(), GrB_NULL, GrB_NULL)); ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_TRIL, seen_mx.get(), GrB_NULL, GrB_NULL)); ok(GrB_mxm(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, seen_mx.get(), persons_diag_mx.get(), GrB_NULL)); auto h_reachable_knows_tril = std::move(seen_mx); // calculate common interests between persons in h hop distance auto common_interests = GB(GrB_Matrix_new, GrB_INT64, input.persons.size(), input.persons.size()); ok(GrB_mxm(common_interests.get(), h_reachable_knows_tril.get(), GrB_NULL, GxB_PLUS_TIMES_INT64, hasInterest.get(), input.hasInterestTran.matrix.get(), GrB_DESC_S)); // count tag scores per person pairs GrB_Index common_interests_nvals; ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests.get())); if (common_interests_nvals < topKLimit) { // there are not enough non-zero scores // add reachable persons with zero common tags // assign 0 to every reachable person pair, but select non-zero score (first operand) if present // common_interests <h_reachable_knows_tril> 1ST= 0 ok(GrB_Matrix_assign_INT64(common_interests.get(), h_reachable_knows_tril.get(), GrB_FIRST_INT64, 0, GrB_ALL, 0, GrB_ALL, 0, GrB_DESC_S)); // recount nvals ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests.get())); } // extract result from matrix std::vector<GrB_Index> common_interests_rows(common_interests_nvals), common_interests_cols(common_interests_nvals); std::vector<int64_t> common_interests_vals(common_interests_nvals); { GrB_Index nvals = common_interests_nvals; ok(GrB_Matrix_extractTuples_INT64(common_interests_rows.data(), 
common_interests_cols.data(), common_interests_vals.data(), &nvals, common_interests.get())); assert(common_interests_nvals == nvals); } // collect top scores for (size_t i = 0; i < common_interests_vals.size(); ++i) { GrB_Index p1_index = common_interests_rows[i], p2_index = common_interests_cols[i]; int64_t score = common_interests_vals[i]; uint64_t p1_id = input.persons.vertexIds[p1_index]; uint64_t p2_id = input.persons.vertexIds[p2_index]; // put the smallest ID first if (p1_id > p2_id) std::swap(p1_id, p2_id); // DESC score person_scores.add({-score, p1_id, p2_id}); } } std::tuple<std::string, std::string> initial_calculation() override { hasInterest = GB(GrB_Matrix_new, GrB_BOOL, input.hasInterestTran.trg->size(), input.hasInterestTran.src->size()); ok(GrB_transpose(hasInterest.get(), GrB_NULL, GrB_NULL, input.hasInterestTran.matrix.get(), GrB_NULL)); auto local_persons = getRelevantPersons(); // extract person indices GrB_Index relevant_persons_nvals; ok(GrB_Vector_nvals(&relevant_persons_nvals, local_persons.get())); if (relevant_persons_nvals == 0) return {"", "Nobody lives/studies/works there."}; auto person_scores = makeSmallestElementsContainer<score_type>(topKLimit); // reachable_count_tags_strategy(local_persons.get(), relevant_persons_nvals, person_scores); // tagCount_filtered_reachable_count_tags_strategy(local_persons.get(), person_scores); tagCount_msbfs_strategy(local_persons.get(), person_scores); std::string result, comment; bool firstIter = true; for (auto[neg_score, p1_id, p2_id]: person_scores.removeElements()) { if (firstIter) firstIter = false; else { result += ' '; comment += ' '; } result += std::to_string(p1_id); result += '|'; result += std::to_string(p2_id); comment += std::to_string(-neg_score); } hasInterest.reset(); return {result, comment}; } public: int getQueryId() const override { return 3; } Query3(BenchmarkParameters const &benchmark_parameters, ParameterType query_params, QueryInput const &input) : Query(benchmark_parameters, std::move(query_params), input) { std::tie(topKLimit, maximumHopCount, placeName) = queryParams; } };
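The reachable_count_tags_strategy and tagCount_filtered_reachable_count_tags_strategy variants above share the same masked multi-source BFS kernel: the frontier matrix (one row per source person) is multiplied by knows with the complement of seen as mask (GrB_DESC_RSC: replace, structural, complemented), and the newly reached entries are folded into seen. A condensed sketch of just that loop, assuming SuiteSparse:GraphBLAS; the ok(...) error-checking wrapper used in the code above is omitted here:

#include <GraphBLAS.h>

// next and seen both start as the diagonal matrix over the source persons.
void msbfs_reach(GrB_Matrix next, GrB_Matrix seen, GrB_Matrix knows, int max_hops) {
    for (int i = 0; i < max_hops; ++i) {
        // next<!seen,replace> = next * knows: expand frontiers to unseen
        // vertices only (ANY_PAIR semiring: pure reachability, no values)
        GrB_mxm(next, seen, GrB_NULL, GxB_ANY_PAIR_BOOL, next, knows, GrB_DESC_RSC);
        GrB_Index nvals;
        GrB_Matrix_nvals(&nvals, next);
        if (nvals == 0) break; // every frontier has exhausted its component
        // seen "pair"= next: record the newly reached vertices
        GrB_Matrix_eWiseAdd_BinaryOp(seen, GrB_NULL, GrB_NULL, GxB_PAIR_BOOL,
                                     seen, next, GrB_NULL);
    }
}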
#pragma once #include <queue> #include <algorithm> #include <cassert> #include <numeric> #include <memory> #include <set> #include <cstdio> #include <utility> #include "utils.h" #include "Query.h" #include <cstdio> class Query3 : public Query<int, int, std::string> { int topKLimit, maximumHopCount; std::string placeName; GBxx_Object<GrB_Matrix> hasInterest; GBxx_Object<GrB_Vector> getRelevantPersons() { GrB_Index place_index = input.places.findIndexByName(placeName); // set starting place auto selected_places = GB(GrB_Vector_new, GrB_BOOL, input.places.size()); ok(GrB_Vector_setElement_BOOL(selected_places.get(), true, place_index)); auto selected_organizations = GB(GrB_Vector_new, GrB_BOOL, input.organizations.size()); auto relevant_persons = GB(GrB_Vector_new, GrB_BOOL, input.persons.size()); Places::Type place_type = input.places.types[place_index]; while (true) { switch (place_type) { case Places::Continent: // no person to reach from the continent break; case Places::Country: // get companies ok(GrB_vxm(selected_organizations.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, selected_places.get(), input.organizationIsLocatedInPlaceTran.matrix.get(), GrB_NULL)); // get persons working at companies ok(GrB_vxm(relevant_persons.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, selected_organizations.get(), input.workAtTran.matrix.get(), GrB_NULL)); break; case Places::City: // get universities ok(GrB_vxm(selected_organizations.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, selected_places.get(), input.organizationIsLocatedInPlaceTran.matrix.get(), GrB_NULL)); // add persons studying at universities ok(GrB_vxm(relevant_persons.get(), GrB_NULL, GxB_PAIR_BOOL, GxB_ANY_PAIR_BOOL, selected_organizations.get(), input.studyAtTran.matrix.get(), GrB_NULL)); // add persons at cities ok(GrB_vxm(relevant_persons.get(), GrB_NULL, GxB_PAIR_BOOL, GxB_ANY_PAIR_BOOL, selected_places.get(), input.personIsLocatedInCityTran.matrix.get(), GrB_NULL)); break; default: throw std::runtime_error("Unknown Place.type"); } // get parts of current places (overwriting current ones) if (place_type < Places::City) { ok(GrB_vxm(selected_places.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, selected_places.get(), input.isPartOfTran.matrix.get(), GrB_NULL)); ++place_type; } else break; } return relevant_persons; } using score_type = std::tuple<int64_t, uint64_t, uint64_t>; void tagCount_filtered_reachable_count_tags_strategy(GrB_Vector const local_persons, SmallestElementsContainer<score_type, std::less<score_type>> &person_scores) { // maximum value: 10 -> UINT8 auto tag_count_per_person = GB(GrB_Vector_new, GrB_UINT8, input.persons.size()); ok(GrB_Vector_assign_UINT8(tag_count_per_person.get(), local_persons, GrB_NULL, 0, GrB_ALL, 0, GrB_NULL)); ok(GrB_Matrix_reduce_Monoid(tag_count_per_person.get(), local_persons, GrB_NULL, GrB_PLUS_MONOID_UINT8, hasInterest.get(), GrB_NULL)); uint8_t max_tag_count; ok(GrB_Vector_reduce_UINT8(&max_tag_count, GrB_NULL, GrB_MAX_MONOID_UINT8, tag_count_per_person.get(), GrB_NULL)); #ifndef NDEBUG std::cerr << "max_tag_count: " << (unsigned) max_tag_count << std::endl; #endif GrB_Index common_interests_nvals; auto relevant_persons = GB(GrB_Vector_new, GrB_BOOL, input.persons.size()); GBxx_Object<GrB_Matrix> common_interests; for (int lower_tag_count = max_tag_count;;) { // add persons with less tags auto limit = GB(GxB_Scalar_new, GrB_UINT8); ok(GxB_Scalar_setElement_INT32(limit.get(), lower_tag_count)); ok(GxB_Vector_select(relevant_persons.get(), GrB_NULL, GxB_PAIR_BOOL, GxB_EQ_THUNK, 
tag_count_per_person.get(), limit.get(), GrB_NULL)); GrB_Index relevant_persons_nvals; ok(GrB_Vector_nvals(&relevant_persons_nvals, relevant_persons.get())); std::vector<GrB_Index> relevant_persons_indices(relevant_persons_nvals); { GrB_Index nvals = relevant_persons_nvals; ok(GrB_Vector_extractTuples_BOOL(relevant_persons_indices.data(), GrB_NULL, &nvals, relevant_persons.get())); assert(relevant_persons_nvals == nvals); } // build diagonal matrix of relevant persons auto persons_diag_mx = GB(GrB_Matrix_new, GrB_BOOL, input.persons.size(), input.persons.size()); ok(GrB_Matrix_build_BOOL(persons_diag_mx.get(), relevant_persons_indices.data(), relevant_persons_indices.data(), array_of_true(relevant_persons_nvals).get(), relevant_persons_nvals, GxB_PAIR_BOOL)); auto next_mx = GB(GrB_Matrix_dup, persons_diag_mx.get()); auto seen_mx = GB(GrB_Matrix_dup, next_mx.get()); // MSBFS from relevant persons for (int i = 0; i < maximumHopCount; ++i) { ok(GrB_mxm(next_mx.get(), seen_mx.get(), GrB_NULL, GxB_ANY_PAIR_BOOL, next_mx.get(), input.knows.matrix.get(), GrB_DESC_RSC)); GrB_Index next_mx_nvals; ok(GrB_Matrix_nvals(&next_mx_nvals, next_mx.get())); // if emptied the component if (next_mx_nvals == 0) break; ok(GrB_Matrix_eWiseAdd_BinaryOp(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_PAIR_BOOL, seen_mx.get(), next_mx.get(), GrB_NULL)); } // strictly lower triangular matrix is enough for reachable persons // source persons were filtered at the beginning // drop friends in different place ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_OFFDIAG, seen_mx.get(), GrB_NULL, GrB_NULL)); ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_TRIL, seen_mx.get(), GrB_NULL, GrB_NULL)); ok(GrB_mxm(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, seen_mx.get(), persons_diag_mx.get(), GrB_NULL)); auto h_reachable_knows_tril = std::move(seen_mx); // calculate common interests between persons in h hop distance common_interests = GB(GrB_Matrix_new, GrB_INT64, input.persons.size(), input.persons.size()); ok(GrB_mxm(common_interests.get(), h_reachable_knows_tril.get(), GrB_NULL, GxB_PLUS_TIMES_INT64, hasInterest.get(), input.hasInterestTran.matrix.get(), GrB_DESC_S)); // count tag scores per person pairs ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests.get())); if (common_interests_nvals < topKLimit) { // there are not enough non-zero scores // add reachable persons with zero common tags // assign 0 to every reachable person pair, but select non-zero score (first operand) if present // common_interests <h_reachable_knows_tril> 1ST= 0 ok(GrB_Matrix_assign_INT64(common_interests.get(), h_reachable_knows_tril.get(), GrB_FIRST_INT64, 0, GrB_ALL, 0, GrB_ALL, 0, GrB_DESC_S)); // recount nvals ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests.get())); } // extract result from matrix std::vector<GrB_Index> common_interests_rows(common_interests_nvals), common_interests_cols(common_interests_nvals); std::vector<int64_t> common_interests_vals(common_interests_nvals); { GrB_Index nvals = common_interests_nvals; ok(GrB_Matrix_extractTuples_INT64(common_interests_rows.data(), common_interests_cols.data(), common_interests_vals.data(), &nvals, common_interests.get())); assert(common_interests_nvals == nvals); } person_scores.removeElements(); // collect top scores for (size_t i = 0; i < common_interests_vals.size(); ++i) { GrB_Index p1_index = common_interests_rows[i], p2_index = common_interests_cols[i]; int64_t score = common_interests_vals[i]; uint64_t p1_id = 
input.persons.vertexIds[p1_index]; uint64_t p2_id = input.persons.vertexIds[p2_index]; // put the smallest ID first if (p1_id > p2_id) std::swap(p1_id, p2_id); // DESC score person_scores.add({-score, p1_id, p2_id}); } if (person_scores.size() == topKLimit && std::get<0>(person_scores.max()) == -lower_tag_count) { #ifndef NDEBUG std::cerr << "stopped at min(top scores)=" << lower_tag_count << std::endl; #endif break; } if (lower_tag_count != 0) --lower_tag_count; else break; } } inline __attribute__((always_inline)) void push_next(GrB_Matrix Next, GrB_Matrix Seen, GrB_Matrix Edges) { // next<!seen> = next * A ok(GrB_mxm(Next, Seen, NULL, GxB_ANY_PAIR_UINT8, Next, Edges, GrB_DESC_RSC)); // seen pair= next ok(GrB_transpose(Seen, NULL, GxB_PAIR_UINT8, Next, GrB_DESC_T0)); } void tagCount_msbfs_strategy(GrB_Vector const local_persons, SmallestElementsContainer<score_type, std::less<score_type>> &person_scores) { // maximum value: 10 -> UINT8 auto tag_count_per_person = GB(GrB_Vector_new, GrB_UINT8, input.persons.size()); // every local person has at least 0 tags ok(GrB_Vector_assign_UINT8(tag_count_per_person.get(), local_persons, GrB_NULL, 0, GrB_ALL, 0, GrB_NULL)); // count tags per person ok(GrB_Matrix_reduce_Monoid(tag_count_per_person.get(), local_persons, GrB_NULL, GrB_PLUS_MONOID_UINT8, hasInterest.get(), GrB_NULL)); uint8_t max_tag_count; ok(GrB_Vector_reduce_UINT8(&max_tag_count, GrB_NULL, GrB_MAX_MONOID_UINT8, tag_count_per_person.get(), GrB_NULL)); #ifndef NDEBUG std::cerr << "max_tag_count: " << (unsigned) max_tag_count << std::endl; #endif GBxx_Object<GrB_Matrix> last_common_interests_pattern = GB(GrB_Matrix_new, GrB_BOOL, input.persons.size(), input.persons.size()); // persons with 10 tags, persons with 9..10 tags, ... auto relevant_persons = GB(GrB_Vector_new, GrB_BOOL, input.persons.size()); for (int lower_tag_count = max_tag_count;;) { #ifndef NDEBUG std::cerr << "Loop:" << lower_tag_count << std::endl; #endif // add persons with less tags auto limit = GB(GxB_Scalar_new, GrB_UINT8); ok(GxB_Scalar_setElement_INT32(limit.get(), lower_tag_count)); ok(GxB_Vector_select(relevant_persons.get(), GrB_NULL, GxB_PAIR_BOOL, GxB_EQ_THUNK, tag_count_per_person.get(), limit.get(), GrB_NULL)); GrB_Index relevant_persons_nvals; ok(GrB_Vector_nvals(&relevant_persons_nvals, relevant_persons.get())); // extract relevant person indices std::vector<GrB_Index> relevant_persons_indices(relevant_persons_nvals); { GrB_Index nvals = relevant_persons_nvals; ok(GrB_Vector_extractTuples_BOOL(relevant_persons_indices.data(), GrB_NULL, &nvals, relevant_persons.get())); assert(relevant_persons_nvals == nvals); } // build diagonal matrix of relevant persons auto persons_diag_mx = GB(GrB_Matrix_new, GrB_UINT8, input.persons.size(), input.persons.size()); ok(GrB_Matrix_build_UINT8(persons_diag_mx.get(), relevant_persons_indices.data(), relevant_persons_indices.data(), (uint8_t *) array_of_true(relevant_persons_nvals).get(), relevant_persons_nvals, GxB_PAIR_BOOL)); auto next_mx = GB(GrB_Matrix_dup, persons_diag_mx.get()); auto seen_mx = GB(GrB_Matrix_dup, next_mx.get()); // MSBFS from relevant persons for (int i = 0; i < maximumHopCount / 2; ++i) { push_next(next_mx.get(), seen_mx.get(), input.knows.matrix.get()); } // persons reached in the first (maximumHopCount / 2) steps are marked with 2 ok(GrB_Matrix_assign_UINT8(seen_mx.get(), seen_mx.get(), GrB_NULL, 2, GrB_ALL, 0, GrB_ALL, 0, GrB_NULL)); // one more "half" step is needed for odd distances: nodes reached now are marked with 1 if (maximumHopCount % 2 == 
1) { // next<!seen> = next * A ok(GrB_mxm(next_mx.get(), seen_mx.get(), NULL, GxB_ANY_PAIR_BOOL, next_mx.get(), input.knows.matrix.get(), GrB_DESC_RSC)); ok(GrB_Matrix_assign_UINT8(seen_mx.get(), next_mx.get(), GrB_NULL, 1, GrB_ALL, 0, GrB_ALL, 0, GrB_NULL)); } // TODO: offdiag? tril? // ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_OFFDIAG, seen_mx.get(), GrB_NULL, GrB_NULL)); auto half_reachable = std::move(seen_mx); // find vertices where relevant persons meet: reduce to row vector auto columns_where_vertices_meet = GB(GrB_Vector_new, GrB_UINT64, input.persons.size()); ok(GrB_Matrix_reduce_Monoid(columns_where_vertices_meet.get(), GrB_NULL, GrB_NULL, GrB_PLUS_MONOID_UINT64, half_reachable.get(), GrB_DESC_T0)); #ifndef NDEBUG { GrB_Index nvals; ok(GrB_Vector_nvals(&nvals, columns_where_vertices_meet.get())); std::cerr << "columns_where_vertices_meet nvals:" << nvals << std::endl; } #endif // prune: goal: keep vertices where at least 2 vertices meet // invalid values: // - 1: single person reached (in the last "half" step) // - 2: single person reached in the first (maximumHopCount / 2) steps, // or for odd maximumHopCount (2*e+1): 2 persons in the last "half" step with 1+1 values: // two 1 values mean the distance is (floor((2*e+1) / 2) + 1)*2 = (2*e+1)+1 > maximumHopCount // valid values: // - 3 (odd only): 1 + 2 values: floor((2*e+1) / 2) + (floor((2*e+1) / 2) + 1) = 2*e+1 = maximumHopCount // - 4: 2 + 2 values: two persons reached each other in the first (maximumHopCount / 2) steps // false positives: // - >= 3: 1+1+1+... without value 2 auto scalar3 = GB(GxB_Scalar_new, GrB_UINT64); ok(GxB_Scalar_setElement_UINT64(scalar3.get(), 3)); ok(GxB_Vector_select(columns_where_vertices_meet.get(), GrB_NULL, GrB_NULL, GxB_GE_THUNK, columns_where_vertices_meet.get(), scalar3.get(), GrB_NULL)); // extract columns_where_vertices_meet GrB_Index columns_where_vertices_meet_nvals; ok(GrB_Vector_nvals(&columns_where_vertices_meet_nvals, columns_where_vertices_meet.get())); std::vector<GrB_Index> columns_where_vertices_meet_indices(columns_where_vertices_meet_nvals); { GrB_Index nvals = columns_where_vertices_meet_nvals; ok(GrB_Vector_extractTuples_UINT64(columns_where_vertices_meet_indices.data(), nullptr, &nvals, columns_where_vertices_meet.get())); assert(columns_where_vertices_meet_nvals == nvals); } #ifndef NDEBUG std::cerr << "columns_where_vertices_meet_nvals after select:" << columns_where_vertices_meet_nvals << std::endl; #endif // calculate common interests between persons in h hop distance GBxx_Object<GrB_Matrix> common_interests_global = GB(GrB_Matrix_new, GrB_UINT64, input.persons.size(), input.persons.size()); #pragma omp parallel num_threads(GlobalNThreads) { // thread-local GBxx_Object<GrB_Matrix> common_interests = GB(GrB_Matrix_new, GrB_UINT64, input.persons.size(), input.persons.size()); #pragma omp for schedule(static) for (GrB_Index i = 0; i < columns_where_vertices_meet_nvals; ++i) { GrB_Index meet_column = columns_where_vertices_meet_indices[i]; // get persons who meet at vertex meet_column auto meeting_vertices = GB(GrB_Vector_new, GrB_UINT8, input.persons.size()); ok(GrB_Col_extract(meeting_vertices.get(), GrB_NULL, GrB_NULL, half_reachable.get(), GrB_ALL, 0, meet_column, GrB_NULL)); GrB_Index meeting_vertices_nvals; ok(GrB_Vector_nvals(&meeting_vertices_nvals, meeting_vertices.get())); std::vector<GrB_Index> meeting_vertices_indices(meeting_vertices_nvals); std::vector<uint8_t> meeting_vertices_vals(meeting_vertices_nvals); { GrB_Index nvals = 
meeting_vertices_nvals; ok(GrB_Vector_extractTuples_UINT8(meeting_vertices_indices.data(), meeting_vertices_vals.data(), &nvals, meeting_vertices.get())); assert(meeting_vertices_nvals == nvals); } for (GrB_Index p1_iter = 0; p1_iter < meeting_vertices_nvals; ++p1_iter) { auto val1 = meeting_vertices_vals[p1_iter]; bool is_from_next1 = val1 == 1; for (GrB_Index p2_iter = 0; p2_iter < p1_iter; ++p2_iter) { auto val2 = meeting_vertices_vals[p2_iter]; // 1 & 1 values means the persons meet after maximumHopCount + 1, which is invalid if (is_from_next1 && val2 == 1) continue; GrB_Index p1 = meeting_vertices_indices[p1_iter]; GrB_Index p2 = meeting_vertices_indices[p2_iter]; ok(GrB_Matrix_setElement_UINT64(common_interests.get(), 0, p1, p2)); } } } #pragma omp critical(Q3_merge_thread_local_matrices) { ok(GrB_transpose(common_interests_global.get(), last_common_interests_pattern.get(), GxB_PAIR_UINT64, common_interests.get(), GrB_DESC_SCT0)); auto ptr = common_interests_global.release(); ok(GrB_Matrix_wait(&ptr)); common_interests_global.reset(ptr); } } // store current pattern to evaluate only once ok(GrB_transpose(last_common_interests_pattern.get(), GrB_NULL, GxB_PAIR_BOOL, common_interests_global.get(), GrB_DESC_T0)); // keep offdiag tril ok(GxB_Matrix_select(common_interests_global.get(), GrB_NULL, GrB_NULL, GxB_OFFDIAG, common_interests_global.get(), GrB_NULL, GrB_NULL)); ok(GxB_Matrix_select(common_interests_global.get(), GrB_NULL, GrB_NULL, GxB_TRIL, common_interests_global.get(), GrB_NULL, GrB_NULL)); auto common_interests_pattern = GB(GrB_Matrix_dup, common_interests_global.get()); ok(GrB_mxm(common_interests_global.get(), common_interests_global.get(), GrB_NULL, GxB_PLUS_TIMES_UINT64, hasInterest.get(), input.hasInterestTran.matrix.get(), GrB_DESC_S)); #ifndef NDEBUG ok(GxB_Matrix_fprint(common_interests_global.get(), "common_interests", GxB_SUMMARY, stdout)); #endif // count tag scores per person pairs GrB_Index common_interests_nvals; ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests_global.get())); bool might_contain_duplicates = false; if (lower_tag_count == 0 && person_scores.size() < topKLimit) { might_contain_duplicates = true; // there are not enough non-zero scores // add reachable persons with zero common tags // assign 0 to every reachable person pair, but select non-zero score (first operand) if present // common_interests <h_reachable_knows_tril> 1ST= 0 ok(GrB_Matrix_assign_INT64(common_interests_global.get(), last_common_interests_pattern.get(), GrB_FIRST_INT64, 0, GrB_ALL, 0, GrB_ALL, 0, GrB_DESC_S)); // recount nvals ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests_global.get())); } // extract result from matrix std::vector<GrB_Index> common_interests_rows(common_interests_nvals), common_interests_cols(common_interests_nvals); std::vector<int64_t> common_interests_vals(common_interests_nvals); { GrB_Index nvals = common_interests_nvals; ok(GrB_Matrix_extractTuples_INT64(common_interests_rows.data(), common_interests_cols.data(), common_interests_vals.data(), &nvals, common_interests_global.get())); assert(common_interests_nvals == nvals); } // collect top scores for (size_t i = 0; i < common_interests_vals.size(); ++i) { GrB_Index p1_index = common_interests_rows[i], p2_index = common_interests_cols[i]; int64_t score = common_interests_vals[i]; uint64_t p1_id = input.persons.vertexIds[p1_index]; uint64_t p2_id = input.persons.vertexIds[p2_index]; // put the smallest ID first if (p1_id > p2_id) std::swap(p1_id, p2_id); // do not add duplicates 
if (!might_contain_duplicates || std::find_if(person_scores.elements.begin(), person_scores.elements.end(), [&](auto &t) { return std::get<1>(t) == p1_id && std::get<2>(t) == p2_id; }) == person_scores.elements.end()) { // DESC score person_scores.add({-score, p1_id, p2_id}); } } if (person_scores.size() == topKLimit && std::get<0>(person_scores.max()) == -lower_tag_count) { #ifndef NDEBUG std::cerr << "stopped at min(top scores)=" << lower_tag_count << std::endl; #endif break; } if (lower_tag_count != 0) --lower_tag_count; else break; } } void reachable_count_tags_strategy(GrB_Vector const local_persons, GrB_Index const local_persons_nvals, SmallestElementsContainer<score_type, std::less<score_type>> &person_scores) { std::vector<GrB_Index> local_persons_indices(local_persons_nvals); { GrB_Index nvals = local_persons_nvals; ok(GrB_Vector_extractTuples_BOOL(local_persons_indices.data(), GrB_NULL, &nvals, local_persons)); assert(local_persons_nvals == nvals); } // build diagonal matrix of local persons auto persons_diag_mx = GB(GrB_Matrix_new, GrB_BOOL, input.persons.size(), input.persons.size()); ok(GrB_Matrix_build_BOOL(persons_diag_mx.get(), local_persons_indices.data(), local_persons_indices.data(), array_of_true(local_persons_nvals).get(), local_persons_nvals, GxB_PAIR_BOOL)); auto next_mx = GB(GrB_Matrix_dup, persons_diag_mx.get()); auto seen_mx = GB(GrB_Matrix_dup, next_mx.get()); // MSBFS from relevant persons for (int i = 0; i < maximumHopCount; ++i) { ok(GrB_mxm(next_mx.get(), seen_mx.get(), GrB_NULL, GxB_ANY_PAIR_BOOL, next_mx.get(), input.knows.matrix.get(), GrB_DESC_RSC)); GrB_Index next_mx_nvals; ok(GrB_Matrix_nvals(&next_mx_nvals, next_mx.get())); // if emptied the component if (next_mx_nvals == 0) break; ok(GrB_Matrix_eWiseAdd_BinaryOp(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_PAIR_BOOL, seen_mx.get(), next_mx.get(), GrB_NULL)); } // strictly lower triangular matrix is enough for reachable persons // source persons were filtered at the beginning // drop friends in different place ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_OFFDIAG, seen_mx.get(), GrB_NULL, GrB_NULL)); ok(GxB_Matrix_select(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_TRIL, seen_mx.get(), GrB_NULL, GrB_NULL)); ok(GrB_mxm(seen_mx.get(), GrB_NULL, GrB_NULL, GxB_ANY_PAIR_BOOL, seen_mx.get(), persons_diag_mx.get(), GrB_NULL)); auto h_reachable_knows_tril = std::move(seen_mx); // calculate common interests between persons in h hop distance auto common_interests = GB(GrB_Matrix_new, GrB_INT64, input.persons.size(), input.persons.size()); ok(GrB_mxm(common_interests.get(), h_reachable_knows_tril.get(), GrB_NULL, GxB_PLUS_TIMES_INT64, hasInterest.get(), input.hasInterestTran.matrix.get(), GrB_DESC_S)); // count tag scores per person pairs GrB_Index common_interests_nvals; ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests.get())); if (common_interests_nvals < topKLimit) { // there are not enough non-zero scores // add reachable persons with zero common tags // assign 0 to every reachable person pair, but select non-zero score (first operand) if present // common_interests <h_reachable_knows_tril> 1ST= 0 ok(GrB_Matrix_assign_INT64(common_interests.get(), h_reachable_knows_tril.get(), GrB_FIRST_INT64, 0, GrB_ALL, 0, GrB_ALL, 0, GrB_DESC_S)); // recount nvals ok(GrB_Matrix_nvals(&common_interests_nvals, common_interests.get())); } // extract result from matrix std::vector<GrB_Index> common_interests_rows(common_interests_nvals), common_interests_cols(common_interests_nvals); std::vector<int64_t> 
common_interests_vals(common_interests_nvals); { GrB_Index nvals = common_interests_nvals; ok(GrB_Matrix_extractTuples_INT64(common_interests_rows.data(), common_interests_cols.data(), common_interests_vals.data(), &nvals, common_interests.get())); assert(common_interests_nvals == nvals); } // collect top scores for (size_t i = 0; i < common_interests_vals.size(); ++i) { GrB_Index p1_index = common_interests_rows[i], p2_index = common_interests_cols[i]; int64_t score = common_interests_vals[i]; uint64_t p1_id = input.persons.vertexIds[p1_index]; uint64_t p2_id = input.persons.vertexIds[p2_index]; // put the smallest ID first if (p1_id > p2_id) std::swap(p1_id, p2_id); // DESC score person_scores.add({-score, p1_id, p2_id}); } } std::tuple<std::string, std::string> initial_calculation() override { hasInterest = GB(GrB_Matrix_new, GrB_BOOL, input.hasInterestTran.trg->size(), input.hasInterestTran.src->size()); ok(GrB_transpose(hasInterest.get(), GrB_NULL, GrB_NULL, input.hasInterestTran.matrix.get(), GrB_NULL)); auto local_persons = getRelevantPersons(); // extract person indices GrB_Index relevant_persons_nvals; ok(GrB_Vector_nvals(&relevant_persons_nvals, local_persons.get())); if (relevant_persons_nvals == 0) return {"", "Nobody lives/studies/works there."}; auto person_scores = makeSmallestElementsContainer<score_type>(topKLimit); // reachable_count_tags_strategy(local_persons.get(), relevant_persons_nvals, person_scores); // tagCount_filtered_reachable_count_tags_strategy(local_persons.get(), person_scores); tagCount_msbfs_strategy(local_persons.get(), person_scores); std::string result, comment; bool firstIter = true; for (auto[neg_score, p1_id, p2_id]: person_scores.removeElements()) { if (firstIter) firstIter = false; else { result += ' '; comment += ' '; } result += std::to_string(p1_id); result += '|'; result += std::to_string(p2_id); comment += std::to_string(-neg_score); } hasInterest.reset(); return {result, comment}; } public: int getQueryId() const override { return 3; } Query3(BenchmarkParameters const &benchmark_parameters, ParameterType query_params, QueryInput const &input) : Query(benchmark_parameters, std::move(query_params), input) { std::tie(topKLimit, maximumHopCount, placeName) = queryParams; } };
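The tagCount_msbfs_strategy above halves the BFS depth by letting pairs meet in the middle. With h = maximumHopCount and e = h/2 (integer division), every vertex a source reaches within e steps is marked 2, and for odd h one extra half step marks newly reached vertices 1. Worked example for h = 5, e = 2: two sources meeting with marks 2+2 are each within 2 hops of the meet vertex, so the pair distance is at most 4 <= 5; marks 2+1 give at most 2 + 3 = 5 = h; marks 1+1 give 3 + 3 = 6 > h. That is why the column-sum select keeps sums >= 3 and why the inner pairwise loop still skips 1&1 pairs (a sum of 3 can also arise as 1+1+1, a false positive). A hypothetical helper spelling out that final per-pair check:

#include <cstdint>

// Mirrors the inner-loop filter above: a meeting pair is within
// maximumHopCount hops unless both sides arrived via the extra half step,
// since 1 + 1 would imply distance 2*(h/2 + 1) = h + 1 for odd h.
inline bool pair_within_hops(uint8_t val1, uint8_t val2) {
    return !(val1 == 1 && val2 == 1);
}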
GB_unaryop__lnot_uint64_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint64_int8 // op(A') function: GB_tran__lnot_uint64_int8 // C type: uint64_t // A type: int8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint64_int8 ( uint64_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint64_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint64_int8 // op(A') function: GB_tran__lnot_uint64_int8 // C type: uint64_t // A type: int8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint64_int8 ( uint64_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint64_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint64_int8 // op(A') function: GB_tran__lnot_uint64_int8 // C type: uint64_t // A type: int8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint64_int8 ( uint64_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint64_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
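The three listings above differ only in whether the element-wise loop carries the #pragma omp parallel for; everything else is macro scaffolding generated for one (C type, A type, operator) combination. Stripped of the GraphBLAS-internal macros, the hot loop of GB_unop__lnot_uint64_int8 reduces to a cast followed by a logical not per entry; a sketch of the expanded computation:

#include <cstdint>

// Expansion of GB_CAST_OP for this operator: Cx [p] = !(((uint64_t) Ax [p]) != 0),
// i.e. 1 for zero inputs and 0 otherwise. The omp_formatted variant adds
// exactly the pragma below; the no_omp_formatted variant omits it.
void lnot_uint64_int8(uint64_t *Cx, const int8_t *Ax, int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++)
    {
        uint64_t x = (uint64_t) Ax[p]; // GB_CASTING
        Cx[p] = !(x != 0);             // GB_OP
    }
}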
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(3*t1-3*t2,2)),ceild(3*t1-2,4)),ceild(24*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(12*t1+Ny+15,16)),floord(24*t2+Ny+11,16)),floord(24*t1-24*t2+Nz+Ny+13,16));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-62,64)),ceild(3*t1-126,128)),ceild(24*t2-Nz-499,512)),ceild(16*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(12*t1+Nx+15,512)),floord(24*t2+Nx+11,512)),floord(16*t3+Nx+3,512)),floord(24*t1-24*t2+Nz+Nx+13,512));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),4*t3+2),128*t4+126);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) { lbv=max(512*t4,4*t5+4); ubv=min(512*t4+511,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)
- 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
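The time-skewed, tiled loop nest generated by CLooG above is hard to read directly; the following is a minimal untiled sketch of the update it performs, assuming the same double-buffered A[2][Nz][Ny][Nx] layout and the coef ordering used in the listing (z, y, x taps at radii 1 through 4). The function name is hypothetical.

// Reference (untiled) form of the 25-point variable-coefficient update:
// one center tap plus symmetric taps at radius r = 1..4 on each axis.
static void stencil_25pt_reference(double ****A, double ****coef,
                                   int Nt, int Nz, int Ny, int Nx)
{
    for (int t = 0; t < Nt; t++)
        for (int i = 4; i <= Nz - 5; i++)
            for (int j = 4; j <= Ny - 5; j++)
                for (int k = 4; k <= Nx - 5; k++) {
                    double s = coef[0][i][j][k] * A[t % 2][i][j][k];
                    for (int r = 1; r <= 4; r++) {
                        s += coef[3*r - 2][i][j][k] * (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]);
                        s += coef[3*r - 1][i][j][k] * (A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]);
                        s += coef[3*r    ][i][j][k] * (A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
                    }
                    A[(t + 1) % 2][i][j][k] = s;
                }
}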
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 512; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library. 
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= floord(Nt - 1, 3); t1++) { lbp = max(ceild(t1, 2), ceild(6 * t1 - Nt + 2, 6)); ubp = min(floord(4 * Nt + Nz - 9, 24), floord(12 * t1 + Nz + 6, 24)); for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(max(0, ceild(3 * t1 - 3 * t2, 2)), ceild(3 * t1 - 2, 4)), ceild(24 * t2 - Nz - 3, 16)); t3 <= min(min(min(floord(4 * Nt + Ny - 9, 16), floord(12 * t1 + Ny + 15, 16)), floord(24 * t2 + Ny + 11, 16)), floord(24 * t1 - 24 * t2 + Nz + Ny + 13, 16)); t3++) { for (t4 = max(max(max(max(0, ceild(3 * t1 - 3 * t2 - 62, 64)), ceild(3 * t1 - 126, 128)), ceild(24 * t2 - Nz - 499, 512)), ceild(16 * t3 - Ny - 499, 512)); t4 <= min(min(min(min(floord(4 * Nt + Nx - 9, 512), floord(12 * t1 + Nx + 15, 512)), floord(24 * t2 + Nx + 11, 512)), floord(16 * t3 + Nx + 3, 512)), floord(24 * t1 - 24 * t2 + Nz + Nx + 13, 512)); t4++) { for (t5 = max(max(max(max(max(0, ceild(24 * t2 - Nz + 5, 4)), ceild(16 * t3 - Ny + 5, 4)), ceild(512 * t4 - Nx + 5, 4)), 3 * t1), 6 * t1 - 6 * t2 + 1); t5 <= min(min(min(min(min(floord(24 * t1 - 24 * t2 + Nz + 18, 4), Nt - 1), 3 * t1 + 5), 6 * t2 + 4), 4 * t3 + 2), 128 * t4 + 126); t5++) { for (t6 = max(max(24 * t2, 4 * t5 + 4), -24 * t1 + 24 * t2 + 8 * t5 - 23); t6 <= min(min(24 * t2 + 23, -24 * t1 + 24 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(16 * t3, 4 * t5 + 4); t7 <= min(16 * t3 + 15, 4 * t5 + Ny - 5); t7++) { lbv = max(512 * t4, 4 * t5 + 4); ubv = min(512 * t4 + 511, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef[1][(-4 * t5 + t6)][(-4 * t5 + 
t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + (coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
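A short usage sketch for the timeval_subtract helper defined above; time_region and work are hypothetical names, and the helper is assumed to be in scope.

#include <stdio.h>
#include <sys/time.h>

int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y);

// Time an arbitrary region of code with gettimeofday, mirroring the
// start/end/result pattern used in main() above.
void time_region(void (*work)(void))
{
    struct timeval start, end, result;
    gettimeofday(&start, 0);
    work();
    gettimeofday(&end, 0);
    timeval_subtract(&result, &end, &start);  // result = end - start
    printf("elapsed: %f s\n", result.tv_sec + result.tv_usec * 1.0e-6);
}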
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 512; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library. 
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= floord(Nt - 1, 3); t1++) { lbp = max(ceild(t1, 2), ceild(6 * t1 - Nt + 2, 6)); ubp = min(floord(4 * Nt + Nz - 9, 24), floord(12 * t1 + Nz + 6, 24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(max(0, ceild(3 * t1 - 3 * t2, 2)), ceild(3 * t1 - 2, 4)), ceild(24 * t2 - Nz - 3, 16)); t3 <= min(min(min(floord(4 * Nt + Ny - 9, 16), floord(12 * t1 + Ny + 15, 16)), floord(24 * t2 + Ny + 11, 16)), floord(24 * t1 - 24 * t2 + Nz + Ny + 13, 16)); t3++) { for (t4 = max(max(max(max(0, ceild(3 * t1 - 3 * t2 - 62, 64)), ceild(3 * t1 - 126, 128)), ceild(24 * t2 - Nz - 499, 512)), ceild(16 * t3 - Ny - 499, 512)); t4 <= min(min(min(min(floord(4 * Nt + Nx - 9, 512), floord(12 * t1 + Nx + 15, 512)), floord(24 * t2 + Nx + 11, 512)), floord(16 * t3 + Nx + 3, 512)), floord(24 * t1 - 24 * t2 + Nz + Nx + 13, 512)); t4++) { for (t5 = max(max(max(max(max(0, ceild(24 * t2 - Nz + 5, 4)), ceild(16 * t3 - Ny + 5, 4)), ceild(512 * t4 - Nx + 5, 4)), 3 * t1), 6 * t1 - 6 * t2 + 1); t5 <= min(min(min(min(min(floord(24 * t1 - 24 * t2 + Nz + 18, 4), Nt - 1), 3 * t1 + 5), 6 * t2 + 4), 4 * t3 + 2), 128 * t4 + 126); t5++) { for (t6 = max(max(24 * t2, 4 * t5 + 4), -24 * t1 + 24 * t2 + 8 * t5 - 23); t6 <= min(min(24 * t2 + 23, -24 * t1 + 24 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(16 * t3, 4 * t5 + 4); t7 <= min(16 * t3 + 15, 4 * t5 + Ny - 5); t7++) { lbv = max(512 * t4, 4 * t5 + 4); ubv = min(512 * t4 + 511, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + 
t7)][(-4 * t5 + t8)]) + (coef[1][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + (coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
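The only substantive difference between the two formatted variants is the #pragma omp parallel for on the t2 loop: tiles along t2 within one t1 wavefront are independent, while t1 itself carries dependences. A minimal sketch of that pattern follows (wavefront, lb, ub, and tile_body are hypothetical). Note that declaring the inner indices inside the parallel region makes them private automatically, which is what the private(lbv,ubv,t3,...) clause achieves for the function-scope variables above.

#include <omp.h>

// Wavefront parallelization: sequential outer loop over wavefronts,
// parallel inner loop over the independent tiles of one wavefront.
void wavefront(int t1_max, int (*lb)(int), int (*ub)(int),
               void (*tile_body)(int t1, int t2))
{
    for (int t1 = -1; t1 <= t1_max; t1++) {   // carries dependences: sequential
        int lbp = lb(t1), ubp = ub(t1);
        #pragma omp parallel for schedule(static)
        for (int t2 = lbp; t2 <= ubp; t2++)   // independent tiles: parallel
            tile_body(t1, t2);                // deeper loop indices live inside
    }
}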
8403.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose void kernel_heat_3d(int tsteps, int n, double A[200 + 0][200 + 0][200 + 0], double B[200 + 0][200 + 0][200 + 0]) { int t12; int t10; int t8; int t6; int t4; int t2; for (t2 = 1; t2 <= 1000; t2 += 1) { #pragma omp parallel for private(t4,t6,t8,t10,t12) for (t4 = 1; t4 <= n - 2; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < n - 2 ? t4 + 15 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 64) for (t10 = t8; t10 <= (t8 + 63 < n - 2 ? t8 + 63 : n - 2); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 1) B[t6][t10][t12] = 0.125 * (A[t6 + 1][t10][t12] - 2 * A[t6][t10][t12] + A[t6 - 1][t10][t12]) + 0.125 * (A[t6][t10 + 1][t12] - 2 * A[t6][t10][t12] + A[t6][t10 - 1][t12]) + 0.125 * (A[t6][t10][t12 + 1] - 2 * A[t6][t10][t12] + A[t6][t10][t12 - 1]) + A[t6][t10][t12]; #pragma omp parallel for private(t4,t6,t8,t10,t12) for (t4 = 1; t4 <= n - 2; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < n - 2 ? t4 + 15 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 64) for (t10 = t8; t10 <= (t8 + 63 < n - 2 ? t8 + 63 : n - 2); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 1) A[t6][t10][t12] = 0.125 * (B[t6 + 1][t10][t12] - 2 * B[t6][t10][t12] + B[t6 - 1][t10][t12]) + 0.125 * (B[t6][t10 + 1][t12] - 2 * B[t6][t10][t12] + B[t6][t10 - 1][t12]) + 0.125 * (B[t6][t10][t12 + 1] - 2 * B[t6][t10][t12] + B[t6][t10][t12 - 1]) + B[t6][t10][t12]; } }
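A minimal driver sketch for kernel_heat_3d above (hypothetical, not part of the generated file). The grids go on the heap because each 200^3 double array is about 64 MB; note also that the generated t2 loop runs a fixed 1000 steps, so the tsteps argument is effectively ignored.

#include <stdlib.h>

void kernel_heat_3d(int tsteps, int n,
                    double A[200][200][200], double B[200][200][200]);

int main(void)
{
    typedef double grid[200][200][200];
    grid *A = (grid *)malloc(sizeof(grid));   // ~64 MB each: keep off the stack
    grid *B = (grid *)malloc(sizeof(grid));
    /* ... fill (*A) with initial conditions ... */
    kernel_heat_3d(1000, 200, *A, *B);        // t2 loop above is fixed at 1000 steps
    free(A);
    free(B);
    return 0;
}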
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

static void im2col_sgemm_pack4to1_sse(const Mat & bottom_im2col, Mat & top_blob, const Mat & kernel, const Mat & _bias, const Option & opt)
{
    // Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;
    const int outch = top_blob.c;

    const float *bias = _bias;

    Mat tmp;
    if (size >= 12)
        tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + size % 12 % 4, 4u * 4, 4, opt.workspace_allocator);
    else if (size >= 8)
        tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 4u * 4, 4, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4 * maxk, inch, size / 4 + size % 4, 4u * 4, 4, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 4u * 4, 4, opt.workspace_allocator);
    {
        int remain_size_start = 0;
        int nn_size = size / 12;

#pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 12;

            float *tmpptr = tmp.channel(i / 12);

            for (int q = 0; q < inch; q++)
            {
                const float *img0 = (const float *)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x12
                    __m128 _r0 = _mm_load_ps(img0);
                    __m128 _r1 = _mm_load_ps(img0 + 4);
                    __m128 _r2 = _mm_load_ps(img0 + 4 * 2);
                    __m128 _r3 = _mm_load_ps(img0 + 4 * 3);
                    __m128 _r4 = _mm_load_ps(img0 + 4 * 4);
                    __m128 _r5 = _mm_load_ps(img0 + 4 * 5);
                    __m128 _r6 = _mm_load_ps(img0 + 4 * 6);
                    __m128 _r7 = _mm_load_ps(img0 + 4 * 7);
                    __m128 _r8 = _mm_load_ps(img0 + 4 * 8);
                    __m128 _r9 = _mm_load_ps(img0 + 4 * 9);
                    __m128 _ra = _mm_load_ps(img0 + 4 * 10);
                    __m128 _rb = _mm_load_ps(img0 + 4 * 11);

                    _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
                    _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
                    _MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb);

                    _mm_store_ps(tmpptr, _r0);
                    _mm_store_ps(tmpptr + 4, _r4);
                    _mm_store_ps(tmpptr + 4 * 2, _r8);
                    _mm_store_ps(tmpptr + 4 * 3, _r1);
                    _mm_store_ps(tmpptr + 4 * 4, _r5);
                    _mm_store_ps(tmpptr + 4 * 5, _r9);
                    _mm_store_ps(tmpptr + 4 * 6, _r2);
                    _mm_store_ps(tmpptr + 4 * 7, _r6);
                    _mm_store_ps(tmpptr + 4 * 8, _ra);
                    _mm_store_ps(tmpptr + 4 * 9, _r3);
                    _mm_store_ps(tmpptr + 4 * 10, _r7);
                    _mm_store_ps(tmpptr + 4 * 11, _rb);

                    img0 += size * 4;
                    tmpptr += 48;
                }
            }
        }

        remain_size_start += nn_size * 12;
        nn_size = (size - remain_size_start) >> 3;

#pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = re
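The packing loop above hinges on _MM_TRANSPOSE4_PS; below is a standalone sketch of the same register transpose on a single 4x4 tile (transpose4x4 and its buffers are hypothetical).

#include <xmmintrin.h>

// Transpose a 4x4 float tile through SSE registers: the core step the
// 4x12 packing above repeats three times per iteration.
void transpose4x4(const float *src, float *dst)
{
    __m128 r0 = _mm_loadu_ps(src);
    __m128 r1 = _mm_loadu_ps(src + 4);
    __m128 r2 = _mm_loadu_ps(src + 8);
    __m128 r3 = _mm_loadu_ps(src + 12);
    _MM_TRANSPOSE4_PS(r0, r1, r2, r3);  // 4x4 in-register transpose (xmmintrin.h)
    _mm_storeu_ps(dst, r0);
    _mm_storeu_ps(dst + 4, r1);
    _mm_storeu_ps(dst + 8, r2);
    _mm_storeu_ps(dst + 12, r3);
}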
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose void kernel_heat_3d(int tsteps, int n, double A[200 + 0][200 + 0][200 + 0], double B[200 + 0][200 + 0][200 + 0]) { int t12; int t10; int t8; int t6; int t4; int t2; for (t2 = 1; t2 <= 1000; t2 += 1) { #pragma omp parallel for private(t4,t6,t8,t10,t12) for (t4 = 1; t4 <= n - 2; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < n - 2 ? t4 + 15 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 64) for (t10 = t8; t10 <= (t8 + 63 < n - 2 ? t8 + 63 : n - 2); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 1) B[t6][t10][t12] = 0.125 * (A[t6 + 1][t10][t12] - 2 * A[t6][t10][t12] + A[t6 - 1][t10][t12]) + 0.125 * (A[t6][t10 + 1][t12] - 2 * A[t6][t10][t12] + A[t6][t10 - 1][t12]) + 0.125 * (A[t6][t10][t12 + 1] - 2 * A[t6][t10][t12] + A[t6][t10][t12 - 1]) + A[t6][t10][t12]; #pragma omp parallel for private(t4,t6,t8,t10,t12) for (t4 = 1; t4 <= n - 2; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < n - 2 ? t4 + 15 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 64) for (t10 = t8; t10 <= (t8 + 63 < n - 2 ? t8 + 63 : n - 2); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 1) A[t6][t10][t12] = 0.125 * (B[t6 + 1][t10][t12] - 2 * B[t6][t10][t12] + B[t6 - 1][t10][t12]) + 0.125 * (B[t6][t10 + 1][t12] - 2 * B[t6][t10][t12] + B[t6][t10 - 1][t12]) + 0.125 * (B[t6][t10][t12 + 1] - 2 * B[t6][t10][t12] + B[t6][t10][t12 - 1]) + B[t6][t10][t12]; } }
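Note the two double-buffering styles used by the stencils in this dump: kernel_heat_3d ping-pongs between the named arrays A and B in two passes per time step, whereas 3d25pt_var folds both buffers into one array indexed by t % 2. A 1-D sketch of the latter idiom (relax_1d is hypothetical):

// Single array, two time planes: read plane t % 2, write plane (t + 1) % 2.
void relax_1d(double a[2][1024], int nt)
{
    for (int t = 0; t < nt; t++)
        for (int i = 1; i < 1023; i++)
            a[(t + 1) % 2][i] = 0.25 * a[t % 2][i - 1]
                              + 0.50 * a[t % 2][i]
                              + 0.25 * a[t % 2][i + 1];
}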
Stmt.h
//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/iterator.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <string> namespace llvm { class FoldingSetNodeID; } namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class IdentifierInfo; class LabelDecl; class ParmVarDecl; class PrinterHelper; struct PrintingPolicy; class QualType; class RecordDecl; class SourceManager; class StringLiteral; class SwitchStmt; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. /// class LLVM_ALIGNAS(LLVM_PTR_SIZE) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: void *operator new(size_t bytes) LLVM_NOEXCEPT { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) LLVM_NOEXCEPT { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } class StmtBitfields { friend class Stmt; /// \brief The statement class. 
unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class CompoundStmtBitfields { friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; }; class ExprBitfields { friend class Expr; friend class DeclRefExpr; // computeDependence friend class InitListExpr; // ctor friend class DesignatedInitExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class ASTStmtReader; // deserialization friend class CXXNewExpr; // ctor friend class DependentScopeDeclRefExpr; // ctor friend class CXXConstructExpr; // ctor friend class CallExpr; // ctor friend class OffsetOfExpr; // ctor friend class ObjCMessageExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ShuffleVectorExpr; // ctor friend class ParenListExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class OverloadExpr; // ctor friend class PseudoObjectExpr; // ctor friend class AtomicExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 2; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = 16 }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 2; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 2; unsigned IsType : 1; // true if operand is a type, false if an expression. }; class DeclRefExprBitfields { friend class DeclRefExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; }; class CastExprBitfields { friend class CastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned BasePathSize : 32 - 6 - NumExprBits; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; }; class ExprWithCleanupsBitfields { friend class ExprWithCleanups; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned NumObjects : 32 - NumExprBits; }; class PseudoObjectExprBitfields { friend class PseudoObjectExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class TypeTraitExprBitfields { friend class TypeTraitExpr; friend class ASTStmtReader; friend class ASTStmtWriter; unsigned : NumExprBits; /// \brief The kind of type trait, which is a value of a TypeTrait enumerator. 
unsigned Kind : 8; /// \brief If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// \brief The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; union { StmtBitfields StmtBits; CompoundStmtBitfields CompoundStmtBits; ExprBitfields ExprBits; CharacterLiteralBitfields CharacterLiteralBits; FloatingLiteralBitfields FloatingLiteralBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; DeclRefExprBitfields DeclRefExprBits; CastExprBitfields CastExprBits; CallExprBitfields CallExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; PseudoObjectExprBitfields PseudoObjectExprBits; ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; InitListExprBitfields InitListExprBits; TypeTraitExprBitfields TypeTraitExprBits; }; friend class ASTStmtReader; friend class ASTStmtWriter; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) LLVM_NOEXCEPT { return mem; } void operator delete(void *, const ASTContext &, unsigned) LLVM_NOEXCEPT {} void operator delete(void *, const ASTContext *, unsigned) LLVM_NOEXCEPT {} void operator delete(void *, size_t) LLVM_NOEXCEPT {} void operator delete(void *, void *) LLVM_NOEXCEPT {} public: /// \brief A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell { }; protected: /// Iterator for iterating over Stmt * arrays that contain only Expr * /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). struct ExprIterator : llvm::iterator_adaptor_base<ExprIterator, Stmt **, std::random_access_iterator_tag, Expr *> { ExprIterator() : iterator_adaptor_base(nullptr) {} ExprIterator(Stmt **I) : iterator_adaptor_base(I) {} reference operator*() const { assert((*I)->getStmtClass() >= firstExprConstant && (*I)->getStmtClass() <= lastExprConstant); return *reinterpret_cast<Expr **>(I); } }; /// Const iterator for iterating over Stmt * arrays that contain only Expr * struct ConstExprIterator : llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *, std::random_access_iterator_tag, const Expr *const> { ConstExprIterator() : iterator_adaptor_base(nullptr) {} ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {} reference operator*() const { assert((*I)->getStmtClass() >= firstExprConstant && (*I)->getStmtClass() <= lastExprConstant); return *reinterpret_cast<const Expr *const *>(I); } }; private: /// \brief Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// \brief Construct an empty statement. explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt(StmtClass SC) { static_assert(sizeof(*this) % llvm::AlignOf<void *>::Alignment == 0, "Insufficient alignment!"); StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. 
We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getLocStart() const LLVM_READONLY; SourceLocation getLocEnd() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// \brief Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; void dump(raw_ostream &OS) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip past any implicit AST nodes which might surround this /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes. Stmt *IgnoreImplicit(); /// \brief Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. Stmt *IgnoreContainers(bool IgnoreCaptured = false); const Stmt *stripLabelLikeStatements() const; Stmt *stripLabelLikeStatements() { return const_cast<Stmt*>( const_cast<const Stmt*>(this)->stripLabelLikeStatements()); } /// Child Iterators: All subclasses must implement 'children' /// to permit easy iteration over the substatements/subexpessions of an /// AST node. This permits easy iteration over all nodes in the AST. typedef StmtIterator child_iterator; typedef ConstStmtIterator const_child_iterator; typedef llvm::iterator_range<child_iterator> child_range; typedef llvm::iterator_range<const_child_iterator> const_child_range; child_range children(); const_child_range children() const { auto Children = const_cast<Stmt *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_iterator child_begin() { return children().begin(); } child_iterator child_end() { return children().end(); } const_child_iterator child_begin() const { return children().begin(); } const_child_iterator child_end() const { return children().end(); } /// \brief Produce a unique representation of the given statement. /// /// \param ID once the profiling operation is complete, will contain /// the unique representation of the given statement. /// /// \param Context the AST context in which the statement resides /// /// \param Canonical whether the profile should be based on the canonical /// representation of this statement (e.g., where non-type template /// parameters are identified by index/level rather than their /// declaration pointers) or the exact representation of the statement as /// written in the source. void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool Canonical) const; }; /// DeclStmt - Adaptor class for mixing declarations with statements and /// expressions. For example, CompoundStmt mixes statements, expressions /// and declarations (variables, types). Another example is ForStmt, where /// the first statement can be an expression or a declaration. 
/// class DeclStmt : public Stmt { DeclGroupRef DG; SourceLocation StartLoc, EndLoc; public: DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {} /// \brief Build an empty declaration statement. explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { } /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } SourceLocation getStartLoc() const { return StartLoc; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } typedef DeclGroupRef::iterator decl_iterator; typedef DeclGroupRef::const_iterator const_decl_iterator; typedef llvm::iterator_range<decl_iterator> decl_range; typedef llvm::iterator_range<const_decl_iterator> decl_const_range; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { SourceLocation SemiLoc; /// \brief True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode bool HasLeadingEmptyMacro; public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass), SemiLoc(L), HasLeadingEmptyMacro(hasLeadingEmptyMacro) {} /// \brief Build an empty null statement. explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty), HasLeadingEmptyMacro(false) { } SourceLocation getSemiLoc() const { return SemiLoc; } void setSemiLoc(SourceLocation L) { SemiLoc = L; } bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; } SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(child_iterator(), child_iterator()); } friend class ASTStmtReader; friend class ASTStmtWriter; }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. 
/// class CompoundStmt : public Stmt { Stmt** Body; SourceLocation LBraceLoc, RBraceLoc; friend class ASTStmtReader; public: CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts, SourceLocation LB, SourceLocation RB); // \brief Build an empty compound statement with a location. explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) { CompoundStmtBits.NumStmts = 0; } // \brief Build an empty compound statement. explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty), Body(nullptr) { CompoundStmtBits.NumStmts = 0; } void setStmts(const ASTContext &C, ArrayRef<Stmt *> Stmts); bool body_empty() const { return CompoundStmtBits.NumStmts == 0; } unsigned size() const { return CompoundStmtBits.NumStmts; } typedef Stmt** body_iterator; typedef llvm::iterator_range<body_iterator> body_range; body_range body() { return body_range(body_begin(), body_end()); } body_iterator body_begin() { return Body; } body_iterator body_end() { return Body + size(); } Stmt *body_front() { return !body_empty() ? Body[0] : nullptr; } Stmt *body_back() { return !body_empty() ? Body[size()-1] : nullptr; } void setLastStmt(Stmt *S) { assert(!body_empty() && "setLastStmt"); Body[size()-1] = S; } typedef Stmt* const * const_body_iterator; typedef llvm::iterator_range<const_body_iterator> body_const_range; body_const_range body() const { return body_const_range(body_begin(), body_end()); } const_body_iterator body_begin() const { return Body; } const_body_iterator body_end() const { return Body + size(); } const Stmt *body_front() const { return !body_empty() ? Body[0] : nullptr; } const Stmt *body_back() const { return !body_empty() ? Body[size() - 1] : nullptr; } typedef std::reverse_iterator<body_iterator> reverse_body_iterator; reverse_body_iterator body_rbegin() { return reverse_body_iterator(body_end()); } reverse_body_iterator body_rend() { return reverse_body_iterator(body_begin()); } typedef std::reverse_iterator<const_body_iterator> const_reverse_body_iterator; const_reverse_body_iterator body_rbegin() const { return const_reverse_body_iterator(body_end()); } const_reverse_body_iterator body_rend() const { return const_reverse_body_iterator(body_begin()); } SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; } SourceLocation getLBracLoc() const { return LBraceLoc; } SourceLocation getRBracLoc() const { return RBraceLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == CompoundStmtClass; } // Iterators child_range children() { return child_range(Body, Body + CompoundStmtBits.NumStmts); } const_child_range children() const { return const_child_range(child_iterator(Body), child_iterator(Body + CompoundStmtBits.NumStmts)); } }; // SwitchCase is the base class for CaseStmt and DefaultStmt, class SwitchCase : public Stmt { protected: // A pointer to the following CaseStmt or DefaultStmt class, // used by SwitchStmt. 
SwitchCase *NextSwitchCase; SourceLocation KeywordLoc; SourceLocation ColonLoc; SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc) : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc), ColonLoc(ColonLoc) { } SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC), NextSwitchCase(nullptr) {} public: const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; } SwitchCase *getNextSwitchCase() { return NextSwitchCase; } void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; } SourceLocation getKeywordLoc() const { return KeywordLoc; } void setKeywordLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Stmt *getSubStmt(); const Stmt *getSubStmt() const { return const_cast<SwitchCase*>(this)->getSubStmt(); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY; static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass || T->getStmtClass() == DefaultStmtClass; } }; class CaseStmt : public SwitchCase { SourceLocation EllipsisLoc; enum { LHS, RHS, SUBSTMT, END_EXPR }; Stmt* SubExprs[END_EXPR]; // The expression for the RHS is Non-null for // GNU "case 1 ... 4" extension public: CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc) : SwitchCase(CaseStmtClass, caseLoc, colonLoc) { SubExprs[SUBSTMT] = nullptr; SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs); SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs); EllipsisLoc = ellipsisLoc; } /// \brief Build an empty switch case statement. explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { } SourceLocation getCaseLoc() const { return KeywordLoc; } void setCaseLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getEllipsisLoc() const { return EllipsisLoc; } void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); } Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); } Stmt *getSubStmt() { return SubExprs[SUBSTMT]; } const Expr *getLHS() const { return reinterpret_cast<const Expr*>(SubExprs[LHS]); } const Expr *getRHS() const { return reinterpret_cast<const Expr*>(SubExprs[RHS]); } const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; } void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; } void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); } void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. const CaseStmt *CS = this; while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[END_EXPR]); } }; class DefaultStmt : public SwitchCase { Stmt* SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// \brief Build an empty default statement. 
explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) { } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return KeywordLoc; } void setDefaultLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt+1); } }; inline SourceLocation SwitchCase::getLocEnd() const { if (const CaseStmt *CS = dyn_cast<CaseStmt>(this)) return CS->getLocEnd(); return cast<DefaultStmt>(this)->getLocEnd(); } /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; /// class LabelStmt : public Stmt { SourceLocation IdentLoc; LabelDecl *TheDecl; Stmt *SubStmt; public: LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) { static_assert(sizeof(LabelStmt) == 2 * sizeof(SourceLocation) + 2 * sizeof(void *), "LabelStmt too big"); } // \brief Build an empty label statement. explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { } SourceLocation getIdentLoc() const { return IdentLoc; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setIdentLoc(SourceLocation L) { IdentLoc = L; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// \brief Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... } /// class AttributedStmt : public Stmt { Stmt *SubStmt; SourceLocation AttrLoc; unsigned NumAttrs; friend class ASTStmtReader; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt) : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc), NumAttrs(Attrs.size()) { std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr()); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) { std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr); } const Attr *const *getAttrArrayPtr() const { return reinterpret_cast<const Attr *const *>(this + 1); } const Attr **getAttrArrayPtr() { return reinterpret_cast<const Attr **>(this + 1); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); // \brief Build an empty attributed statement. 
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttrLoc; } ArrayRef<const Attr*> getAttrs() const { return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. /// class IfStmt : public Stmt { enum { VAR, COND, THEN, ELSE, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation IfLoc; SourceLocation ElseLoc; public: IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond, Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = nullptr); /// \brief Build an empty if/then/else statement explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "if" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } const Stmt *getThen() const { return SubExprs[THEN]; } void setThen(Stmt *S) { SubExprs[THEN] = S; } const Stmt *getElse() const { return SubExprs[ELSE]; } void setElse(Stmt *S) { SubExprs[ELSE] = S; } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Stmt *getThen() { return SubExprs[THEN]; } Stmt *getElse() { return SubExprs[ELSE]; } SourceLocation getIfLoc() const { return IfLoc; } void setIfLoc(SourceLocation L) { IfLoc = L; } SourceLocation getElseLoc() const { return ElseLoc; } void setElseLoc(SourceLocation L) { ElseLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; } SourceLocation getLocEnd() const LLVM_READONLY { if (SubExprs[ELSE]) return SubExprs[ELSE]->getLocEnd(); else return SubExprs[THEN]->getLocEnd(); } // Iterators over subexpressions. The iterators will include iterating // over the initialization expression referenced by the condition variable. child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == IfStmtClass; } }; /// SwitchStmt - This represents a 'switch' stmt. /// class SwitchStmt : public Stmt { SourceLocation SwitchLoc; enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // This points to a linked list of case and default statements and, if the // SwitchStmt is a switch on an enum value, records whether all the enum // values were covered by CaseStmts. The coverage information value is meant // to be a hint for possible clients. llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase; public: SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond); /// \brief Build an empty switch statement. 
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Stmt *getBody() const { return SubExprs[BODY]; } const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } Stmt *getBody() { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); } /// \brief Set the case list for this switch statement. void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); } SourceLocation getSwitchLoc() const { return SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { SubExprs[BODY] = S; SwitchLoc = SL; } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase.getPointer()); FirstCase.setPointer(SC); } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. void setAllEnumCasesCovered() { FirstCase.setInt(true); } /// Returns true if the SwitchStmt is a switch of an enum value and all cases /// have been explicitly covered. bool isAllEnumCasesCovered() const { return FirstCase.getInt(); } SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd() : SubExprs[COND]->getLocEnd(); } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == SwitchStmtClass; } }; /// WhileStmt - This represents a 'while' stmt. /// class WhileStmt : public Stmt { SourceLocation WhileLoc; enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; public: WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body, SourceLocation WL); /// \brief Build an empty while statement. explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// DoStmt - This represents a 'do/while' stmt. /// class DoStmt : public Stmt { SourceLocation DoLoc; enum { BODY, COND, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) { SubExprs[COND] = reinterpret_cast<Stmt*>(cond); SubExprs[BODY] = body; } /// \brief Build an empty do-while statement. explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getDoLoc() const { return DoLoc; } void setDoLoc(SourceLocation L) { DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. /// class ForStmt : public Stmt { SourceLocation ForLoc; enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// \brief Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { } Stmt *getInit() { return SubExprs[INIT]; } /// \brief Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. /// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... 
/// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForLoc; } void setForLoc(SourceLocation L) { ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// GotoStmt - This represents a direct goto. /// class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation GotoLoc; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {} /// \brief Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { } LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoLoc; } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// IndirectGotoStmt - This represents an indirect goto. /// class IndirectGotoStmt : public Stmt { SourceLocation GotoLoc; SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc), Target((Stmt*)target) {} /// \brief Build an empty indirect goto statement. 
explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) { } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr*>(Target); } const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);} void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt*>(this)->getConstantTarget(); } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target+1); } }; /// ContinueStmt - This represents a continue. /// class ContinueStmt : public Stmt { SourceLocation ContinueLoc; public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {} /// \brief Build an empty continue statement. explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { } SourceLocation getContinueLoc() const { return ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// BreakStmt - This represents a break. /// class BreakStmt : public Stmt { SourceLocation BreakLoc; public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) { static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation), "BreakStmt too large"); } /// \brief Build an empty break statement. explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { } SourceLocation getBreakLoc() const { return BreakLoc; } void setBreakLoc(SourceLocation L) { BreakLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. /// class ReturnStmt : public Stmt { SourceLocation RetLoc; Stmt *RetExpr; const VarDecl *NRVOCandidate; public: explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {} ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate) : Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E), NRVOCandidate(NRVOCandidate) {} /// \brief Build an empty return expression. 
explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { } const Expr *getRetValue() const; Expr *getRetValue(); void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); } SourceLocation getReturnLoc() const { return RetLoc; } void setReturnLoc(SourceLocation L) { RetLoc = L; } /// \brief Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. const VarDecl *getNRVOCandidate() const { return NRVOCandidate; } void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; } SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RetExpr ? RetExpr->getLocEnd() : RetLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr+1); return child_range(child_iterator(), child_iterator()); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. /// class AsmStmt : public Stmt { protected: SourceLocation AsmLoc; /// \brief True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// \brief If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { } friend class ASTStmtReader; public: /// \brief Build an empty inline-assembly statement. explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty), Exprs(nullptr) { } SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); } SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. 
StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. typedef ExprIterator inputs_iterator; typedef ConstExprIterator const_inputs_iterator; typedef llvm::iterator_range<inputs_iterator> inputs_range; typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. typedef ExprIterator outputs_iterator; typedef ConstExprIterator const_outputs_iterator; typedef llvm::iterator_range<outputs_iterator> outputs_range; typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. /// class GCCAsmStmt : public AsmStmt { SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. StringLiteral **Constraints; StringLiteral **Clobbers; IdentifierInfo **Names; friend class ASTStmtReader; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// \brief Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty), Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) { } bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles /// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. 
int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. /// class MSAsmStmt : public AsmStmt { SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks; Token *AsmToks; StringRef *Constraints; StringRef *Clobbers; friend class ASTStmtReader; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// \brief Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty), NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { } SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit 
SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { } public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { } public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getLocEnd(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); friend class ASTReader; friend class ASTStmtReader; explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { } public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. /// class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// \brief Build an empty __leave statement. 
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { } SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// \brief The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// \brief Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: /// \brief Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. /// Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr); /// \brief Determine the kind of capture. VariableCaptureKind getCaptureKind() const { return VarAndKind.getInt(); } /// \brief Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// \brief Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// \brief Determine whether this capture handles a variable (by reference). bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// \brief Determine whether this capture handles a variable by copy. bool capturesVariableByCopy() const { return getCaptureKind() == VCK_ByCopy; } /// \brief Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// \brief Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const { assert((capturesVariable() || capturesVariableByCopy()) && "No variable available for 'this' or VAT capture"); return VarAndKind.getPointer(); } friend class ASTStmtReader; }; private: /// \brief The number of variables captured, including 'this'. unsigned NumCaptures; /// \brief The pointer part is the implicitly outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind; /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl; /// \brief Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// \brief Construct an empty captured statement. 
CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// \brief Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// \brief Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl() { return CapDeclAndKind.getPointer(); } const CapturedDecl *getCapturedDecl() const { return CapDeclAndKind.getPointer(); } /// \brief Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D) { assert(D && "null CapturedDecl"); CapDeclAndKind.setPointer(D); } /// \brief Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const { return CapDeclAndKind.getInt(); } /// \brief Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind) { CapDeclAndKind.setInt(Kind); } /// \brief Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// \brief Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// \brief True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// \brief An iterator that walks over the captures. typedef Capture *capture_iterator; typedef const Capture *const_capture_iterator; typedef llvm::iterator_range<capture_iterator> capture_range; typedef llvm::iterator_range<const_capture_iterator> capture_const_range; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// \brief Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// \brief Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// \brief Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// \brief Iterator that walks over the capture initialization arguments. typedef Expr **capture_init_iterator; typedef llvm::iterator_range<capture_init_iterator> capture_init_range; /// \brief Const iterator that walks over the capture initialization /// arguments. typedef Expr *const *const_capture_init_iterator; typedef llvm::iterator_range<const_capture_init_iterator> const_capture_init_range; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// \brief Retrieve the first initialization argument. 
capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// \brief Retrieve the iterator pointing one past the last initialization /// argument. capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getLocStart() const LLVM_READONLY { return getCapturedStmt()->getLocStart(); } SourceLocation getLocEnd() const LLVM_READONLY { return getCapturedStmt()->getLocEnd(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); friend class ASTStmtReader; }; } // end namespace clang #endif
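// Illustrative sketch (editor's addition, not part of the original header): a minimal way a client might walk the intrusive case list maintained by SwitchStmt::addSwitchCase(), plus the generic child_range every Stmt subclass exposes. `SS` is a hypothetical, already-obtained SwitchStmt*. Note that addSwitchCase() pushes onto the front of the list, so cases come back in reverse source order. // // void visitSwitch(clang::SwitchStmt *SS) { // for (clang::SwitchCase *SC = SS->getSwitchCaseList(); SC; // SC = SC->getNextSwitchCase()) // llvm::errs() << SC->getStmtClassName() << "\n"; // walk case/default chain // for (clang::Stmt *Child : SS->children()) // if (Child) // children may be null (e.g. a missing init or else) // llvm::errs() << Child->getStmtClassName() << "\n"; // }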
#include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <stdint.h> #include <omp.h> #include "../../support/common.h" #include "../../support/graph.h" #include "../../support/params.h" #include "../../support/timer.h" #include "../../support/utils.h" int main(int argc, char **argv) { //Process parameters struct Params p = input_params(argc, argv); //Initialize BFS data structures PRINT_INFO(p.verbosity >= 1, "Reading graph %s", p.fileName); struct COOGraph cooGraph = readCOOGraph(p.fileName); PRINT_INFO(p.verbosity >= 1, " Graph has %d nodes and %d edges", cooGraph.numNodes, cooGraph.numEdges); struct CSRGraph csrGraph = coo2csr(cooGraph); uint32_t *nodeLevel = (uint32_t *) malloc(csrGraph.numNodes * sizeof(uint32_t)); uint32_t *nodeLevelRef = (uint32_t *) malloc(csrGraph.numNodes * sizeof(uint32_t)); for (uint32_t i = 0; i < csrGraph.numNodes; ++i) { nodeLevel[i] = UINT32_MAX; //Unreachable nodeLevelRef[i] = UINT32_MAX; //Unreachable } uint32_t srcNode = 0; //Initialize frontier double buffers uint32_t * buffer1 = (uint32_t *) malloc(csrGraph.numNodes * sizeof(uint32_t)); uint32_t *buffer2 = (uint32_t *) malloc(csrGraph.numNodes * sizeof(uint32_t)); uint32_t *prevFrontier = buffer1; uint32_t *currFrontier = buffer2; //Calculating result on CPU PRINT_INFO(p.verbosity >= 1, "Calculating result on CPU (OpenMP)"); omp_set_num_threads(4); Timer timer; startTimer(&timer); nodeLevel[srcNode] = 0; prevFrontier[0] = srcNode; uint32_t numPrevFrontier = 1; for (uint32_t level = 1; numPrevFrontier > 0; ++level) { uint32_t numCurrFrontier = 0; //Visit nodes in the previous frontier #pragma omp parallel for for (uint32_t i = 0; i < numPrevFrontier; ++i) { uint32_t node = prevFrontier[i]; for (uint32_t edge = csrGraph.nodePtrs[node]; edge < csrGraph.nodePtrs[node + 1]; ++edge) { uint32_t neighbor = csrGraph.neighborIdxs[edge]; uint32_t justVisited = 0; #pragma omp critical { if (nodeLevel[neighbor] == UINT32_MAX) { //Node not previously visited nodeLevel[neighbor] = level; justVisited = 1; } } if (justVisited) { uint32_t currFrontierIdx; #pragma omp critical { currFrontierIdx = numCurrFrontier++; } currFrontier[currFrontierIdx] = neighbor; } } } //Swap buffers uint32_t * tmp = prevFrontier; prevFrontier = currFrontier; currFrontier = tmp; numPrevFrontier = numCurrFrontier; } stopTimer(&timer); if (p.verbosity == 0) PRINT("%f", getElapsedTime(timer) * 1e3); PRINT_INFO(p.verbosity >= 1, "Elapsed time: %f ms", getElapsedTime(timer) * 1e3); //Calculating result on CPU sequentially PRINT_INFO(p.verbosity >= 1, "Calculating result on CPU (sequential)"); startTimer(&timer); nodeLevelRef[srcNode] = 0; prevFrontier[0] = srcNode; numPrevFrontier = 1; for (uint32_t level = 1; numPrevFrontier > 0; ++level) { uint32_t numCurrFrontier = 0; //Visit nodes in the previous frontier for (uint32_t i = 0; i < numPrevFrontier; ++i) { uint32_t node = prevFrontier[i]; for (uint32_t edge = csrGraph.nodePtrs[node]; edge < csrGraph.nodePtrs[node + 1]; ++edge) { uint32_t neighbor = csrGraph.neighborIdxs[edge]; uint32_t justVisited = 0; if (nodeLevelRef[neighbor] == UINT32_MAX) { //Node not previously visited nodeLevelRef[neighbor] = level; justVisited = 1; } if (justVisited) { uint32_t currFrontierIdx; currFrontierIdx = numCurrFrontier++; currFrontier[currFrontierIdx] = neighbor; } } } //Swap buffers uint32_t * tmp = prevFrontier; prevFrontier = currFrontier; currFrontier = tmp; numPrevFrontier = numCurrFrontier; } stopTimer(&timer); if (p.verbosity == 0) PRINT("%f", 
getElapsedTime(timer) * 1e3); PRINT_INFO(p.verbosity >= 1, "Elapsed time: %f ms", getElapsedTime(timer) * 1e3); //Verifying result PRINT_INFO(p.verbosity >= 1, "Verifying the result"); for (uint32_t nodeIdx = 0; nodeIdx < csrGraph.numNodes; ++nodeIdx) { if (nodeLevel[nodeIdx] != nodeLevelRef[nodeIdx]) { PRINT_ERROR("Mismatch at node %u (CPU sequential result = level %u, CPU parallel result = level %u)", nodeIdx, nodeLevelRef[nodeIdx], nodeLevel[nodeIdx]); } } //Deallocate data structures freeCOOGraph(cooGraph); freeCSRGraph(csrGraph); free(nodeLevel); free(buffer1); free(buffer2); return 0; }
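/* Editor's note (illustrative sketch, not part of the original program): the two unnamed critical sections in the parallel loop above serialize every neighbor visit. Assuming OpenMP 3.1 or later, the frontier-slot reservation can instead use an atomic capture, which is typically far cheaper than a critical section: uint32_t currFrontierIdx; #pragma omp atomic capture currFrontierIdx = numCurrFrontier++; currFrontier[currFrontierIdx] = neighbor; The visited-check on nodeLevel[neighbor] is a read-modify-write that would additionally need a compare-and-swap (e.g. the GCC builtin __sync_val_compare_and_swap, or the OpenMP 5.1 atomic 'compare' clause), so that part genuinely needs the critical section as written. */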
//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/iterator.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <string> namespace llvm { class FoldingSetNodeID; } namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class IdentifierInfo; class LabelDecl; class ParmVarDecl; class PrinterHelper; struct PrintingPolicy; class QualType; class RecordDecl; class SourceManager; class StringLiteral; class SwitchStmt; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. /// class LLVM_ALIGNAS(LLVM_PTR_SIZE) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: void *operator new(size_t bytes) LLVM_NOEXCEPT { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) LLVM_NOEXCEPT { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } class StmtBitfields { friend class Stmt; /// \brief The statement class. 
unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class CompoundStmtBitfields { friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; }; class ExprBitfields { friend class Expr; friend class DeclRefExpr; // computeDependence friend class InitListExpr; // ctor friend class DesignatedInitExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class ASTStmtReader; // deserialization friend class CXXNewExpr; // ctor friend class DependentScopeDeclRefExpr; // ctor friend class CXXConstructExpr; // ctor friend class CallExpr; // ctor friend class OffsetOfExpr; // ctor friend class ObjCMessageExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ShuffleVectorExpr; // ctor friend class ParenListExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class OverloadExpr; // ctor friend class PseudoObjectExpr; // ctor friend class AtomicExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 2; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = 16 }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 2; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 2; unsigned IsType : 1; // true if operand is a type, false if an expression. }; class DeclRefExprBitfields { friend class DeclRefExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; }; class CastExprBitfields { friend class CastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned BasePathSize : 32 - 6 - NumExprBits; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; }; class ExprWithCleanupsBitfields { friend class ExprWithCleanups; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned NumObjects : 32 - NumExprBits; }; class PseudoObjectExprBitfields { friend class PseudoObjectExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class TypeTraitExprBitfields { friend class TypeTraitExpr; friend class ASTStmtReader; friend class ASTStmtWriter; unsigned : NumExprBits; /// \brief The kind of type trait, which is a value of a TypeTrait enumerator. 
unsigned Kind : 8; /// \brief If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// \brief The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; union { StmtBitfields StmtBits; CompoundStmtBitfields CompoundStmtBits; ExprBitfields ExprBits; CharacterLiteralBitfields CharacterLiteralBits; FloatingLiteralBitfields FloatingLiteralBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; DeclRefExprBitfields DeclRefExprBits; CastExprBitfields CastExprBits; CallExprBitfields CallExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; PseudoObjectExprBitfields PseudoObjectExprBits; ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; InitListExprBitfields InitListExprBits; TypeTraitExprBitfields TypeTraitExprBits; }; friend class ASTStmtReader; friend class ASTStmtWriter; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) LLVM_NOEXCEPT { return mem; } void operator delete(void *, const ASTContext &, unsigned) LLVM_NOEXCEPT {} void operator delete(void *, const ASTContext *, unsigned) LLVM_NOEXCEPT {} void operator delete(void *, size_t) LLVM_NOEXCEPT {} void operator delete(void *, void *) LLVM_NOEXCEPT {} public: /// \brief A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell { }; protected: /// Iterator for iterating over Stmt * arrays that contain only Expr * /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). struct ExprIterator : llvm::iterator_adaptor_base<ExprIterator, Stmt **, std::random_access_iterator_tag, Expr *> { ExprIterator() : iterator_adaptor_base(nullptr) {} ExprIterator(Stmt **I) : iterator_adaptor_base(I) {} reference operator*() const { assert((*I)->getStmtClass() >= firstExprConstant && (*I)->getStmtClass() <= lastExprConstant); return *reinterpret_cast<Expr **>(I); } }; /// Const iterator for iterating over Stmt * arrays that contain only Expr * struct ConstExprIterator : llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *, std::random_access_iterator_tag, const Expr *const> { ConstExprIterator() : iterator_adaptor_base(nullptr) {} ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {} reference operator*() const { assert((*I)->getStmtClass() >= firstExprConstant && (*I)->getStmtClass() <= lastExprConstant); return *reinterpret_cast<const Expr *const *>(I); } }; private: /// \brief Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// \brief Construct an empty statement. explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt(StmtClass SC) { static_assert(sizeof(*this) % llvm::AlignOf<void *>::Alignment == 0, "Insufficient alignment!"); StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. 
We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getLocStart() const LLVM_READONLY; SourceLocation getLocEnd() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// \brief Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; void dump(raw_ostream &OS) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip past any implicit AST nodes which might surround this /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes. Stmt *IgnoreImplicit(); /// \brief Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. Stmt *IgnoreContainers(bool IgnoreCaptured = false); const Stmt *stripLabelLikeStatements() const; Stmt *stripLabelLikeStatements() { return const_cast<Stmt*>( const_cast<const Stmt*>(this)->stripLabelLikeStatements()); } /// Child Iterators: All subclasses must implement 'children' /// to permit easy iteration over the substatements/subexpressions of an /// AST node. This permits easy iteration over all nodes in the AST. typedef StmtIterator child_iterator; typedef ConstStmtIterator const_child_iterator; typedef llvm::iterator_range<child_iterator> child_range; typedef llvm::iterator_range<const_child_iterator> const_child_range; child_range children(); const_child_range children() const { auto Children = const_cast<Stmt *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_iterator child_begin() { return children().begin(); } child_iterator child_end() { return children().end(); } const_child_iterator child_begin() const { return children().begin(); } const_child_iterator child_end() const { return children().end(); } /// \brief Produce a unique representation of the given statement. /// /// \param ID once the profiling operation is complete, will contain /// the unique representation of the given statement. /// /// \param Context the AST context in which the statement resides /// /// \param Canonical whether the profile should be based on the canonical /// representation of this statement (e.g., where non-type template /// parameters are identified by index/level rather than their /// declaration pointers) or the exact representation of the statement as /// written in the source. void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool Canonical) const; }; /// DeclStmt - Adaptor class for mixing declarations with statements and /// expressions. For example, CompoundStmt mixes statements, expressions /// and declarations (variables, types). Another example is ForStmt, where /// the first statement can be an expression or a declaration. 
/// class DeclStmt : public Stmt { DeclGroupRef DG; SourceLocation StartLoc, EndLoc; public: DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {} /// \brief Build an empty declaration statement. explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { } /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } SourceLocation getStartLoc() const { return StartLoc; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } typedef DeclGroupRef::iterator decl_iterator; typedef DeclGroupRef::const_iterator const_decl_iterator; typedef llvm::iterator_range<decl_iterator> decl_range; typedef llvm::iterator_range<const_decl_iterator> decl_const_range; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { SourceLocation SemiLoc; /// \brief True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode bool HasLeadingEmptyMacro; public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass), SemiLoc(L), HasLeadingEmptyMacro(hasLeadingEmptyMacro) {} /// \brief Build an empty null statement. explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty), HasLeadingEmptyMacro(false) { } SourceLocation getSemiLoc() const { return SemiLoc; } void setSemiLoc(SourceLocation L) { SemiLoc = L; } bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; } SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(child_iterator(), child_iterator()); } friend class ASTStmtReader; friend class ASTStmtWriter; }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. 
/// class CompoundStmt : public Stmt { Stmt** Body; SourceLocation LBraceLoc, RBraceLoc; friend class ASTStmtReader; public: CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts, SourceLocation LB, SourceLocation RB); // \brief Build an empty compound statement with a location. explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) { CompoundStmtBits.NumStmts = 0; } // \brief Build an empty compound statement. explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty), Body(nullptr) { CompoundStmtBits.NumStmts = 0; } void setStmts(const ASTContext &C, ArrayRef<Stmt *> Stmts); bool body_empty() const { return CompoundStmtBits.NumStmts == 0; } unsigned size() const { return CompoundStmtBits.NumStmts; } typedef Stmt** body_iterator; typedef llvm::iterator_range<body_iterator> body_range; body_range body() { return body_range(body_begin(), body_end()); } body_iterator body_begin() { return Body; } body_iterator body_end() { return Body + size(); } Stmt *body_front() { return !body_empty() ? Body[0] : nullptr; } Stmt *body_back() { return !body_empty() ? Body[size()-1] : nullptr; } void setLastStmt(Stmt *S) { assert(!body_empty() && "setLastStmt"); Body[size()-1] = S; } typedef Stmt* const * const_body_iterator; typedef llvm::iterator_range<const_body_iterator> body_const_range; body_const_range body() const { return body_const_range(body_begin(), body_end()); } const_body_iterator body_begin() const { return Body; } const_body_iterator body_end() const { return Body + size(); } const Stmt *body_front() const { return !body_empty() ? Body[0] : nullptr; } const Stmt *body_back() const { return !body_empty() ? Body[size() - 1] : nullptr; } typedef std::reverse_iterator<body_iterator> reverse_body_iterator; reverse_body_iterator body_rbegin() { return reverse_body_iterator(body_end()); } reverse_body_iterator body_rend() { return reverse_body_iterator(body_begin()); } typedef std::reverse_iterator<const_body_iterator> const_reverse_body_iterator; const_reverse_body_iterator body_rbegin() const { return const_reverse_body_iterator(body_end()); } const_reverse_body_iterator body_rend() const { return const_reverse_body_iterator(body_begin()); } SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; } SourceLocation getLBracLoc() const { return LBraceLoc; } SourceLocation getRBracLoc() const { return RBraceLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == CompoundStmtClass; } // Iterators child_range children() { return child_range(Body, Body + CompoundStmtBits.NumStmts); } const_child_range children() const { return const_child_range(child_iterator(Body), child_iterator(Body + CompoundStmtBits.NumStmts)); } }; // SwitchCase is the base class for CaseStmt and DefaultStmt, class SwitchCase : public Stmt { protected: // A pointer to the following CaseStmt or DefaultStmt class, // used by SwitchStmt. 
SwitchCase *NextSwitchCase; SourceLocation KeywordLoc; SourceLocation ColonLoc; SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc) : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc), ColonLoc(ColonLoc) { } SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC), NextSwitchCase(nullptr) {} public: const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; } SwitchCase *getNextSwitchCase() { return NextSwitchCase; } void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; } SourceLocation getKeywordLoc() const { return KeywordLoc; } void setKeywordLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Stmt *getSubStmt(); const Stmt *getSubStmt() const { return const_cast<SwitchCase*>(this)->getSubStmt(); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY; static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass || T->getStmtClass() == DefaultStmtClass; } }; class CaseStmt : public SwitchCase { SourceLocation EllipsisLoc; enum { LHS, RHS, SUBSTMT, END_EXPR }; Stmt* SubExprs[END_EXPR]; // The expression for the RHS is Non-null for // GNU "case 1 ... 4" extension public: CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc) : SwitchCase(CaseStmtClass, caseLoc, colonLoc) { SubExprs[SUBSTMT] = nullptr; SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs); SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs); EllipsisLoc = ellipsisLoc; } /// \brief Build an empty switch case statement. explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { } SourceLocation getCaseLoc() const { return KeywordLoc; } void setCaseLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getEllipsisLoc() const { return EllipsisLoc; } void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); } Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); } Stmt *getSubStmt() { return SubExprs[SUBSTMT]; } const Expr *getLHS() const { return reinterpret_cast<const Expr*>(SubExprs[LHS]); } const Expr *getRHS() const { return reinterpret_cast<const Expr*>(SubExprs[RHS]); } const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; } void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; } void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); } void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. const CaseStmt *CS = this; while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[END_EXPR]); } }; class DefaultStmt : public SwitchCase { Stmt* SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// \brief Build an empty default statement. 
explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) { } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return KeywordLoc; } void setDefaultLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt+1); } }; inline SourceLocation SwitchCase::getLocEnd() const { if (const CaseStmt *CS = dyn_cast<CaseStmt>(this)) return CS->getLocEnd(); return cast<DefaultStmt>(this)->getLocEnd(); } /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; /// class LabelStmt : public Stmt { SourceLocation IdentLoc; LabelDecl *TheDecl; Stmt *SubStmt; public: LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) { static_assert(sizeof(LabelStmt) == 2 * sizeof(SourceLocation) + 2 * sizeof(void *), "LabelStmt too big"); } // \brief Build an empty label statement. explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { } SourceLocation getIdentLoc() const { return IdentLoc; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setIdentLoc(SourceLocation L) { IdentLoc = L; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// \brief Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... } /// class AttributedStmt : public Stmt { Stmt *SubStmt; SourceLocation AttrLoc; unsigned NumAttrs; friend class ASTStmtReader; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt) : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc), NumAttrs(Attrs.size()) { std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr()); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) { std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr); } const Attr *const *getAttrArrayPtr() const { return reinterpret_cast<const Attr *const *>(this + 1); } const Attr **getAttrArrayPtr() { return reinterpret_cast<const Attr **>(this + 1); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); // \brief Build an empty attributed statement. 
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttrLoc; } ArrayRef<const Attr*> getAttrs() const { return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. /// class IfStmt : public Stmt { enum { VAR, COND, THEN, ELSE, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation IfLoc; SourceLocation ElseLoc; public: IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond, Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = nullptr); /// \brief Build an empty if/then/else statement explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "if" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } const Stmt *getThen() const { return SubExprs[THEN]; } void setThen(Stmt *S) { SubExprs[THEN] = S; } const Stmt *getElse() const { return SubExprs[ELSE]; } void setElse(Stmt *S) { SubExprs[ELSE] = S; } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Stmt *getThen() { return SubExprs[THEN]; } Stmt *getElse() { return SubExprs[ELSE]; } SourceLocation getIfLoc() const { return IfLoc; } void setIfLoc(SourceLocation L) { IfLoc = L; } SourceLocation getElseLoc() const { return ElseLoc; } void setElseLoc(SourceLocation L) { ElseLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; } SourceLocation getLocEnd() const LLVM_READONLY { if (SubExprs[ELSE]) return SubExprs[ELSE]->getLocEnd(); else return SubExprs[THEN]->getLocEnd(); } // Iterators over subexpressions. The iterators will include iterating // over the initialization expression referenced by the condition variable. child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == IfStmtClass; } }; /// SwitchStmt - This represents a 'switch' stmt. /// class SwitchStmt : public Stmt { SourceLocation SwitchLoc; enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // This points to a linked list of case and default statements and, if the // SwitchStmt is a switch on an enum value, records whether all the enum // values were covered by CaseStmts. The coverage information value is meant // to be a hint for possible clients. llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase; public: SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond); /// \brief Build a empty switch statement. 
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Stmt *getBody() const { return SubExprs[BODY]; } const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } Stmt *getBody() { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); } /// \brief Set the case list for this switch statement. void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); } SourceLocation getSwitchLoc() const { return SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { SubExprs[BODY] = S; SwitchLoc = SL; } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase.getPointer()); FirstCase.setPointer(SC); } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. void setAllEnumCasesCovered() { FirstCase.setInt(true); } /// Returns true if the SwitchStmt is a switch of an enum value and all cases /// have been explicitly covered. bool isAllEnumCasesCovered() const { return FirstCase.getInt(); } SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd() : SubExprs[COND]->getLocEnd(); } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == SwitchStmtClass; } }; /// WhileStmt - This represents a 'while' stmt. /// class WhileStmt : public Stmt { SourceLocation WhileLoc; enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; public: WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body, SourceLocation WL); /// \brief Build an empty while statement. explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// DoStmt - This represents a 'do/while' stmt. /// class DoStmt : public Stmt { SourceLocation DoLoc; enum { BODY, COND, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) { SubExprs[COND] = reinterpret_cast<Stmt*>(cond); SubExprs[BODY] = body; } /// \brief Build an empty do-while statement. explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getDoLoc() const { return DoLoc; } void setDoLoc(SourceLocation L) { DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. /// class ForStmt : public Stmt { SourceLocation ForLoc; enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// \brief Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { } Stmt *getInit() { return SubExprs[INIT]; } /// \brief Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. /// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... 
/// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForLoc; } void setForLoc(SourceLocation L) { ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// GotoStmt - This represents a direct goto. /// class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation GotoLoc; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {} /// \brief Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { } LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoLoc; } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// IndirectGotoStmt - This represents an indirect goto. /// class IndirectGotoStmt : public Stmt { SourceLocation GotoLoc; SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc), Target((Stmt*)target) {} /// \brief Build an empty indirect goto statement. 
explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) { } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr*>(Target); } const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);} void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt*>(this)->getConstantTarget(); } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target+1); } }; /// ContinueStmt - This represents a continue. /// class ContinueStmt : public Stmt { SourceLocation ContinueLoc; public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {} /// \brief Build an empty continue statement. explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { } SourceLocation getContinueLoc() const { return ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// BreakStmt - This represents a break. /// class BreakStmt : public Stmt { SourceLocation BreakLoc; public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) { static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation), "BreakStmt too large"); } /// \brief Build an empty break statement. explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { } SourceLocation getBreakLoc() const { return BreakLoc; } void setBreakLoc(SourceLocation L) { BreakLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. /// class ReturnStmt : public Stmt { SourceLocation RetLoc; Stmt *RetExpr; const VarDecl *NRVOCandidate; public: explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {} ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate) : Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E), NRVOCandidate(NRVOCandidate) {} /// \brief Build an empty return expression. 
explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { } const Expr *getRetValue() const; Expr *getRetValue(); void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); } SourceLocation getReturnLoc() const { return RetLoc; } void setReturnLoc(SourceLocation L) { RetLoc = L; } /// \brief Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. const VarDecl *getNRVOCandidate() const { return NRVOCandidate; } void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; } SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RetExpr ? RetExpr->getLocEnd() : RetLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr+1); return child_range(child_iterator(), child_iterator()); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. /// class AsmStmt : public Stmt { protected: SourceLocation AsmLoc; /// \brief True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// \brief If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { } friend class ASTStmtReader; public: /// \brief Build an empty inline-assembly statement. explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty), Exprs(nullptr) { } SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); } SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. 
StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. typedef ExprIterator inputs_iterator; typedef ConstExprIterator const_inputs_iterator; typedef llvm::iterator_range<inputs_iterator> inputs_range; typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. typedef ExprIterator outputs_iterator; typedef ConstExprIterator const_outputs_iterator; typedef llvm::iterator_range<outputs_iterator> outputs_range; typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. /// class GCCAsmStmt : public AsmStmt { SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. StringLiteral **Constraints; StringLiteral **Clobbers; IdentifierInfo **Names; friend class ASTStmtReader; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// \brief Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty), Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) { } bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. 
int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. /// class MSAsmStmt : public AsmStmt { SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks; Token *AsmToks; StringRef *Constraints; StringRef *Clobbers; friend class ASTStmtReader; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// \brief Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty), NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { } SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit 
SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { } public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { } public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getLocEnd(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); friend class ASTReader; friend class ASTStmtReader; explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { } public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. /// class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// \brief Build an empty __leave statement. 
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { } SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// \brief The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// \brief Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: /// \brief Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. /// Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr); /// \brief Determine the kind of capture. VariableCaptureKind getCaptureKind() const { return VarAndKind.getInt(); } /// \brief Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// \brief Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// \brief Determine whether this capture handles a variable (by reference). bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// \brief Determine whether this capture handles a variable by copy. bool capturesVariableByCopy() const { return getCaptureKind() == VCK_ByCopy; } /// \brief Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// \brief Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const { assert((capturesVariable() || capturesVariableByCopy()) && "No variable available for 'this' or VAT capture"); return VarAndKind.getPointer(); } friend class ASTStmtReader; }; private: /// \brief The number of variable captured, including 'this'. unsigned NumCaptures; /// \brief The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind; /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl; /// \brief Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// \brief Construct an empty captured statement. 
CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// \brief Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// \brief Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl() { return CapDeclAndKind.getPointer(); } const CapturedDecl *getCapturedDecl() const { return CapDeclAndKind.getPointer(); } /// \brief Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D) { assert(D && "null CapturedDecl"); CapDeclAndKind.setPointer(D); } /// \brief Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const { return CapDeclAndKind.getInt(); } /// \brief Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind) { CapDeclAndKind.setInt(Kind); } /// \brief Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// \brief Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// \brief True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// \brief An iterator that walks over the captures. typedef Capture *capture_iterator; typedef const Capture *const_capture_iterator; typedef llvm::iterator_range<capture_iterator> capture_range; typedef llvm::iterator_range<const_capture_iterator> capture_const_range; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// \brief Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// \brief Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// \brief Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// \brief Iterator that walks over the capture initialization arguments. typedef Expr **capture_init_iterator; typedef llvm::iterator_range<capture_init_iterator> capture_init_range; /// \brief Const iterator that walks over the capture initialization /// arguments. typedef Expr *const *const_capture_init_iterator; typedef llvm::iterator_range<const_capture_init_iterator> const_capture_init_range; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// \brief Retrieve the first initialization argument. 
capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// \brief Retrieve the iterator pointing one past the last initialization /// argument. capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getLocStart() const LLVM_READONLY { return getCapturedStmt()->getLocStart(); } SourceLocation getLocEnd() const LLVM_READONLY { return getCapturedStmt()->getLocEnd(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); friend class ASTStmtReader; }; } // end namespace clang #endif
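// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header): a few typical ways
// client code consumes the Stmt API declared above. The helper names here
// (describe, visitChildren, countTopLevelStmts, countCases) are hypothetical;
// only the Stmt/CompoundStmt/SwitchStmt members they call come from the
// header. It compiles only against a Clang tree of the same era, since it
// relies on the getStmtClass()/children() interfaces shown above.
// ---------------------------------------------------------------------------
#include "clang/AST/Stmt.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;

// Print the dynamic statement kind stored in StmtBits.sClass.
static void describe(const Stmt *S) {
  llvm::errs() << S->getStmtClassName() << "\n";
}

// Walk the immediate children via children(); entries may be null
// (e.g. a ForStmt with an omitted init or increment), so guard each one.
static void visitChildren(const Stmt *S) {
  for (const Stmt *Child : S->children())
    if (Child)
      describe(Child);
}

// CompoundStmt exposes its statements through body(); the count below
// matches CS->size().
static unsigned countTopLevelStmts(const CompoundStmt *CS) {
  unsigned N = 0;
  for (const Stmt *S : CS->body()) {
    (void)S;
    ++N;
  }
  return N;
}

// SwitchStmt chains its cases in a singly linked list; addSwitchCase()
// prepends, so this visits them in reverse source order.
static unsigned countCases(const SwitchStmt *SS) {
  unsigned N = 0;
  for (const SwitchCase *SC = SS->getSwitchCaseList(); SC;
       SC = SC->getNextSwitchCase())
    ++N;
  return N;
}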
condense.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <[email protected]> */ #include <math.h> #define MIN(X,Y) ((X)<(Y) ? (X) : (Y)) #define MAX(X,Y) ((X)>(Y) ? (X) : (Y)) /* * def condense(op, a, loc): * nd = loc[-1] * out = numpy.empty((nd,nd)) * for i,i0 in enumerate(loc): * i1 = loc[i+1] * for j,j0 in enumerate(loc): * j1 = loc[j+1] * out[i,j] = op(a[i0:i1,j0:j1]) * return out */ void NPcondense(double (*op)(double *, int, int, int), double *out, double *a, int *loc_x, int *loc_y, int nloc_x, int nloc_y) { const int nj = loc_y[nloc_y]; #pragma omp parallel { int i, j, i0, j0, di, dj; #pragma omp for for (i = 0; i < nloc_x; i++) { i0 = loc_x[i]; di = loc_x[i+1] - i0; for (j = 0; j < nloc_y; j++) { j0 = loc_y[j]; dj = loc_y[j+1] - j0; out[i*nloc_y+j] = op(a+i0*nj+j0, nj, di, dj); } } } } double NP_sum(double *a, int nd, int di, int dj) { int i, j; double out = 0; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out += a[i*nd+j]; } } return out; } double NP_max(double *a, int nd, int di, int dj) { int i, j; double out = a[0]; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MAX(out, a[i*nd+j]); } } return out; } double NP_min(double *a, int nd, int di, int dj) { int i, j; double out = a[0]; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MIN(out, a[i*nd+j]); } } return out; } double NP_abssum(double *a, int nd, int di, int dj) { int i, j; double out = 0; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out += fabs(a[i*nd+j]); } } return out; } double NP_absmax(double *a, int nd, int di, int dj) { int i, j; double out = fabs(a[0]); for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MAX(out, fabs(a[i*nd+j])); } } return out; } double NP_absmin(double *a, int nd, int di, int dj) { int i, j; double out = fabs(a[0]); for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MIN(out, fabs(a[i*nd+j])); } } return out; } double NP_norm(double *a, int nd, int di, int dj) { int i, j; double out = 0; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out += a[i*nd+j] * a[i*nd+j]; } } return sqrt(out); }
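/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the original file): driving NPcondense
 * from C, mirroring the Python pseudocode in the comment above. A row-major
 * 4x4 matrix is split into 2x2 blocks by loc = {0, 2, 4}, and each block is
 * condensed with NP_sum, giving a 2x2 result. The prototypes below simply
 * restate the signatures defined above; link against this file (and build
 * with -fopenmp for the parallel version).
 * ------------------------------------------------------------------------- */
#include <stdio.h>

void NPcondense(double (*op)(double *, int, int, int), double *out, double *a,
                int *loc_x, int *loc_y, int nloc_x, int nloc_y);
double NP_sum(double *a, int nd, int di, int dj);

int main(void)
{
        double a[16];
        double out[4];
        int loc[3] = {0, 2, 4};
        int i;
        for (i = 0; i < 16; i++) {
                a[i] = i;       /* a[r*4+c] = 4*r + c */
        }
        NPcondense(NP_sum, out, a, loc, loc, 2, 2);
        /* sums of the four 2x2 blocks: 10 18 42 50 */
        printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
        return 0;
}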
#include <math.h> #define MIN(X,Y) ((X)<(Y) ? (X) : (Y)) #define MAX(X,Y) ((X)>(Y) ? (X) : (Y)) /* * def condense(op, a, loc): nd = loc[-1] out = numpy.empty((nd,nd)) for i,i0 * in enumerate(loc): i1 = loc[i+1] for j,j0 in enumerate(loc): j1 = loc[j+1] * out[i,j] = op(a[i0:i1,j0:j1]) return out */ void NPcondense(double (*op) (double *, int, int, int), double *out, double *a, int *loc_x, int *loc_y, int nloc_x, int nloc_y) { const int nj = loc_y[nloc_y]; int i, j, i0, j0, di, dj; for (i = 0; i < nloc_x; i++) { i0 = loc_x[i]; di = loc_x[i + 1] - i0; for (j = 0; j < nloc_y; j++) { j0 = loc_y[j]; dj = loc_y[j + 1] - j0; out[i * nloc_y + j] = op(a + i0 * nj + j0, nj, di, dj); } } } double NP_sum(double *a, int nd, int di, int dj) { int i, j; double out = 0; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out += a[i * nd + j]; } } return out; } double NP_max(double *a, int nd, int di, int dj) { int i, j; double out = a[0]; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MAX(out, a[i * nd + j]); } } return out; } double NP_min(double *a, int nd, int di, int dj) { int i, j; double out = a[0]; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MIN(out, a[i * nd + j]); } } return out; } double NP_abssum(double *a, int nd, int di, int dj) { int i, j; double out = 0; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out += fabs(a[i * nd + j]); } } return out; } double NP_absmax(double *a, int nd, int di, int dj) { int i, j; double out = fabs(a[0]); for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MAX(out, fabs(a[i * nd + j])); } } return out; } double NP_absmin(double *a, int nd, int di, int dj) { int i, j; double out = fabs(a[0]); for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MIN(out, fabs(a[i * nd + j])); } } return out; } double NP_norm(double *a, int nd, int di, int dj) { int i, j; double out = 0; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out += a[i * nd + j] * a[i * nd + j]; } } return sqrt(out); }
#include <math.h> #define MIN(X,Y) ((X)<(Y) ? (X) : (Y)) #define MAX(X,Y) ((X)>(Y) ? (X) : (Y)) /* * def condense(op, a, loc): nd = loc[-1] out = numpy.empty((nd,nd)) for i,i0 * in enumerate(loc): i1 = loc[i+1] for j,j0 in enumerate(loc): j1 = loc[j+1] * out[i,j] = op(a[i0:i1,j0:j1]) return out */ void NPcondense(double (*op) (double *, int, int, int), double *out, double *a, int *loc_x, int *loc_y, int nloc_x, int nloc_y) { const int nj = loc_y[nloc_y]; #pragma omp parallel { int i, j, i0, j0, di, dj; #pragma omp for for (i = 0; i < nloc_x; i++) { i0 = loc_x[i]; di = loc_x[i + 1] - i0; for (j = 0; j < nloc_y; j++) { j0 = loc_y[j]; dj = loc_y[j + 1] - j0; out[i * nloc_y + j] = op(a + i0 * nj + j0, nj, di, dj); } } } } double NP_sum(double *a, int nd, int di, int dj) { int i, j; double out = 0; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out += a[i * nd + j]; } } return out; } double NP_max(double *a, int nd, int di, int dj) { int i, j; double out = a[0]; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MAX(out, a[i * nd + j]); } } return out; } double NP_min(double *a, int nd, int di, int dj) { int i, j; double out = a[0]; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MIN(out, a[i * nd + j]); } } return out; } double NP_abssum(double *a, int nd, int di, int dj) { int i, j; double out = 0; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out += fabs(a[i * nd + j]); } } return out; } double NP_absmax(double *a, int nd, int di, int dj) { int i, j; double out = fabs(a[0]); for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MAX(out, fabs(a[i * nd + j])); } } return out; } double NP_absmin(double *a, int nd, int di, int dj) { int i, j; double out = fabs(a[0]); for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out = MIN(out, fabs(a[i * nd + j])); } } return out; } double NP_norm(double *a, int nd, int di, int dj) { int i, j; double out = 0; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { out += a[i * nd + j] * a[i * nd + j]; } } return sqrt(out); }
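The three condense.c entries above are the same block-reduction kernel: loc_x/loc_y partition the rows and columns of a, and each (di x dj) block is collapsed to a scalar by the op callback. A minimal driver sketch follows; the file name and compile line are assumptions, not part of the source, and it is meant to be built together with condense.c (e.g. cc -fopenmp driver.c condense.c -lm):

/* driver.c: hypothetical example, condense a 4x4 matrix into 2x2 block sums. */
#include <stdio.h>

/* Prototypes matching the definitions in condense.c above. */
void NPcondense(double (*op)(double *, int, int, int), double *out,
                double *a, int *loc_x, int *loc_y, int nloc_x, int nloc_y);
double NP_sum(double *a, int nd, int di, int dj);

int main(void)
{
    /* 4x4 matrix partitioned into 2x2 blocks by loc = {0, 2, 4}. */
    double a[16] = { 1, 1, 2, 2,
                     1, 1, 2, 2,
                     3, 3, 4, 4,
                     3, 3, 4, 4 };
    int loc[3] = { 0, 2, 4 };
    double out[4];

    NPcondense(NP_sum, out, a, loc, loc, 2, 2);
    /* Each output element is the sum of one 2x2 block: 4 8 / 12 16. */
    printf("%g %g\n%g %g\n", out[0], out[1], out[2], out[3]);
    return 0;
}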
wpapsk.h
/* * This software is Copyright (c) 2012 Lukas Odzioba <lukas dot odzioba at gmail dot com> * and Copyright (c) 2012-2014 magnum * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. * * hccap format was introduced by oclHashcat-plus (now renamed to hashcat), * and it is described here: http://hashcat.net/wiki/hccap * Code is based on Aircrack-ng source */ #ifndef _WPAPSK_H #define _WPAPSK_H #include <stdint.h> #include <assert.h> #if HAVE_OPENSSL_CMAC_H #include <openssl/cmac.h> #endif #include "arch.h" #include "params.h" #include "common.h" #include "johnswap.h" #include "hmacmd5.h" #include "hmac_sha.h" #include "sha2.h" #include "hccap.h" #define BINARY_SIZE sizeof(mic_t) #define BINARY_ALIGN 4 #define PLAINTEXT_LENGTH 63 /* We can do 64 but spec. says 63 */ #define SALT_SIZE (sizeof(hccap_t) - sizeof(mic_t)) #define SALT_ALIGN MEM_ALIGN_NONE #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define FORMAT_TAG "$WPAPSK$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) typedef struct { unsigned char keymic[16]; } mic_t; typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH + 1]; } wpapsk_password; typedef struct { uint32_t v[8]; } wpapsk_hash; typedef struct { uint32_t length; #ifdef JOHN_OCL_WPAPSK uint8_t eapol[256 + 64]; uint32_t eapol_size; uint8_t data[64 + 12]; #endif uint8_t salt[36]; // essid } wpapsk_salt; static struct fmt_tests tests[] = { /* WPA2 testcase from http://wiki.wireshark.org/SampleCaptures */ {"$WPAPSK$Coherer#..l/Uf7J..qHUXMunTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosMyXdNxfBZUAYmgKqeb6GBPxLiIZr56NtWTGR/Cp5ldAk61.5I0.Ec.2...........nTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosM.................................................................3X.I.E..1uk0.E..1uk2.E..1uk0....................................................................................................................................................................................../t.....U...8FWdk8OpPckhewBwt4MXYI", "Induction"}, {"$WPAPSK$Harkonen#./FgTY0../B4zX6AKFO9kuLT4BQSyqEXwo.6XOiS4u8vlMNNs5grN91SVL.WK3GkF2rXfkPFGGi38MHkHDMbH.sm49Vc3pO4HPSUJE21.5I0.Ec.2........../KFO9kuLT4BQSyqEXwo.6XOiS4u8vlMNNs5grN91SVL..................................................................3X.I.E..1uk2.E..1uk2.E..1uk0.E..................................................................................................................................................................................../t.....U...BIpIs8sePU4r8yNnOxKHfM", "12345678"}, /* WPA (MD5), from aircrack-ng tests */ {"$WPAPSK$test#..qHuv0A..ZPYJBRzZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsRIfQN2Zar6EXp2BYcRuSkWEJIWjEJJvb4DWZCspbZ51.21.3zy.EY.6........../zZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsQ..................................................................BoK.31m.E2..31m.U2..31m.U2..31m.U................................................................................................................................................................................/X.....E...AkkDQmDg9837LBHG.dGlKA", "biscotte"}, /* Maximum length, 63 characters */ {"$WPAPSK$Greased 
Lighting#kA5.CDNB.07cofsOMXEEUwFTkO/RX2sQUaW9eteI8ynpFMwRgFZC6kk7bGqgvfcXnuF1f7L5fgn4fQMLmDrKjdBNjb6LClRmfLiTYk21.5I0.Ec............7MXEEUwFTkO/RX2sQUaW9eteI8ynpFMwRgFZC6kk7bGo.................................................................3X.I.E..1uk2.E..1uk2.E..1uk00...................................................................................................................................................................................../t.....U...D06LUdWVfGPaP1Oa3AV9Hg", "W*A5z&1?op2_L&Hla-OA$#5i_Lu@F+6d?je?u5!6+6766eluu7-l+jOEkIwLe90"}, {"$WPAPSK$hello#JUjQmBbOHUY4RTqMpGc9EjqGdCxMZPWNXBNd1ejNDoFuemrLl27juYlDDUDMgZfery1qJTHYVn2Faso/kUDDjr3y8gspK7viz8BCJE21.5I0.Ec............/pGc9EjqGdCxMZPWNXBNd1ejNDoFuemrLl27juYlDDUA.................................................................3X.I.E..1uk2.E..1uk2.E..1uk0....................................................................................................................................................................................../t.....U...9Py59nqygwiar49oOKA3RY", "12345678"}, #if HAVE_OPENSSL_CMAC_H || defined(JOHN_OCL_WPAPSK) /* 802.11w with WPA-PSK-SHA256 */ {"$WPAPSK$hello#HY6.hTXZv.v27BkPGuhkCnLAKxYHlTWYs.4yuqVSNAip3SeixhErtNMV30LZAA3uaEfy2U2tJQi.VICk4hqn3V5m7W3lNHSJYW5vLE21.5I0.Eg............/GuhkCnLAKxYHlTWYs.4yuqVSNAip3SeixhErtNMV30I.................................................................3X.I.E..1uk2.E..1uk2.E..1uk4....................................................................................................................................................................................../t.....k.../Ms4UxzvlNw5hOM1igIeo6", "password"}, /* 802.11w with WPA-PSK-SHA256, https://github.com/neheb */ {"$WPAPSK$Neheb#g9a8Jcre9D0WrPnEN4QXDbA5NwAy5TVpkuoChMdFfL/8Dus4i/X.lTnfwuw04ASqHgvo12wJYJywulb6pWM6C5uqiMPNKNe9pkr6LE61.5I0.Eg.2..........1N4QXDbA5NwAy5TVpkuoChMdFfL/8Dus4i/X.lTnfwuw.................................................................3X.I.E..1uk2.E..1uk2.E..1uk4X...................................................................................................................................................................................../t.....k...0sHl.mVkiHW.ryNchcMd4g", "bo$$password"}, #endif {NULL} }; /** Below are common variables used by wpapsk_fmt.c and opencl_wpapsk_fmt.c **/ static hccap_t hccap; ///structure with hccap data static wpapsk_salt currentsalt; ///structure for essid static mic_t *mic; ///table for MIC keys #ifndef JOHN_OCL_WPAPSK static wpapsk_password *inbuffer; ///table for candidate passwords static wpapsk_hash *outbuffer; ///table for PMK calculated by GPU #endif static int new_keys = 1; static char last_ssid[sizeof(hccap.essid)]; /** Below are common functions used by wpapsk_fmt.c and opencl_wpapsk_fmt.c **/ static hccap_t *decode_hccap(char *ciphertext) { static hccap_t hccap; char *essid = ciphertext + FORMAT_TAG_LEN; char *hash = strrchr(ciphertext, '#'); char *d = hccap.essid; char *cap = hash + 1; unsigned char tbuf[sizeof(hccap_t)]; unsigned char *dst = tbuf; int i; memset(&hccap, 0, sizeof(hccap)); if (hash == NULL) return &hccap; while (essid != hash) { ///copy essid to hccap *d++ = *essid++; } *d = '\0'; assert(*essid == '#'); for (i = 0; i < 118; i++) { dst[0] = (atoi64[ARCH_INDEX(cap[0])] << 2) | (atoi64[ARCH_INDEX(cap[1])] >> 4); dst[1] = (atoi64[ARCH_INDEX(cap[1])] << 4) | (atoi64[ARCH_INDEX(cap[2])] >> 2); dst[2] = (atoi64[ARCH_INDEX(cap[2])] << 6) | (atoi64[ARCH_INDEX(cap[3])]); dst += 3; cap += 4; } dst[0] = (atoi64[ARCH_INDEX(cap[0])] << 2) | 
(atoi64[ARCH_INDEX(cap[1])] >> 4); dst[1] = (atoi64[ARCH_INDEX(cap[1])] << 4) | (atoi64[ARCH_INDEX(cap[2])] >> 2); /* This emits warnings on some compilers */ //memcpy(&hccap.mac1,tbuf,sizeof(hccap_t)-36); memcpy(((char*)&hccap) + 36, tbuf, sizeof(hccap_t) - 36); #if !ARCH_LITTLE_ENDIAN hccap.eapol_size = JOHNSWAP(hccap.eapol_size); hccap.keyver = JOHNSWAP(hccap.keyver); #endif return &hccap; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; uint32_t dummy; } binary; hccap_t *hccap = decode_hccap(ciphertext); memcpy(binary.c, hccap->keymic, BINARY_SIZE); return binary.c; } static void *get_salt(char *ciphertext) { static hccap_t s; memcpy(&s, decode_hccap(ciphertext), SALT_SIZE); return &s; } static int valid(char *ciphertext, struct fmt_main *self) { char *hash; int hashlength = 0; hccap_t *hccap; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; hash = strrchr(ciphertext, '#'); if (hash == NULL || hash - (ciphertext + FORMAT_TAG_LEN) > 32) return 0; hash++; while (hash < ciphertext + strlen(ciphertext)) { if (atoi64[ARCH_INDEX(*hash++)] == 0x7f) return 0; hashlength++; } if (hashlength != 475) return 0; hccap = decode_hccap(ciphertext); if (strlen(hccap->essid) > 32) /* real life limit */ return 0; if (hccap->eapol_size > 256) return 0; if (hccap->eapol_size < 0) return 0; if (hccap->keyver < 1) return 0; #if HAVE_OPENSSL_CMAC_H || defined(JOHN_OCL_WPAPSK) if (hccap->keyver > 3) return 0; #else if (hccap->keyver > 2) return 0; #endif return 1; } #ifndef JOHN_OCL_WPAPSK static MAYBE_INLINE void prf_512(uint32_t * key, uint8_t * data, uint32_t * ret) { char *text = (char*)"Pairwise key expansion"; unsigned char buff[100]; memcpy(buff, text, 22); memcpy(buff + 23, data, 76); buff[22] = 0; buff[76 + 23] = 0; hmac_sha1((unsigned char*)key, 32, buff, 100, (unsigned char*)ret, 20); } #endif static void insert_mac(uint8_t * data) { int k = memcmp(hccap.mac1, hccap.mac2, 6); if (k > 0) { memcpy(data, hccap.mac2, 6); memcpy(data + 6, hccap.mac1, 6); } else { memcpy(data, hccap.mac1, 6); memcpy(data + 6, hccap.mac2, 6); } } static void insert_nonce(uint8_t * data) { int k = memcmp(hccap.nonce1, hccap.nonce2, 32); if (k > 0) { memcpy(data, hccap.nonce2, 32); memcpy(data + 32, hccap.nonce1, 32); } else { memcpy(data, hccap.nonce1, 32); memcpy(data + 32, hccap.nonce2, 32); } } #ifdef WPAPSK_DEBUG static char *tomac(unsigned char *p) { static char buf[48]; sprintf(buf, "%02X:%02X:%02X:%02X:%02X:%02X", p[0], p[1], p[2], p[3], p[4], p[5]); return buf; } static char *hex(unsigned char *p, int len) { static char buf[1024]; char *op=buf; int i; if (len > 32) { do { for (i = 0; i < 32; ++i) { op += sprintf (op, "%02X", p[i]); if (i<31&&i%4==3) op += sprintf (op, " "); if (i==15) op += sprintf (op, ": "); } len -= 32; p += 32; op += sprintf (op, "\n "); } while (len > 32); } for (i = 0; i < len; ++i) { op += sprintf (op, "%02X", p[i]); if (i<31&&i%4==3) op += sprintf (op, " "); if (i==15) op += sprintf (op, ": "); } return buf; } static void Debug_hccap() { printf("essid: %s\n", hccap.essid); printf("mac1: %s\n", tomac(hccap.mac1)); printf("mac2: %s\n", tomac(hccap.mac2)); printf("nonce1: %s\n", hex(hccap.nonce1, 32)); printf("nonce2: %s\n", hex(hccap.nonce2, 32)); printf("eapol: %s\n", hex(hccap.eapol, 256)); printf("epol_sz: %d (0x%02X)\n", hccap.eapol_size, hccap.eapol_size); printf("keyver: %d\n", hccap.keyver); printf("keymic: %s\n", hex(hccap.keymic, 16)); } #endif static void set_salt(void *salt) { memcpy(&hccap, salt, SALT_SIZE); 
strncpy((char*)currentsalt.salt, hccap.essid, sizeof(currentsalt.salt)); currentsalt.length = strlen(hccap.essid); #ifdef JOHN_OCL_WPAPSK currentsalt.eapol_size = hccap.eapol_size; memcpy(currentsalt.eapol, hccap.eapol, hccap.eapol_size); memset(currentsalt.eapol + hccap.eapol_size, 0x80, 1); memset(currentsalt.eapol + hccap.eapol_size + 1, 0, 256 + 64 - hccap.eapol_size - 1); if (hccap.keyver == 2) alter_endianity(currentsalt.eapol, 256+56); ((unsigned int*)currentsalt.eapol)[16 * ((hccap.eapol_size + 8) / 64) + ((hccap.keyver == 1) ? 14 : 15)] = (64 + hccap.eapol_size) << 3; insert_mac(currentsalt.data); insert_nonce(currentsalt.data + 12); if (hccap.keyver < 3) alter_endianity(currentsalt.data, 64 + 12); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(wpapsk_salt), &currentsalt, 0, NULL, NULL), "Copy setting to gpu"); #endif //Debug_hccap(); } #ifndef JOHN_OCL_WPAPSK static void clear_keys(void) { new_keys = 1; } #undef set_key static void set_key(char *key, int index) { uint8_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); new_keys = 1; } static char *get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint8_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, length); ret[length] = '\0'; return ret; } #if HAVE_OPENSSL_CMAC_H /* Code borrowed from https://w1.fi/wpa_supplicant/ starts */ #define SHA256_MAC_LEN 32 typedef uint16_t u16; typedef uint8_t u8; static inline void WPA_PUT_LE16(u8 *a, u16 val) { a[1] = val >> 8; a[0] = val & 0xff; } static void sha256_vector(size_t num_elem, const u8 *addr[], const size_t *len, u8 *mac) { SHA256_CTX ctx; size_t i; SHA256_Init(&ctx); for (i = 0; i < num_elem; i++) { SHA256_Update(&ctx, addr[i], len[i]); } SHA256_Final(mac, &ctx); } static void hmac_sha256_vector(const u8 *key, size_t key_len, size_t num_elem, const u8 *addr[], const size_t *len, u8 *mac) { unsigned char k_pad[64]; /* padding - key XORd with ipad/opad */ const u8 *_addr[6]; size_t _len[6], i; /* the HMAC_SHA256 transform looks like: * * SHA256(K XOR opad, SHA256(K XOR ipad, text)) * * where K is an n byte key * ipad is the byte 0x36 repeated 64 times * opad is the byte 0x5c repeated 64 times * and text is the data being protected */ /* start out by storing key in ipad */ memset(k_pad, 0, sizeof(k_pad)); memcpy(k_pad, key, key_len); /* XOR key with ipad values */ for (i = 0; i < 64; i++) k_pad[i] ^= 0x36; /* perform inner SHA256 */ _addr[0] = k_pad; _len[0] = 64; for (i = 0; i < num_elem; i++) { _addr[i + 1] = addr[i]; _len[i + 1] = len[i]; } sha256_vector(1 + num_elem, _addr, _len, mac); memset(k_pad, 0, sizeof(k_pad)); memcpy(k_pad, key, key_len); /* XOR key with opad values */ for (i = 0; i < 64; i++) k_pad[i] ^= 0x5c; /* perform outer SHA256 */ _addr[0] = k_pad; _len[0] = 64; _addr[1] = mac; _len[1] = SHA256_MAC_LEN; sha256_vector(2, _addr, _len, mac); } static void sha256_prf_bits(const u8 *key, size_t key_len, const char *label, const u8 *data, size_t data_len, u8 *buf, size_t buf_len_bits) { u16 counter = 1; size_t pos, plen; u8 hash[SHA256_MAC_LEN]; const u8 *addr[4]; size_t len[4]; u8 counter_le[2], length_le[2]; size_t buf_len = (buf_len_bits + 7) / 8; addr[0] = counter_le; len[0] = 2; addr[1] = (u8 *) label; len[1] = strlen(label); addr[2] = data; len[2] = data_len; addr[3] = length_le; len[3] = sizeof(length_le); WPA_PUT_LE16(length_le, buf_len_bits); pos = 0; while (pos < buf_len) { plen = buf_len - pos; 
WPA_PUT_LE16(counter_le, counter); if (plen >= SHA256_MAC_LEN) { hmac_sha256_vector(key, key_len, 4, addr, len, &buf[pos]); pos += SHA256_MAC_LEN; } else { hmac_sha256_vector(key, key_len, 4, addr, len, hash); memcpy(&buf[pos], hash, plen); pos += plen; break; } counter++; } /* * Mask out unused bits in the last octet if it does not use all the * bits. */ if (buf_len_bits % 8) { u8 mask = 0xff << (8 - buf_len_bits % 8); buf[pos - 1] &= mask; } } #endif /* HAVE_OPENSSL_CMAC_H */ /* Code borrowed from https://w1.fi/wpa_supplicant/ ends */ static void wpapsk_postprocess(int keys) { int i; uint8_t data[64 + 12]; insert_mac(data); insert_nonce(data + 12); if (hccap.keyver == 1) { #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic) #endif for (i = 0; i < keys; i++) { uint32_t prf[20/4]; HMACMD5Context ctx; prf_512(outbuffer[i].v, data, prf); // PTK hmac_md5_init_K16((unsigned char*)prf, &ctx); hmac_md5_update(hccap.eapol, hccap.eapol_size, &ctx); hmac_md5_final(mic[i].keymic, &ctx); } } else if (hccap.keyver == 2) { #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic) #endif for (i = 0; i < keys; i++) { uint32_t prf[20/4]; prf_512(outbuffer[i].v, data, prf); // PTK hmac_sha1((unsigned char*)prf, 16, hccap.eapol, hccap.eapol_size, mic[i].keymic, 16); } #if HAVE_OPENSSL_CMAC_H } else if (hccap.keyver == 3) { // 802.11w, WPA-PSK-SHA256 #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic) #endif for (i = 0; i < keys; i++) { unsigned char ptk[48]; unsigned char cmic[16]; size_t miclen; CMAC_CTX *ctx; sha256_prf_bits((unsigned char*)outbuffer[i].v, 32, "Pairwise key expansion", data, 76, ptk, 48 * 8); // PTK // Compute MIC ctx = CMAC_CTX_new(); CMAC_Init(ctx, ptk, 16, EVP_aes_128_cbc(), 0); CMAC_Update(ctx, hccap.eapol, hccap.eapol_size); CMAC_Final(ctx, cmic, &miclen); memcpy(mic[i].keymic, cmic, 16); CMAC_CTX_free(ctx); } #endif /* HAVE_OPENSSL_CMAC_H */ } } #endif /* #ifndef JOHN_OCL_WPAPSK */ static int binary_hash_0(void *binary) { #ifdef WPAPSK_DEBUG puts("binary"); uint32_t i, *b = binary; for (i = 0; i < 4; i++) printf("%08x ", b[i]); puts(""); #endif return ((uint32_t *) binary)[0] & PH_MASK_0; } static int get_hash_0(int index) { #ifdef WPAPSK_DEBUG int i; puts("get_hash"); uint32_t *b = (uint32_t *)mic[index].keymic; for (i = 0; i < 4; i++) printf("%08x ", b[i]); puts(""); #endif uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_0; } static int get_hash_1(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_1; } static int get_hash_2(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_2; } static int get_hash_3(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_3; } static int get_hash_4(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_4; } static int get_hash_5(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_5; } static int get_hash_6(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_6; } static int cmp_all(void *binary, int count) { uint32_t i, b = ((uint32_t *) binary)[0]; for (i = 0; i < count; i++) { uint32_t *m = (uint32_t*) mic[i].keymic; if (b == m[0]) return 1; } return 0; } static int cmp_one(void *binary, int index) { uint8_t i; uint32_t *b = (uint32_t*) binary; uint32_t *m = (uint32_t*) mic[index].keymic; for (i = 0; i < 
BINARY_SIZE / 4; i++) if (b[i] != m[i]) return 0; return 1; } static int cmp_exact(char *source, int index) { return 1; } static int salt_compare(const void *x, const void *y) { int c = strncmp((const char*)x, (const char*)y, 36); if (c) return c; return memcmp((const char*)x, (const char*)y, SALT_SIZE); } /* * key version as first tunable cost * 1=WPA (MD5) * 2=WPA2 (SHA1) * 3=802.11w (SHA256) */ static unsigned int get_keyver(void *salt) { hccap_t *my_salt = salt; return (unsigned int) my_salt->keyver; } #endif
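decode_hccap() above unpacks the 475-character blob after '#' four characters at a time, turning four 6-bit symbols into three bytes. The sketch below isolates that repacking; val6() and its lookup string are illustrative stand-ins (John's real atoi64[] table lives in common.c and is assumed here to follow the crypt(3)-style "./0-9A-Za-z" ordering):

/* Stand-alone sketch of the 4-char -> 3-byte step in decode_hccap(). */
#include <stdio.h>
#include <string.h>

static int val6(unsigned char c)
{
    static const char alphabet[] =
        "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
    const char *p = strchr(alphabet, c);
    return p ? (int)(p - alphabet) : 0x7f;  /* 0x7f marks invalid, as in valid() */
}

static void unpack3(const unsigned char *cap, unsigned char *dst)
{
    /* Same shifts as the loop body in decode_hccap(): four 6-bit
     * values are repacked into three 8-bit bytes. */
    dst[0] = (val6(cap[0]) << 2) | (val6(cap[1]) >> 4);
    dst[1] = (val6(cap[1]) << 4) | (val6(cap[2]) >> 2);
    dst[2] = (val6(cap[2]) << 6) | (val6(cap[3]));
}

int main(void)
{
    unsigned char out[3];
    unpack3((const unsigned char *)"..l/", out);
    printf("%02x %02x %02x\n", out[0], out[1], out[2]);
    return 0;
}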
#ifndef _WPAPSK_H #define _WPAPSK_H #include <stdint.h> #include <assert.h> #if HAVE_OPENSSL_CMAC_H #include <openssl/cmac.h> #endif #include "arch.h" #include "params.h" #include "common.h" #include "johnswap.h" #include "hmacmd5.h" #include "hmac_sha.h" #include "sha2.h" #include "hccap.h" #define BINARY_SIZE sizeof(mic_t) #define BINARY_ALIGN 4 #define PLAINTEXT_LENGTH 63 /* We can do 64 but spec. says 63 */ #define SALT_SIZE (sizeof(hccap_t) - sizeof(mic_t)) #define SALT_ALIGN MEM_ALIGN_NONE #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define FORMAT_TAG "$WPAPSK$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) typedef struct { unsigned char keymic[16]; } mic_t; typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH + 1]; } wpapsk_password; typedef struct { uint32_t v[8]; } wpapsk_hash; typedef struct { uint32_t length; #ifdef JOHN_OCL_WPAPSK uint8_t eapol[256 + 64]; uint32_t eapol_size; uint8_t data[64 + 12]; #endif uint8_t salt[36]; //essid } wpapsk_salt; static struct fmt_tests tests[] = { /* WPA2 testcase from http://wiki.wireshark.org/SampleCaptures */ {"$WPAPSK$Coherer#..l/Uf7J..qHUXMunTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosMyXdNxfBZUAYmgKqeb6GBPxLiIZr56NtWTGR/Cp5ldAk61.5I0.Ec.2...........nTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosM.................................................................3X.I.E..1uk0.E..1uk2.E..1uk0....................................................................................................................................................................................../t.....U...8FWdk8OpPckhewBwt4MXYI", "Induction"}, {"$WPAPSK$Harkonen#./FgTY0../B4zX6AKFO9kuLT4BQSyqEXwo.6XOiS4u8vlMNNs5grN91SVL.WK3GkF2rXfkPFGGi38MHkHDMbH.sm49Vc3pO4HPSUJE21.5I0.Ec.2........../KFO9kuLT4BQSyqEXwo.6XOiS4u8vlMNNs5grN91SVL..................................................................3X.I.E..1uk2.E..1uk2.E..1uk0.E..................................................................................................................................................................................../t.....U...BIpIs8sePU4r8yNnOxKHfM", "12345678"}, /* WPA (MD5), from aircrack-ng tests */ {"$WPAPSK$test#..qHuv0A..ZPYJBRzZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsRIfQN2Zar6EXp2BYcRuSkWEJIWjEJJvb4DWZCspbZ51.21.3zy.EY.6........../zZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsQ..................................................................BoK.31m.E2..31m.U2..31m.U2..31m.U................................................................................................................................................................................/X.....E...AkkDQmDg9837LBHG.dGlKA", "biscotte"}, /* Maximum length, 63 characters */ {"$WPAPSK$Greased Lighting#kA5.CDNB.07cofsOMXEEUwFTkO/RX2sQUaW9eteI8ynpFMwRgFZC6kk7bGqgvfcXnuF1f7L5fgn4fQMLmDrKjdBNjb6LClRmfLiTYk21.5I0.Ec............7MXEEUwFTkO/RX2sQUaW9eteI8ynpFMwRgFZC6kk7bGo.................................................................3X.I.E..1uk2.E..1uk2.E..1uk00...................................................................................................................................................................................../t.....U...D06LUdWVfGPaP1Oa3AV9Hg", "W*A5z&1?op2_L&Hla-OA$#5i_Lu@F+6d?je?u5!6+6766eluu7-l+jOEkIwLe90"}, 
{"$WPAPSK$hello#JUjQmBbOHUY4RTqMpGc9EjqGdCxMZPWNXBNd1ejNDoFuemrLl27juYlDDUDMgZfery1qJTHYVn2Faso/kUDDjr3y8gspK7viz8BCJE21.5I0.Ec............/pGc9EjqGdCxMZPWNXBNd1ejNDoFuemrLl27juYlDDUA.................................................................3X.I.E..1uk2.E..1uk2.E..1uk0....................................................................................................................................................................................../t.....U...9Py59nqygwiar49oOKA3RY", "12345678"}, #if HAVE_OPENSSL_CMAC_H || defined(JOHN_OCL_WPAPSK) /* 802.11w with WPA-PSK-SHA256 */ {"$WPAPSK$hello#HY6.hTXZv.v27BkPGuhkCnLAKxYHlTWYs.4yuqVSNAip3SeixhErtNMV30LZAA3uaEfy2U2tJQi.VICk4hqn3V5m7W3lNHSJYW5vLE21.5I0.Eg............/GuhkCnLAKxYHlTWYs.4yuqVSNAip3SeixhErtNMV30I.................................................................3X.I.E..1uk2.E..1uk2.E..1uk4....................................................................................................................................................................................../t.....k.../Ms4UxzvlNw5hOM1igIeo6", "password"}, /* 802.11w with WPA-PSK-SHA256, https://github.com/neheb */ {"$WPAPSK$Neheb#g9a8Jcre9D0WrPnEN4QXDbA5NwAy5TVpkuoChMdFfL/8Dus4i/X.lTnfwuw04ASqHgvo12wJYJywulb6pWM6C5uqiMPNKNe9pkr6LE61.5I0.Eg.2..........1N4QXDbA5NwAy5TVpkuoChMdFfL/8Dus4i/X.lTnfwuw.................................................................3X.I.E..1uk2.E..1uk2.E..1uk4X...................................................................................................................................................................................../t.....k...0sHl.mVkiHW.ryNchcMd4g", "bo$$password"}, #endif {NULL} }; /** Below are common variables used by wpapsk_fmt.c and opencl_wpapsk_fmt.c **/ static hccap_t hccap; ///structure with hccap data static wpapsk_salt currentsalt; ///structure for essid static mic_t *mic; ///table for MIC keys #ifndef JOHN_OCL_WPAPSK static wpapsk_password *inbuffer; ///table for candidate passwords static wpapsk_hash *outbuffer; ///table for PMK calculated by GPU #endif static int new_keys = 1; static char last_ssid[sizeof(hccap.essid)]; /** Below are common functions used by wpapsk_fmt.c and opencl_wpapsk_fmt.c **/ static hccap_t * decode_hccap(char *ciphertext) { static hccap_t hccap; char *essid = ciphertext + FORMAT_TAG_LEN; char *hash = strrchr(ciphertext, '#'); char *d = hccap.essid; char *cap = hash + 1; unsigned char tbuf[sizeof(hccap_t)]; unsigned char *dst = tbuf; int i; memset(&hccap, 0, sizeof(hccap)); if (hash == NULL) return &hccap; while (essid != hash) { ///copy essid to hccap * d++ = *essid++; } *d = '\0'; assert(*essid == '#'); for (i = 0; i < 118; i++) { dst[0] = (atoi64[ARCH_INDEX(cap[0])] << 2) | (atoi64[ARCH_INDEX(cap[1])] >> 4); dst[1] = (atoi64[ARCH_INDEX(cap[1])] << 4) | (atoi64[ARCH_INDEX(cap[2])] >> 2); dst[2] = (atoi64[ARCH_INDEX(cap[2])] << 6) | (atoi64[ARCH_INDEX(cap[3])]); dst += 3; cap += 4; } dst[0] = (atoi64[ARCH_INDEX(cap[0])] << 2) | (atoi64[ARCH_INDEX(cap[1])] >> 4); dst[1] = (atoi64[ARCH_INDEX(cap[1])] << 4) | (atoi64[ARCH_INDEX(cap[2])] >> 2); /* This emits warnings on some compilers */ //memcpy(&hccap.mac1, tbuf, sizeof(hccap_t) - 36); memcpy(((char *)&hccap) + 36, tbuf, sizeof(hccap_t) - 36); #if !ARCH_LITTLE_ENDIAN hccap.eapol_size = JOHNSWAP(hccap.eapol_size); hccap.keyver = JOHNSWAP(hccap.keyver); #endif return &hccap; } static void * get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; uint32_t dummy; } binary; hccap_t *hccap = 
decode_hccap(ciphertext); memcpy(binary.c, hccap->keymic, BINARY_SIZE); return binary.c; } static void * get_salt(char *ciphertext) { static hccap_t s; memcpy(&s, decode_hccap(ciphertext), SALT_SIZE); return &s; } static int valid(char *ciphertext, struct fmt_main *self) { char *hash; int hashlength = 0; hccap_t *hccap; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; hash = strrchr(ciphertext, '#'); if (hash == NULL || hash - (ciphertext + FORMAT_TAG_LEN) > 32) return 0; hash++; while (hash < ciphertext + strlen(ciphertext)) { if (atoi64[ARCH_INDEX(*hash++)] == 0x7f) return 0; hashlength++; } if (hashlength != 475) return 0; hccap = decode_hccap(ciphertext); if (strlen(hccap->essid) > 32) /* real life limit */ return 0; if (hccap->eapol_size > 256) return 0; if (hccap->eapol_size < 0) return 0; if (hccap->keyver < 1) return 0; #if HAVE_OPENSSL_CMAC_H || defined(JOHN_OCL_WPAPSK) if (hccap->keyver > 3) return 0; #else if (hccap->keyver > 2) return 0; #endif return 1; } #ifndef JOHN_OCL_WPAPSK static MAYBE_INLINE void prf_512(uint32_t * key, uint8_t * data, uint32_t * ret) { char *text = (char *)"Pairwise key expansion"; unsigned char buff[100]; memcpy(buff, text, 22); memcpy(buff + 23, data, 76); buff[22] = 0; buff[76 + 23] = 0; hmac_sha1((unsigned char *)key, 32, buff, 100, (unsigned char *)ret, 20); } #endif static void insert_mac(uint8_t * data) { int k = memcmp(hccap.mac1, hccap.mac2, 6); if (k > 0) { memcpy(data, hccap.mac2, 6); memcpy(data + 6, hccap.mac1, 6); } else { memcpy(data, hccap.mac1, 6); memcpy(data + 6, hccap.mac2, 6); } } static void insert_nonce(uint8_t * data) { int k = memcmp(hccap.nonce1, hccap.nonce2, 32); if (k > 0) { memcpy(data, hccap.nonce2, 32); memcpy(data + 32, hccap.nonce1, 32); } else { memcpy(data, hccap.nonce1, 32); memcpy(data + 32, hccap.nonce2, 32); } } #ifdef WPAPSK_DEBUG static char * tomac(unsigned char *p) { static char buf[48]; sprintf(buf, "%02X:%02X:%02X:%02X:%02X:%02X", p[0], p[1], p[2], p[3], p[4], p[5]); return buf; } static char * hex(unsigned char *p, int len) { static char buf[1024]; char *op = buf; int i; if (len > 32) { do { for (i = 0; i < 32; ++i) { op += sprintf(op, "%02X", p[i]); if (i < 31 && i % 4 == 3) op += sprintf(op, " "); if (i == 15) op += sprintf(op, ": "); } len -= 32; p += 32; op += sprintf(op, "\n "); } while (len > 32); } for (i = 0; i < len; ++i) { op += sprintf(op, "%02X", p[i]); if (i < 31 && i % 4 == 3) op += sprintf(op, " "); if (i == 15) op += sprintf(op, ": "); } return buf; } static void Debug_hccap() { printf("essid: %s\n", hccap.essid); printf("mac1: %s\n", tomac(hccap.mac1)); printf("mac2: %s\n", tomac(hccap.mac2)); printf("nonce1: %s\n", hex(hccap.nonce1, 32)); printf("nonce2: %s\n", hex(hccap.nonce2, 32)); printf("eapol: %s\n", hex(hccap.eapol, 256)); printf("epol_sz: %d (0x%02X)\n", hccap.eapol_size, hccap.eapol_size); printf("keyver: %d\n", hccap.keyver); printf("keymic: %s\n", hex(hccap.keymic, 16)); } #endif static void set_salt(void *salt) { memcpy(&hccap, salt, SALT_SIZE); strncpy((char *)currentsalt.salt, hccap.essid, sizeof(currentsalt.salt)); currentsalt.length = strlen(hccap.essid); #ifdef JOHN_OCL_WPAPSK currentsalt.eapol_size = hccap.eapol_size; memcpy(currentsalt.eapol, hccap.eapol, hccap.eapol_size); memset(currentsalt.eapol + hccap.eapol_size, 0x80, 1); memset(currentsalt.eapol + hccap.eapol_size + 1, 0, 256 + 64 - hccap.eapol_size - 1); if (hccap.keyver == 2) alter_endianity(currentsalt.eapol, 256 + 56); ((unsigned int *)currentsalt.eapol)[16 * ((hccap.eapol_size + 8) / 64) + 
((hccap.keyver == 1) ? 14 : 15)] = (64 + hccap.eapol_size) << 3; insert_mac(currentsalt.data); insert_nonce(currentsalt.data + 12); if (hccap.keyver < 3) alter_endianity(currentsalt.data, 64 + 12); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(wpapsk_salt), &currentsalt, 0, NULL, NULL), "Copy setting to gpu"); #endif //Debug_hccap(); } #ifndef JOHN_OCL_WPAPSK static void clear_keys(void) { new_keys = 1; } #undef set_key static void set_key(char *key, int index) { uint8_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); new_keys = 1; } static char * get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint8_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, length); ret[length] = '\0'; return ret; } #if HAVE_OPENSSL_CMAC_H /* Code borrowed from https://w1.fi/wpa_supplicant/ starts */ #define SHA256_MAC_LEN 32 typedef uint16_t u16; typedef uint8_t u8; static inline void WPA_PUT_LE16(u8 * a, u16 val) { a[1] = val >> 8; a[0] = val & 0xff; } static void sha256_vector(size_t num_elem, const u8 * addr[], const size_t * len, u8 * mac) { SHA256_CTX ctx; size_t i; SHA256_Init(&ctx); for (i = 0; i < num_elem; i++) { SHA256_Update(&ctx, addr[i], len[i]); } SHA256_Final(mac, &ctx); } static void hmac_sha256_vector(const u8 * key, size_t key_len, size_t num_elem, const u8 * addr[], const size_t * len, u8 * mac) { unsigned char k_pad[64]; /* padding - key XORd with ipad/opad */ const u8 *_addr[6]; size_t _len[6], i; /* * the HMAC_SHA256 transform looks like: * * SHA256(K XOR opad, SHA256(K XOR ipad, text)) * * where K is an n byte key ipad is the byte 0x36 repeated 64 times opad is * the byte 0x5c repeated 64 times and text is the data being protected */ /* start out by storing key in ipad */ memset(k_pad, 0, sizeof(k_pad)); memcpy(k_pad, key, key_len); /* XOR key with ipad values */ for (i = 0; i < 64; i++) k_pad[i] ^= 0x36; /* perform inner SHA256 */ _addr[0] = k_pad; _len[0] = 64; for (i = 0; i < num_elem; i++) { _addr[i + 1] = addr[i]; _len[i + 1] = len[i]; } sha256_vector(1 + num_elem, _addr, _len, mac); memset(k_pad, 0, sizeof(k_pad)); memcpy(k_pad, key, key_len); /* XOR key with opad values */ for (i = 0; i < 64; i++) k_pad[i] ^= 0x5c; /* perform outer SHA256 */ _addr[0] = k_pad; _len[0] = 64; _addr[1] = mac; _len[1] = SHA256_MAC_LEN; sha256_vector(2, _addr, _len, mac); } static void sha256_prf_bits(const u8 * key, size_t key_len, const char *label, const u8 * data, size_t data_len, u8 * buf, size_t buf_len_bits) { u16 counter = 1; size_t pos, plen; u8 hash[SHA256_MAC_LEN]; const u8 *addr[4]; size_t len[4]; u8 counter_le[2], length_le[2]; size_t buf_len = (buf_len_bits + 7) / 8; addr[0] = counter_le; len[0] = 2; addr[1] = (u8 *) label; len[1] = strlen(label); addr[2] = data; len[2] = data_len; addr[3] = length_le; len[3] = sizeof(length_le); WPA_PUT_LE16(length_le, buf_len_bits); pos = 0; while (pos < buf_len) { plen = buf_len - pos; WPA_PUT_LE16(counter_le, counter); if (plen >= SHA256_MAC_LEN) { hmac_sha256_vector(key, key_len, 4, addr, len, &buf[pos]); pos += SHA256_MAC_LEN; } else { hmac_sha256_vector(key, key_len, 4, addr, len, hash); memcpy(&buf[pos], hash, plen); pos += plen; break; } counter++; } /* * Mask out unused bits in the last octet if it does not use all the * bits. 
*/ if (buf_len_bits % 8) { u8 mask = 0xff << (8 - buf_len_bits % 8); buf[pos - 1] &= mask; } } #endif /* HAVE_OPENSSL_CMAC_H */ /* Code borrowed from https://w1.fi/wpa_supplicant/ ends */ static void wpapsk_postprocess(int keys) { int i; uint8_t data[64 + 12]; insert_mac(data); insert_nonce(data + 12); if (hccap.keyver == 1) { for (i = 0; i < keys; i++) { uint32_t prf[20 / 4]; HMACMD5Context ctx; prf_512(outbuffer[i].v, data, prf); //PTK hmac_md5_init_K16((unsigned char *)prf, &ctx); hmac_md5_update(hccap.eapol, hccap.eapol_size, &ctx); hmac_md5_final(mic[i].keymic, &ctx); } } else if (hccap.keyver == 2) { for (i = 0; i < keys; i++) { uint32_t prf[20 / 4]; prf_512(outbuffer[i].v, data, prf); //PTK hmac_sha1((unsigned char *)prf, 16, hccap.eapol, hccap.eapol_size, mic[i].keymic, 16); } #if HAVE_OPENSSL_CMAC_H } else if (hccap.keyver == 3) { //802.11 w, WPA - PSK - SHA256 for (i = 0; i < keys; i++) { unsigned char ptk[48]; unsigned char cmic[16]; size_t miclen; CMAC_CTX *ctx; sha256_prf_bits((unsigned char *)outbuffer[i].v, 32, "Pairwise key expansion", data, 76, ptk, 48 * 8); //PTK // Compute MIC ctx = CMAC_CTX_new(); CMAC_Init(ctx, ptk, 16, EVP_aes_128_cbc(), 0); CMAC_Update(ctx, hccap.eapol, hccap.eapol_size); CMAC_Final(ctx, cmic, &miclen); memcpy(mic[i].keymic, cmic, 16); CMAC_CTX_free(ctx); } #endif /* HAVE_OPENSSL_CMAC_H */ } } #endif /* #ifndef JOHN_OCL_WPAPSK */ static int binary_hash_0(void *binary) { #ifdef WPAPSK_DEBUG puts("binary"); uint32_t i, *b = binary; for (i = 0; i < 4; i++) printf("%08x ", b[i]); puts(""); #endif return ((uint32_t *) binary)[0] & PH_MASK_0; } static int get_hash_0(int index) { #ifdef WPAPSK_DEBUG int i; puts("get_hash"); uint32_t *b = (uint32_t *) mic[index].keymic; for (i = 0; i < 4; i++) printf("%08x ", b[i]); puts(""); #endif uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_0; } static int get_hash_1(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_1; } static int get_hash_2(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_2; } static int get_hash_3(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_3; } static int get_hash_4(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_4; } static int get_hash_5(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_5; } static int get_hash_6(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_6; } static int cmp_all(void *binary, int count) { uint32_t i, b = ((uint32_t *) binary)[0]; for (i = 0; i < count; i++) { uint32_t *m = (uint32_t *) mic[i].keymic; if (b == m[0]) return 1; } return 0; } static int cmp_one(void *binary, int index) { uint8_t i; uint32_t *b = (uint32_t *) binary; uint32_t *m = (uint32_t *) mic[index].keymic; for (i = 0; i < BINARY_SIZE / 4; i++) if (b[i] != m[i]) return 0; return 1; } static int cmp_exact(char *source, int index) { return 1; } static int salt_compare(const void *x, const void *y) { int c = strncmp((const char *)x, (const char *)y, 36); if (c) return c; return memcmp((const char *)x, (const char *)y, SALT_SIZE); } /* * key version as first tunable cost 1=WPA (MD5) 2=WPA2 (SHA1) * 3=802.11w (SHA256) */ static unsigned int get_keyver(void *salt) { hccap_t *my_salt = salt; return (unsigned int)my_salt->keyver; } #endif
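The hmac_sha256_vector() code borrowed from wpa_supplicant implements the standard ipad/opad construction, SHA256(K XOR opad || SHA256(K XOR ipad || text)). A minimal non-vector sketch of the same construction follows; it assumes a key no longer than one SHA-256 block (64 bytes) and uses OpenSSL's SHA256_* calls as the source does (deprecated since OpenSSL 3.0 but still available; build with -lcrypto). The expected digest is RFC 4231 test case 1:

/* Minimal HMAC-SHA256 sketch mirroring hmac_sha256_vector() above. */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

static void hmac_sha256(const unsigned char *key, size_t key_len,
                        const unsigned char *msg, size_t msg_len,
                        unsigned char out[32])
{
    unsigned char k_pad[64], inner[32];
    size_t i;
    SHA256_CTX ctx;

    /* Assumes key_len <= 64; a longer key would first be hashed. */
    memset(k_pad, 0, sizeof(k_pad));
    memcpy(k_pad, key, key_len);
    for (i = 0; i < 64; i++) k_pad[i] ^= 0x36;   /* ipad */
    SHA256_Init(&ctx);
    SHA256_Update(&ctx, k_pad, 64);
    SHA256_Update(&ctx, msg, msg_len);
    SHA256_Final(inner, &ctx);                   /* inner = SHA256(K^ipad || text) */

    memset(k_pad, 0, sizeof(k_pad));
    memcpy(k_pad, key, key_len);
    for (i = 0; i < 64; i++) k_pad[i] ^= 0x5c;   /* opad */
    SHA256_Init(&ctx);
    SHA256_Update(&ctx, k_pad, 64);
    SHA256_Update(&ctx, inner, 32);
    SHA256_Final(out, &ctx);                     /* out = SHA256(K^opad || inner) */
}

int main(void)
{
    unsigned char key[20], mac[32];
    int i;
    memset(key, 0x0b, sizeof(key));
    hmac_sha256(key, sizeof(key), (const unsigned char *)"Hi There", 8, mac);
    for (i = 0; i < 32; i++) printf("%02x", mac[i]);
    printf("\n");  /* expect b0344c61d8db38535ca8afceaf0bf12b881dc200c9833da726e9376c2e32cff7 */
    return 0;
}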
#ifndef _WPAPSK_H #define _WPAPSK_H #include <stdint.h> #include <assert.h> #if HAVE_OPENSSL_CMAC_H #include <openssl/cmac.h> #endif #include "arch.h" #include "params.h" #include "common.h" #include "johnswap.h" #include "hmacmd5.h" #include "hmac_sha.h" #include "sha2.h" #include "hccap.h" #define BINARY_SIZE sizeof(mic_t) #define BINARY_ALIGN 4 #define PLAINTEXT_LENGTH 63 /* We can do 64 but spec. says 63 */ #define SALT_SIZE (sizeof(hccap_t) - sizeof(mic_t)) #define SALT_ALIGN MEM_ALIGN_NONE #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define FORMAT_TAG "$WPAPSK$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) typedef struct { unsigned char keymic[16]; } mic_t; typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH + 1]; } wpapsk_password; typedef struct { uint32_t v[8]; } wpapsk_hash; typedef struct { uint32_t length; #ifdef JOHN_OCL_WPAPSK uint8_t eapol[256 + 64]; uint32_t eapol_size; uint8_t data[64 + 12]; #endif uint8_t salt[36]; //essid } wpapsk_salt; static struct fmt_tests tests[] = { /* WPA2 testcase from http://wiki.wireshark.org/SampleCaptures */ {"$WPAPSK$Coherer#..l/Uf7J..qHUXMunTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosMyXdNxfBZUAYmgKqeb6GBPxLiIZr56NtWTGR/Cp5ldAk61.5I0.Ec.2...........nTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosM.................................................................3X.I.E..1uk0.E..1uk2.E..1uk0....................................................................................................................................................................................../t.....U...8FWdk8OpPckhewBwt4MXYI", "Induction"}, {"$WPAPSK$Harkonen#./FgTY0../B4zX6AKFO9kuLT4BQSyqEXwo.6XOiS4u8vlMNNs5grN91SVL.WK3GkF2rXfkPFGGi38MHkHDMbH.sm49Vc3pO4HPSUJE21.5I0.Ec.2........../KFO9kuLT4BQSyqEXwo.6XOiS4u8vlMNNs5grN91SVL..................................................................3X.I.E..1uk2.E..1uk2.E..1uk0.E..................................................................................................................................................................................../t.....U...BIpIs8sePU4r8yNnOxKHfM", "12345678"}, /* WPA (MD5), from aircrack-ng tests */ {"$WPAPSK$test#..qHuv0A..ZPYJBRzZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsRIfQN2Zar6EXp2BYcRuSkWEJIWjEJJvb4DWZCspbZ51.21.3zy.EY.6........../zZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsQ..................................................................BoK.31m.E2..31m.U2..31m.U2..31m.U................................................................................................................................................................................/X.....E...AkkDQmDg9837LBHG.dGlKA", "biscotte"}, /* Maximum length, 63 characters */ {"$WPAPSK$Greased Lighting#kA5.CDNB.07cofsOMXEEUwFTkO/RX2sQUaW9eteI8ynpFMwRgFZC6kk7bGqgvfcXnuF1f7L5fgn4fQMLmDrKjdBNjb6LClRmfLiTYk21.5I0.Ec............7MXEEUwFTkO/RX2sQUaW9eteI8ynpFMwRgFZC6kk7bGo.................................................................3X.I.E..1uk2.E..1uk2.E..1uk00...................................................................................................................................................................................../t.....U...D06LUdWVfGPaP1Oa3AV9Hg", "W*A5z&1?op2_L&Hla-OA$#5i_Lu@F+6d?je?u5!6+6766eluu7-l+jOEkIwLe90"}, 
{"$WPAPSK$hello#JUjQmBbOHUY4RTqMpGc9EjqGdCxMZPWNXBNd1ejNDoFuemrLl27juYlDDUDMgZfery1qJTHYVn2Faso/kUDDjr3y8gspK7viz8BCJE21.5I0.Ec............/pGc9EjqGdCxMZPWNXBNd1ejNDoFuemrLl27juYlDDUA.................................................................3X.I.E..1uk2.E..1uk2.E..1uk0....................................................................................................................................................................................../t.....U...9Py59nqygwiar49oOKA3RY", "12345678"}, #if HAVE_OPENSSL_CMAC_H || defined(JOHN_OCL_WPAPSK) /* 802.11w with WPA-PSK-SHA256 */ {"$WPAPSK$hello#HY6.hTXZv.v27BkPGuhkCnLAKxYHlTWYs.4yuqVSNAip3SeixhErtNMV30LZAA3uaEfy2U2tJQi.VICk4hqn3V5m7W3lNHSJYW5vLE21.5I0.Eg............/GuhkCnLAKxYHlTWYs.4yuqVSNAip3SeixhErtNMV30I.................................................................3X.I.E..1uk2.E..1uk2.E..1uk4....................................................................................................................................................................................../t.....k.../Ms4UxzvlNw5hOM1igIeo6", "password"}, /* 802.11w with WPA-PSK-SHA256, https://github.com/neheb */ {"$WPAPSK$Neheb#g9a8Jcre9D0WrPnEN4QXDbA5NwAy5TVpkuoChMdFfL/8Dus4i/X.lTnfwuw04ASqHgvo12wJYJywulb6pWM6C5uqiMPNKNe9pkr6LE61.5I0.Eg.2..........1N4QXDbA5NwAy5TVpkuoChMdFfL/8Dus4i/X.lTnfwuw.................................................................3X.I.E..1uk2.E..1uk2.E..1uk4X...................................................................................................................................................................................../t.....k...0sHl.mVkiHW.ryNchcMd4g", "bo$$password"}, #endif {NULL} }; /** Below are common variables used by wpapsk_fmt.c and opencl_wpapsk_fmt.c **/ static hccap_t hccap; ///structure with hccap data static wpapsk_salt currentsalt; ///structure for essid static mic_t *mic; ///table for MIC keys #ifndef JOHN_OCL_WPAPSK static wpapsk_password *inbuffer; ///table for candidate passwords static wpapsk_hash *outbuffer; ///table for PMK calculated by GPU #endif static int new_keys = 1; static char last_ssid[sizeof(hccap.essid)]; /** Below are common functions used by wpapsk_fmt.c and opencl_wpapsk_fmt.c **/ static hccap_t * decode_hccap(char *ciphertext) { static hccap_t hccap; char *essid = ciphertext + FORMAT_TAG_LEN; char *hash = strrchr(ciphertext, '#'); char *d = hccap.essid; char *cap = hash + 1; unsigned char tbuf[sizeof(hccap_t)]; unsigned char *dst = tbuf; int i; memset(&hccap, 0, sizeof(hccap)); if (hash == NULL) return &hccap; while (essid != hash) { ///copy essid to hccap * d++ = *essid++; } *d = '\0'; assert(*essid == '#'); for (i = 0; i < 118; i++) { dst[0] = (atoi64[ARCH_INDEX(cap[0])] << 2) | (atoi64[ARCH_INDEX(cap[1])] >> 4); dst[1] = (atoi64[ARCH_INDEX(cap[1])] << 4) | (atoi64[ARCH_INDEX(cap[2])] >> 2); dst[2] = (atoi64[ARCH_INDEX(cap[2])] << 6) | (atoi64[ARCH_INDEX(cap[3])]); dst += 3; cap += 4; } dst[0] = (atoi64[ARCH_INDEX(cap[0])] << 2) | (atoi64[ARCH_INDEX(cap[1])] >> 4); dst[1] = (atoi64[ARCH_INDEX(cap[1])] << 4) | (atoi64[ARCH_INDEX(cap[2])] >> 2); /* This emits warnings on some compilers */ //memcpy(&hccap.mac1, tbuf, sizeof(hccap_t) - 36); memcpy(((char *)&hccap) + 36, tbuf, sizeof(hccap_t) - 36); #if !ARCH_LITTLE_ENDIAN hccap.eapol_size = JOHNSWAP(hccap.eapol_size); hccap.keyver = JOHNSWAP(hccap.keyver); #endif return &hccap; } static void * get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; uint32_t dummy; } binary; hccap_t *hccap = 
decode_hccap(ciphertext); memcpy(binary.c, hccap->keymic, BINARY_SIZE); return binary.c; } static void * get_salt(char *ciphertext) { static hccap_t s; memcpy(&s, decode_hccap(ciphertext), SALT_SIZE); return &s; } static int valid(char *ciphertext, struct fmt_main *self) { char *hash; int hashlength = 0; hccap_t *hccap; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; hash = strrchr(ciphertext, '#'); if (hash == NULL || hash - (ciphertext + FORMAT_TAG_LEN) > 32) return 0; hash++; while (hash < ciphertext + strlen(ciphertext)) { if (atoi64[ARCH_INDEX(*hash++)] == 0x7f) return 0; hashlength++; } if (hashlength != 475) return 0; hccap = decode_hccap(ciphertext); if (strlen(hccap->essid) > 32) /* real life limit */ return 0; if (hccap->eapol_size > 256) return 0; if (hccap->eapol_size < 0) return 0; if (hccap->keyver < 1) return 0; #if HAVE_OPENSSL_CMAC_H || defined(JOHN_OCL_WPAPSK) if (hccap->keyver > 3) return 0; #else if (hccap->keyver > 2) return 0; #endif return 1; } #ifndef JOHN_OCL_WPAPSK static MAYBE_INLINE void prf_512(uint32_t * key, uint8_t * data, uint32_t * ret) { char *text = (char *)"Pairwise key expansion"; unsigned char buff[100]; memcpy(buff, text, 22); memcpy(buff + 23, data, 76); buff[22] = 0; buff[76 + 23] = 0; hmac_sha1((unsigned char *)key, 32, buff, 100, (unsigned char *)ret, 20); } #endif static void insert_mac(uint8_t * data) { int k = memcmp(hccap.mac1, hccap.mac2, 6); if (k > 0) { memcpy(data, hccap.mac2, 6); memcpy(data + 6, hccap.mac1, 6); } else { memcpy(data, hccap.mac1, 6); memcpy(data + 6, hccap.mac2, 6); } } static void insert_nonce(uint8_t * data) { int k = memcmp(hccap.nonce1, hccap.nonce2, 32); if (k > 0) { memcpy(data, hccap.nonce2, 32); memcpy(data + 32, hccap.nonce1, 32); } else { memcpy(data, hccap.nonce1, 32); memcpy(data + 32, hccap.nonce2, 32); } } #ifdef WPAPSK_DEBUG static char * tomac(unsigned char *p) { static char buf[48]; sprintf(buf, "%02X:%02X:%02X:%02X:%02X:%02X", p[0], p[1], p[2], p[3], p[4], p[5]); return buf; } static char * hex(unsigned char *p, int len) { static char buf[1024]; char *op = buf; int i; if (len > 32) { do { for (i = 0; i < 32; ++i) { op += sprintf(op, "%02X", p[i]); if (i < 31 && i % 4 == 3) op += sprintf(op, " "); if (i == 15) op += sprintf(op, ": "); } len -= 32; p += 32; op += sprintf(op, "\n "); } while (len > 32); } for (i = 0; i < len; ++i) { op += sprintf(op, "%02X", p[i]); if (i < 31 && i % 4 == 3) op += sprintf(op, " "); if (i == 15) op += sprintf(op, ": "); } return buf; } static void Debug_hccap() { printf("essid: %s\n", hccap.essid); printf("mac1: %s\n", tomac(hccap.mac1)); printf("mac2: %s\n", tomac(hccap.mac2)); printf("nonce1: %s\n", hex(hccap.nonce1, 32)); printf("nonce2: %s\n", hex(hccap.nonce2, 32)); printf("eapol: %s\n", hex(hccap.eapol, 256)); printf("epol_sz: %d (0x%02X)\n", hccap.eapol_size, hccap.eapol_size); printf("keyver: %d\n", hccap.keyver); printf("keymic: %s\n", hex(hccap.keymic, 16)); } #endif static void set_salt(void *salt) { memcpy(&hccap, salt, SALT_SIZE); strncpy((char *)currentsalt.salt, hccap.essid, sizeof(currentsalt.salt)); currentsalt.length = strlen(hccap.essid); #ifdef JOHN_OCL_WPAPSK currentsalt.eapol_size = hccap.eapol_size; memcpy(currentsalt.eapol, hccap.eapol, hccap.eapol_size); memset(currentsalt.eapol + hccap.eapol_size, 0x80, 1); memset(currentsalt.eapol + hccap.eapol_size + 1, 0, 256 + 64 - hccap.eapol_size - 1); if (hccap.keyver == 2) alter_endianity(currentsalt.eapol, 256 + 56); ((unsigned int *)currentsalt.eapol)[16 * ((hccap.eapol_size + 8) / 64) + 
((hccap.keyver == 1) ? 14 : 15)] = (64 + hccap.eapol_size) << 3; insert_mac(currentsalt.data); insert_nonce(currentsalt.data + 12); if (hccap.keyver < 3) alter_endianity(currentsalt.data, 64 + 12); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(wpapsk_salt), &currentsalt, 0, NULL, NULL), "Copy setting to gpu"); #endif //Debug_hccap(); } #ifndef JOHN_OCL_WPAPSK static void clear_keys(void) { new_keys = 1; } #undef set_key static void set_key(char *key, int index) { uint8_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); new_keys = 1; } static char * get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint8_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, length); ret[length] = '\0'; return ret; } #if HAVE_OPENSSL_CMAC_H /* Code borrowed from https://w1.fi/wpa_supplicant/ starts */ #define SHA256_MAC_LEN 32 typedef uint16_t u16; typedef uint8_t u8; static inline void WPA_PUT_LE16(u8 * a, u16 val) { a[1] = val >> 8; a[0] = val & 0xff; } static void sha256_vector(size_t num_elem, const u8 * addr[], const size_t * len, u8 * mac) { SHA256_CTX ctx; size_t i; SHA256_Init(&ctx); for (i = 0; i < num_elem; i++) { SHA256_Update(&ctx, addr[i], len[i]); } SHA256_Final(mac, &ctx); } static void hmac_sha256_vector(const u8 * key, size_t key_len, size_t num_elem, const u8 * addr[], const size_t * len, u8 * mac) { unsigned char k_pad[64]; /* padding - key XORd with ipad/opad */ const u8 *_addr[6]; size_t _len[6], i; /* * the HMAC_SHA256 transform looks like: * * SHA256(K XOR opad, SHA256(K XOR ipad, text)) * * where K is an n byte key ipad is the byte 0x36 repeated 64 times opad is * the byte 0x5c repeated 64 times and text is the data being protected */ /* start out by storing key in ipad */ memset(k_pad, 0, sizeof(k_pad)); memcpy(k_pad, key, key_len); /* XOR key with ipad values */ for (i = 0; i < 64; i++) k_pad[i] ^= 0x36; /* perform inner SHA256 */ _addr[0] = k_pad; _len[0] = 64; for (i = 0; i < num_elem; i++) { _addr[i + 1] = addr[i]; _len[i + 1] = len[i]; } sha256_vector(1 + num_elem, _addr, _len, mac); memset(k_pad, 0, sizeof(k_pad)); memcpy(k_pad, key, key_len); /* XOR key with opad values */ for (i = 0; i < 64; i++) k_pad[i] ^= 0x5c; /* perform outer SHA256 */ _addr[0] = k_pad; _len[0] = 64; _addr[1] = mac; _len[1] = SHA256_MAC_LEN; sha256_vector(2, _addr, _len, mac); } static void sha256_prf_bits(const u8 * key, size_t key_len, const char *label, const u8 * data, size_t data_len, u8 * buf, size_t buf_len_bits) { u16 counter = 1; size_t pos, plen; u8 hash[SHA256_MAC_LEN]; const u8 *addr[4]; size_t len[4]; u8 counter_le[2], length_le[2]; size_t buf_len = (buf_len_bits + 7) / 8; addr[0] = counter_le; len[0] = 2; addr[1] = (u8 *) label; len[1] = strlen(label); addr[2] = data; len[2] = data_len; addr[3] = length_le; len[3] = sizeof(length_le); WPA_PUT_LE16(length_le, buf_len_bits); pos = 0; while (pos < buf_len) { plen = buf_len - pos; WPA_PUT_LE16(counter_le, counter); if (plen >= SHA256_MAC_LEN) { hmac_sha256_vector(key, key_len, 4, addr, len, &buf[pos]); pos += SHA256_MAC_LEN; } else { hmac_sha256_vector(key, key_len, 4, addr, len, hash); memcpy(&buf[pos], hash, plen); pos += plen; break; } counter++; } /* * Mask out unused bits in the last octet if it does not use all the * bits. 
*/ if (buf_len_bits % 8) { u8 mask = 0xff << (8 - buf_len_bits % 8); buf[pos - 1] &= mask; } } #endif /* HAVE_OPENSSL_CMAC_H */ /* Code borrowed from https://w1.fi/wpa_supplicant/ ends */ static void wpapsk_postprocess(int keys) { int i; uint8_t data[64 + 12]; insert_mac(data); insert_nonce(data + 12); if (hccap.keyver == 1) { #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic) #endif for (i = 0; i < keys; i++) { uint32_t prf[20 / 4]; HMACMD5Context ctx; prf_512(outbuffer[i].v, data, prf); //PTK hmac_md5_init_K16((unsigned char *)prf, &ctx); hmac_md5_update(hccap.eapol, hccap.eapol_size, &ctx); hmac_md5_final(mic[i].keymic, &ctx); } } else if (hccap.keyver == 2) { #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic) #endif for (i = 0; i < keys; i++) { uint32_t prf[20 / 4]; prf_512(outbuffer[i].v, data, prf); //PTK hmac_sha1((unsigned char *)prf, 16, hccap.eapol, hccap.eapol_size, mic[i].keymic, 16); } #if HAVE_OPENSSL_CMAC_H } else if (hccap.keyver == 3) { //802.11 w, WPA - PSK - SHA256 #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic) #endif for (i = 0; i < keys; i++) { unsigned char ptk[48]; unsigned char cmic[16]; size_t miclen; CMAC_CTX *ctx; sha256_prf_bits((unsigned char *)outbuffer[i].v, 32, "Pairwise key expansion", data, 76, ptk, 48 * 8); //PTK // Compute MIC ctx = CMAC_CTX_new(); CMAC_Init(ctx, ptk, 16, EVP_aes_128_cbc(), 0); CMAC_Update(ctx, hccap.eapol, hccap.eapol_size); CMAC_Final(ctx, cmic, &miclen); memcpy(mic[i].keymic, cmic, 16); CMAC_CTX_free(ctx); } #endif /* HAVE_OPENSSL_CMAC_H */ } } #endif /* #ifndef JOHN_OCL_WPAPSK */ static int binary_hash_0(void *binary) { #ifdef WPAPSK_DEBUG puts("binary"); uint32_t i, *b = binary; for (i = 0; i < 4; i++) printf("%08x ", b[i]); puts(""); #endif return ((uint32_t *) binary)[0] & PH_MASK_0; } static int get_hash_0(int index) { #ifdef WPAPSK_DEBUG int i; puts("get_hash"); uint32_t *b = (uint32_t *) mic[index].keymic; for (i = 0; i < 4; i++) printf("%08x ", b[i]); puts(""); #endif uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_0; } static int get_hash_1(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_1; } static int get_hash_2(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_2; } static int get_hash_3(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_3; } static int get_hash_4(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_4; } static int get_hash_5(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_5; } static int get_hash_6(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_6; } static int cmp_all(void *binary, int count) { uint32_t i, b = ((uint32_t *) binary)[0]; for (i = 0; i < count; i++) { uint32_t *m = (uint32_t *) mic[i].keymic; if (b == m[0]) return 1; } return 0; } static int cmp_one(void *binary, int index) { uint8_t i; uint32_t *b = (uint32_t *) binary; uint32_t *m = (uint32_t *) mic[index].keymic; for (i = 0; i < BINARY_SIZE / 4; i++) if (b[i] != m[i]) return 0; return 1; } static int cmp_exact(char *source, int index) { return 1; } static int salt_compare(const void *x, const void *y) { int c = strncmp((const char *)x, (const char *)y, 36); if (c) return c; return memcmp((const char *)x, (const char *)y, SALT_SIZE); } /* * key version as first 
tunable cost 1=WPA (MD5) 2=WPA2 (SHA1) * 3=802.11w (SHA256) */ static unsigned int get_keyver(void *salt) { hccap_t *my_salt = salt; return (unsigned int)my_salt->keyver; } #endif
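The main difference between the two formatted wpapsk.h variants is the #pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic) lines in wpapsk_postprocess(): each candidate key i writes only mic[i], so the iterations are independent and a plain parallel-for is safe. A minimal sketch of that pattern (NKEYS and compute_mic are illustrative names, not from the source):

/* Per-key independent work parallelized as in wpapsk_postprocess(). */
#include <stdio.h>

#define NKEYS 8

static unsigned compute_mic(int i)
{
    return 0x9e3779b9u * (unsigned)(i + 1);  /* stand-in for the PRF+HMAC work */
}

int main(void)
{
    unsigned mic[NKEYS];
    int i;
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(mic)
#endif
    for (i = 0; i < NKEYS; i++)
        mic[i] = compute_mic(i);       /* iteration i touches only mic[i] */

    for (i = 0; i < NKEYS; i++)
        printf("key %d -> %08x\n", i, mic[i]);
    return 0;
}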
vc12.c
#include <immintrin.h> #include <stdbool.h> #define A(a, x, y, z) (a[(z) * ny * nx + (y) * nx + x]) void step(float *restrict f, float *restrict fp, const int nx, const int ny, const int nz, const int nxi, const float *restrict const model_padded2_dt2, const float dx, const float dt, const float *restrict const sources, const int *restrict const sources_x, const int *restrict const sources_y, const int *restrict const sources_z, const int num_sources, const int source_len, const int num_steps) { int step; int x; int y; int z; int i; int sx; int sy; int sz; float *tmp; float fd_coeff[9] = { -924708642.0f / 302702400 / (dx * dx) / 2, 538137600.0f / 302702400 / (dx * dx), -94174080.0f / 302702400 / (dx * dx), 22830080.0f / 302702400 / (dx * dx), -5350800.0f / 302702400 / (dx * dx), 1053696.0f / 302702400 / (dx * dx), -156800.0f / 302702400 / (dx * dx), 15360.0f / 302702400 / (dx * dx), -735.0f / 302702400 / (dx * dx) }; const int last_avx_x = ((nxi - 1) / 8) * 8 + 8; float mask[8]; for (i = 0; i < 8; i++) { if (last_avx_x + i < nxi + 8) { mask[i] = 1.0f; } else { mask[i] = 0.0f; } } const __m256 ymask = _mm256_set_ps(mask[7], mask[6], mask[5], mask[4], mask[3], mask[2], mask[1], mask[0]); const __m256 ytwo = _mm256_set1_ps(2.0f); for (step = 0; step < num_steps; step++) { #pragma omp parallel for default(none) private(y, x, i) shared(fd_coeff, f, fp) for (z = 8; z < nz - 8; z++) { for (y = 8; y < ny - 8; y++) { for (x = 8; x < nxi; x+=8) { __m256 yf_xx = _mm256_setzero_ps(); __m256 ymodel_padded2_dt2 = _mm256_loadu_ps(&A(model_padded2_dt2, x, y, z)); for (i = 0; i < 9; i++) { __m256 yfd_coeff = _mm256_set1_ps(fd_coeff[i]); __m256 yfx1 = _mm256_loadu_ps(&A(f, x + i, y, z)); __m256 yfx2 = _mm256_loadu_ps(&A(f, x - i, y, z)); __m256 yfy1 = _mm256_loadu_ps(&A(f, x, y + i, z)); __m256 yfy2 = _mm256_loadu_ps(&A(f, x, y - i, z)); __m256 yfz1 = _mm256_loadu_ps(&A(f, x, y, z + i)); __m256 yfz2 = _mm256_loadu_ps(&A(f, x, y, z - i)); yf_xx = _mm256_fmadd_ps(yfx1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfx2, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfy1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfy2, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfz1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfz2, yfd_coeff, yf_xx); } yf_xx = _mm256_mul_ps(ymodel_padded2_dt2, yf_xx); __m256 yf = _mm256_loadu_ps(&A(f, x, y, z)); __m256 yfp = _mm256_loadu_ps(&A(fp, x, y, z)); yfp = _mm256_sub_ps(yf_xx, yfp); yfp = _mm256_fmadd_ps(yf, ytwo, yfp); _mm256_storeu_ps(&A(fp, x, y, z), yfp); } if (last_avx_x < nxi + 8) { x = last_avx_x; __m256 yf_xx = _mm256_setzero_ps(); __m256 ymodel_padded2_dt2 = _mm256_loadu_ps(&A(model_padded2_dt2, x, y, z)); for (i = 0; i < 9; i++) { __m256 yfd_coeff = _mm256_set1_ps(fd_coeff[i]); __m256 yfx1 = _mm256_loadu_ps(&A(f, x + i, y, z)); __m256 yfx2 = _mm256_loadu_ps(&A(f, x - i, y, z)); __m256 yfy1 = _mm256_loadu_ps(&A(f, x, y + i, z)); __m256 yfy2 = _mm256_loadu_ps(&A(f, x, y - i, z)); __m256 yfz1 = _mm256_loadu_ps(&A(f, x, y, z + i)); __m256 yfz2 = _mm256_loadu_ps(&A(f, x, y, z - i)); yf_xx = _mm256_fmadd_ps(yfx1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfx2, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfy1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfy2, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfz1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfz2, yfd_coeff, yf_xx); } yf_xx = _mm256_mul_ps(ymodel_padded2_dt2, yf_xx); __m256 yf = _mm256_loadu_ps(&A(f, x, y, z)); __m256 yfp = _mm256_loadu_ps(&A(fp, x, y, z)); yfp = _mm256_sub_ps(yf_xx, yfp); yfp = _mm256_fmadd_ps(yf, ytwo, yfp); yfp 
= _mm256_mul_ps(yfp, ymask); _mm256_storeu_ps(&A(fp, x, y, z), yfp); } } } for (i = 0; i < num_sources; i++) { sx = sources_x[i] + 8; sy = sources_y[i] + 8; sz = sources_z[i] + 8; A(fp, sx, sy, sz) += A(model_padded2_dt2, sx, sy, sz) * sources[i * source_len + step] * dt; } tmp = f; f = fp; fp = tmp; } }
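The tail handling above is worth calling out: the last partial vector at last_avx_x is computed as a full 8-lane AVX vector, multiplied by the precomputed 0/1 ymask, and then written back with a full unmasked store, so the lanes past the interior are overwritten with zeros rather than left untouched (harmless here, since only halo cells are clobbered and those are expected to stay zero). A minimal sketch of the alternative, assuming an AVX2 target (the kernel already uses FMA), with store_tail and valid as illustrative names not taken from the source, stores only the valid lanes via _mm256_maskstore_ps:

#include <immintrin.h>

/* Sketch: store only the first `valid` lanes of v to dst (requires AVX2). */
static inline void store_tail(float *dst, __m256 v, int valid)
{
    const __m256i lane = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
    /* lanes with index < valid compare all-ones, i.e. sign bit set -> stored */
    const __m256i keep = _mm256_cmpgt_epi32(_mm256_set1_epi32(valid), lane);
    _mm256_maskstore_ps(dst, keep, v);
}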
#include <immintrin.h> #include <stdbool.h> #define A(a, x, y, z) (a[(z) * ny * nx + (y) * nx + x]) void step(float *restrict f, float *restrict fp, const int nx, const int ny, const int nz, const int nxi, const float *restrict const model_padded2_dt2, const float dx, const float dt, const float *restrict const sources, const int *restrict const sources_x, const int *restrict const sources_y, const int *restrict const sources_z, const int num_sources, const int source_len, const int num_steps) { int step; int x; int y; int z; int i; int sx; int sy; int sz; float *tmp; float fd_coeff[9] = { -924708642.0f / 302702400 / (dx * dx) / 2, 538137600.0f / 302702400 / (dx * dx), -94174080.0f / 302702400 / (dx * dx), 22830080.0f / 302702400 / (dx * dx), -5350800.0f / 302702400 / (dx * dx), 1053696.0f / 302702400 / (dx * dx), -156800.0f / 302702400 / (dx * dx), 15360.0f / 302702400 / (dx * dx), -735.0f / 302702400 / (dx * dx) }; const int last_avx_x = ((nxi - 1) / 8) * 8 + 8; float mask[8]; for (i = 0; i < 8; i++) { if (last_avx_x + i < nxi + 8) { mask[i] = 1.0f; } else { mask[i] = 0.0f; } } const __m256 ymask = _mm256_set_ps(mask[7], mask[6], mask[5], mask[4], mask[3], mask[2], mask[1], mask[0]); const __m256 ytwo = _mm256_set1_ps(2.0f); for (step = 0; step < num_steps; step++) { for (z = 8; z < nz - 8; z++) { for (y = 8; y < ny - 8; y++) { for (x = 8; x < nxi; x += 8) { __m256 yf_xx = _mm256_setzero_ps(); __m256 ymodel_padded2_dt2 = _mm256_loadu_ps(&A(model_padded2_dt2, x, y, z)); for (i = 0; i < 9; i++) { __m256 yfd_coeff = _mm256_set1_ps(fd_coeff[i]); __m256 yfx1 = _mm256_loadu_ps(&A(f, x + i, y, z)); __m256 yfx2 = _mm256_loadu_ps(&A(f, x - i, y, z)); __m256 yfy1 = _mm256_loadu_ps(&A(f, x, y + i, z)); __m256 yfy2 = _mm256_loadu_ps(&A(f, x, y - i, z)); __m256 yfz1 = _mm256_loadu_ps(&A(f, x, y, z + i)); __m256 yfz2 = _mm256_loadu_ps(&A(f, x, y, z - i)); yf_xx = _mm256_fmadd_ps(yfx1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfx2, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfy1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfy2, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfz1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfz2, yfd_coeff, yf_xx); } yf_xx = _mm256_mul_ps(ymodel_padded2_dt2, yf_xx); __m256 yf = _mm256_loadu_ps(&A(f, x, y, z)); __m256 yfp = _mm256_loadu_ps(&A(fp, x, y, z)); yfp = _mm256_sub_ps(yf_xx, yfp); yfp = _mm256_fmadd_ps(yf, ytwo, yfp); _mm256_storeu_ps(&A(fp, x, y, z), yfp); } if (last_avx_x < nxi + 8) { x = last_avx_x; __m256 yf_xx = _mm256_setzero_ps(); __m256 ymodel_padded2_dt2 = _mm256_loadu_ps(&A(model_padded2_dt2, x, y, z)); for (i = 0; i < 9; i++) { __m256 yfd_coeff = _mm256_set1_ps(fd_coeff[i]); __m256 yfx1 = _mm256_loadu_ps(&A(f, x + i, y, z)); __m256 yfx2 = _mm256_loadu_ps(&A(f, x - i, y, z)); __m256 yfy1 = _mm256_loadu_ps(&A(f, x, y + i, z)); __m256 yfy2 = _mm256_loadu_ps(&A(f, x, y - i, z)); __m256 yfz1 = _mm256_loadu_ps(&A(f, x, y, z + i)); __m256 yfz2 = _mm256_loadu_ps(&A(f, x, y, z - i)); yf_xx = _mm256_fmadd_ps(yfx1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfx2, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfy1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfy2, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfz1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfz2, yfd_coeff, yf_xx); } yf_xx = _mm256_mul_ps(ymodel_padded2_dt2, yf_xx); __m256 yf = _mm256_loadu_ps(&A(f, x, y, z)); __m256 yfp = _mm256_loadu_ps(&A(fp, x, y, z)); yfp = _mm256_sub_ps(yf_xx, yfp); yfp = _mm256_fmadd_ps(yf, ytwo, yfp); yfp = _mm256_mul_ps(yfp, ymask); _mm256_storeu_ps(&A(fp, x, y, z),
yfp); } } } for (i = 0; i < num_sources; i++) { sx = sources_x[i] + 8; sy = sources_y[i] + 8; sz = sources_z[i] + 8; A(fp, sx, sy, sz) += A(model_padded2_dt2, sx, sy, sz) * sources[i * source_len + step] * dt; } tmp = f; f = fp; fp = tmp; } }
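Reading the update off the code: fd_coeff holds the 16th-order central-difference weights for the Laplacian (integer numerators over the common denominator 302702400, with the 1/dx^2 factor folded in; the i = 0 entry is halved because the inner loop adds the centre sample once per direction, i.e. twice per axis), and model_padded2_dt2 evidently stores v^2 dt^2. Each time step is then the standard second-order-in-time leapfrog update for the scalar wave equation:

f^{n+1} = 2 f^{n} - f^{n-1} + v^2 \Delta t^2 \, \nabla^2 f^{n},
\qquad
\nabla^2 f \approx \sum_{d \in \{x,y,z\}} \sum_{i=0}^{8} \mathtt{fd\_coeff}[i] \left( f^{n}_{d+i} + f^{n}_{d-i} \right),

which is exactly yfp = model_padded2_dt2 * yf_xx - fp + 2 f in the vector code, followed by the f/fp pointer swap that advances time.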
#include <immintrin.h> #include <stdbool.h> #define A(a, x, y, z) (a[(z) * ny * nx + (y) * nx + x]) void step(float *restrict f, float *restrict fp, const int nx, const int ny, const int nz, const int nxi, const float *restrict const model_padded2_dt2, const float dx, const float dt, const float *restrict const sources, const int *restrict const sources_x, const int *restrict const sources_y, const int *restrict const sources_z, const int num_sources, const int source_len, const int num_steps) { int step; int x; int y; int z; int i; int sx; int sy; int sz; float *tmp; float fd_coeff[9] = { -924708642.0f / 302702400 / (dx * dx) / 2, 538137600.0f / 302702400 / (dx * dx), -94174080.0f / 302702400 / (dx * dx), 22830080.0f / 302702400 / (dx * dx), -5350800.0f / 302702400 / (dx * dx), 1053696.0f / 302702400 / (dx * dx), -156800.0f / 302702400 / (dx * dx), 15360.0f / 302702400 / (dx * dx), -735.0f / 302702400 / (dx * dx) }; const int last_avx_x = ((nxi - 1) / 8) * 8 + 8; float mask[8]; for (i = 0; i < 8; i++) { if (last_avx_x + i < nxi + 8) { mask[i] = 1.0f; } else { mask[i] = 0.0f; } } const __m256 ymask = _mm256_set_ps(mask[7], mask[6], mask[5], mask[4], mask[3], mask[2], mask[1], mask[0]); const __m256 ytwo = _mm256_set1_ps(2.0f); for (step = 0; step < num_steps; step++) { #pragma omp parallel for default(none) private(y, x, i) shared(fd_coeff, f, fp) for (z = 8; z < nz - 8; z++) { for (y = 8; y < ny - 8; y++) { for (x = 8; x < nxi; x += 8) { __m256 yf_xx = _mm256_setzero_ps(); __m256 ymodel_padded2_dt2 = _mm256_loadu_ps(&A(model_padded2_dt2, x, y, z)); for (i = 0; i < 9; i++) { __m256 yfd_coeff = _mm256_set1_ps(fd_coeff[i]); __m256 yfx1 = _mm256_loadu_ps(&A(f, x + i, y, z)); __m256 yfx2 = _mm256_loadu_ps(&A(f, x - i, y, z)); __m256 yfy1 = _mm256_loadu_ps(&A(f, x, y + i, z)); __m256 yfy2 = _mm256_loadu_ps(&A(f, x, y - i, z)); __m256 yfz1 = _mm256_loadu_ps(&A(f, x, y, z + i)); __m256 yfz2 = _mm256_loadu_ps(&A(f, x, y, z - i)); yf_xx = _mm256_fmadd_ps(yfx1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfx2, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfy1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfy2, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfz1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfz2, yfd_coeff, yf_xx); } yf_xx = _mm256_mul_ps(ymodel_padded2_dt2, yf_xx); __m256 yf = _mm256_loadu_ps(&A(f, x, y, z)); __m256 yfp = _mm256_loadu_ps(&A(fp, x, y, z)); yfp = _mm256_sub_ps(yf_xx, yfp); yfp = _mm256_fmadd_ps(yf, ytwo, yfp); _mm256_storeu_ps(&A(fp, x, y, z), yfp); } if (last_avx_x < nxi + 8) { x = last_avx_x; __m256 yf_xx = _mm256_setzero_ps(); __m256 ymodel_padded2_dt2 = _mm256_loadu_ps(&A(model_padded2_dt2, x, y, z)); for (i = 0; i < 9; i++) { __m256 yfd_coeff = _mm256_set1_ps(fd_coeff[i]); __m256 yfx1 = _mm256_loadu_ps(&A(f, x + i, y, z)); __m256 yfx2 = _mm256_loadu_ps(&A(f, x - i, y, z)); __m256 yfy1 = _mm256_loadu_ps(&A(f, x, y + i, z)); __m256 yfy2 = _mm256_loadu_ps(&A(f, x, y - i, z)); __m256 yfz1 = _mm256_loadu_ps(&A(f, x, y, z + i)); __m256 yfz2 = _mm256_loadu_ps(&A(f, x, y, z - i)); yf_xx = _mm256_fmadd_ps(yfx1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfx2, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfy1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfy2, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfz1, yfd_coeff, yf_xx); yf_xx = _mm256_fmadd_ps(yfz2, yfd_coeff, yf_xx); } yf_xx = _mm256_mul_ps(ymodel_padded2_dt2, yf_xx); __m256 yf = _mm256_loadu_ps(&A(f, x, y, z)); __m256 yfp = _mm256_loadu_ps(&A(fp, x, y, z)); yfp = _mm256_sub_ps(yf_xx, yfp); yfp = _mm256_fmadd_ps(yf,
ytwo, yfp); yfp = _mm256_mul_ps(yfp, ymask); _mm256_storeu_ps(&A(fp, x, y, z), yfp); } } } for (i = 0; i < num_sources; i++) { sx = sources_x[i] + 8; sy = sources_y[i] + 8; sz = sources_z[i] + 8; A(fp, sx, sy, sz) += A(model_padded2_dt2, sx, sy, sz) * sources[i * source_len + step] * dt; } tmp = f; f = fp; fp = tmp; } }
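A minimal driver sketch for the kernel above, assuming the step definition is in scope; the grid sizes, velocity, and the padding convention (8 ghost cells per side, so nx = nxi + 16, read off the loop bounds rather than documented anywhere) are illustrative:

#include <stdlib.h>

int main(void)
{
    const int nxi = 100, nyi = 100, nzi = 100;         /* interior size */
    const int nx = nxi + 16, ny = nyi + 16, nz = nzi + 16;
    const float dx = 5.0f, dt = 0.001f, v = 1500.0f;
    const size_t n = (size_t)nx * ny * nz;

    float *f  = calloc(n, sizeof(float));              /* wavefield at t   */
    float *fp = calloc(n, sizeof(float));              /* wavefield at t-1 */
    float *m  = malloc(n * sizeof(float));
    if (!f || !fp || !m) return 1;
    for (size_t i = 0; i < n; i++)
        m[i] = v * v * dt * dt;                        /* model_padded2_dt2 */

    /* one source at the interior centre firing a single-sample impulse */
    int sx = nxi / 2, sy = nyi / 2, sz = nzi / 2;
    float src[1] = { 1.0f };

    step(f, fp, nx, ny, nz, nxi, m, dx, dt, src, &sx, &sy, &sz, 1, 1, 1);

    /* the f/fp swap is local to step, so after an odd num_steps the
       newest wavefield is in the buffer that was passed as fp */
    free(f); free(fp); free(m);
    return 0;
}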
no_option.c
// RUN: %clang_cc1 -verify -o - %s // expected-no-diagnostics int a; #pragma omp threadprivate(a,b) #pragma omp parallel
// RUN: %clang_cc1 -verify -o - %s // expected-no-diagnostics int a;
// RUN: %clang_cc1 -verify -o - %s // expected-no-diagnostics int a; #pragma omp threadprivate(a,b) #pragma omp parallel
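no_option.c is one of the clang tests that run %clang_cc1 without any -fopenmp flag: with OpenMP disabled the pragmas are skipped entirely, so even the undeclared b in the threadprivate list produces no diagnostic, which is what expected-no-diagnostics asserts. For contrast, a small sketch of what threadprivate means when OpenMP is enabled (compile with -fopenmp; counter is an illustrative name):

#include <omp.h>
#include <stdio.h>

int counter = 0;
#pragma omp threadprivate(counter)   /* each thread gets its own copy */

int main(void)
{
#pragma omp parallel
    {
        counter += omp_get_thread_num();
#pragma omp critical
        printf("thread %d: counter = %d\n", omp_get_thread_num(), counter);
    }
    return 0;
}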
convolution_1x1_int8.h
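Before the optimized paths below, it helps to state what they compute: with a 1x1 kernel and stride 1, convolution degenerates to a matrix product of the [outch x inch] weights with the [inch x w*h] input, which is why the file packs both operands and runs an sgemm-style inner loop. A hedged scalar reference, with raw pointers and a per-channel stride cstep standing in for ncnn::Mat, and contiguous int32 output assumed for simplicity:

/* Scalar reference for 1x1 stride-1 int8 convolution (sketch). */
static void conv1x1s1_int8_ref(const signed char *bottom, int cstep,
                               int size, int inch, int outch,
                               const signed char *kernel, int *top)
{
    for (int p = 0; p < outch; p++) {
        for (int i = 0; i < size; i++) {
            int sum = 0;                      /* int32 accumulator */
            for (int q = 0; q < inch; q++)
                sum += bottom[q * cstep + i] * kernel[p * inch + q];
            top[p * size + i] = sum;
        }
    }
}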
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static inline signed char float2int8(float v) { int int32 = round(v); if (int32 > 127) return 127; if (int32 < -128) return -128; return (signed char)int32; } #if __aarch64__ static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch) { const signed char* kernel = _kernel; // kernel memory packed 4 x 4 kernel_tm.create(4*4, inch/4 + inch%4, outch/4 + outch%4, (size_t)1u); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; const signed char* k0 = kernel + (p+0)*inch; const signed char* k1 = kernel + (p+1)*inch; const signed char* k2 = kernel + (p+2)*inch; const signed char* k3 = kernel + (p+3)*inch; signed char* ktmp = kernel_tm.channel(p/4); int q=0; for (; q+1<inch; q+=2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q<inch; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } for (int p=remain_outch_start; p<outch; p++) { const signed char* k0 = kernel + (p+0)*inch; signed char* ktmp = kernel_tm.channel(p/4 + p%4); int q=0; for (; q+1<inch; q=q+2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q<inch; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; // bottom_tm memory packed 4 x 4 ncnn::Mat bottom_tm(4, inch, size/4 + size%4, (size_t)1u, opt.workspace_allocator); { int nn_size = size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 4; const signed char* img0 = bottom_blob.channel(0); const signed char* img1 = bottom_blob.channel(1); img0 += i; img1 += i; signed char* tmpptr = bottom_tm.channel(i/4); int q = 0; for (; q+1<inch; q=q+2) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img0[1]; tmpptr[3] = img1[1]; tmpptr[4] = img0[2]; tmpptr[5] = img1[2]; tmpptr[6] = img0[3]; tmpptr[7] = img1[3]; tmpptr += 8; img0 += bottom_blob.cstep; img0 += bottom_blob.cstep; img1 += bottom_blob.cstep; img1 += bottom_blob.cstep; } for (; q<inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<size; i++) { const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr 
= bottom_tm.channel(i/4 + i%4); for (int q=0; q<inch; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += bottom_blob.cstep; } } } // sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; int* outptr0 = top_blob.channel(p); int* outptr1 = top_blob.channel(p+1); int* outptr2 = top_blob.channel(p+2); int* outptr3 = top_blob.channel(p+3); int i = 0; for (; i+3<size; i+=4) { signed char* tmpptr = bottom_tm.channel(i/4); const signed char* kptr = kernel.channel(p/4); #if __ARM_NEON asm volatile( "prfm pldl1keep, [%4, #128] \n" "prfm pldl1keep, [%5, #128] \n" "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "lsr w4, %w12, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "ld1 {v0.16b}, [%4] \n"// i0, i1, i2, i3 "ld1 {v4.16b}, [%5] \n"// k0, k1, k2, k3 "add %4, %4, #16 \n" "add %5, %5, #16 \n" "rev32 v1.8h, v0.8h \n"// i1, i0, i3, i2 "rev64 v2.4s, v0.4s \n"// i2, i3, i0, i1 "rev64 v3.8h, v0.8h \n"// i3, i2, i1, i0 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "prfm pldl1keep, [%4, #1024] \n" "prfm pldl1keep, [%5, #1024] \n" "smlal2 v8.8h, v4.16b, v0.16b \n" "smlal2 v9.8h, v4.16b, v1.16b \n" "smlal2 v10.8h, v4.16b, v2.16b \n" "smlal2 v11.8h, v4.16b, v3.16b \n" "sadalp v16.4s, v8.8h \n"// i0k0, i1k1, i2k2, i3k3 "sadalp v17.4s, v9.8h \n"// i1k0, i0k1, i3k2, i2k3 "sadalp v18.4s, v10.8h \n"// i2k0, i3k1, i0k2, i1k3 "sadalp v19.4s, v11.8h \n"// i3k0, i2k1, i1k2, i0k3 "subs w4, w4, #1 \n" "bne 0b \n" "1: \n"// for (; k+1<L; k=k+2) // remain loop "and w4, %w12, #3 \n"// w4 = remain = K & 3; "cmp w4, #0 \n" "beq 3f \n" "lsr w4, w4, #1 \n"// r4 = nn = L >> 1 "cmp w4, #0 \n" "beq 3f \n" "2: \n"// for (; k+1<L; k=k+2) "ld1 {v0.8b}, [%4] \n"// i0, i1, i2, i3 "ld1 {v4.8b}, [%5] \n"// k0, k1, k2, k3 "add %4, %4, #8 \n" "add %5, %5, #8 \n" "rev32 v1.4h, v0.4h \n"// i2, i3, i0, i1 "rev64 v2.2s, v0.2s \n"// i1, i0, i3, i2 "rev64 v3.4h, v0.4h \n"// i0, i1, i2, i3 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "sadalp v16.4s, v8.8h \n" "sadalp v17.4s, v9.8h \n" "sadalp v18.4s,v10.8h \n" "sadalp v19.4s,v11.8h \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n"// realloc "mov v20.s[0], v16.s[0] \n" "mov v20.s[1], v17.s[0] \n" "mov v20.s[2], v18.s[0] \n" "mov v20.s[3], v19.s[0] \n" "mov v21.s[0], v17.s[1] \n" "mov v21.s[1], v16.s[1] \n" "mov v21.s[2], v19.s[1] \n" "mov v21.s[3], v18.s[1] \n" "mov v22.s[0], v18.s[2] \n" "mov v22.s[1], v19.s[2] \n" "mov v22.s[2], v16.s[2] \n" "mov v22.s[3], v17.s[2] \n" "mov v23.s[0], v19.s[3] \n" "mov v23.s[1], v18.s[3] \n" "mov v23.s[2], v17.s[3] \n" "mov v23.s[3], v16.s[3] \n" "and w4, %w12, #1 \n"// w4 = remain = K & 1; "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v0.8b}, [%4] \n" "ld1 {v1.8b}, [%5] \n" "add %4, %4, #4 \n" "add %5, %5, #4 \n" "sshll v0.8h, v0.8b, #0 \n"// i0[0], i1[0], i2[0], i3[0] "sshll v1.8h, v1.8b, #0 \n"// k0[0], k1[0], k2[0], k3[0] "smlal v20.4s, v0.4h, v1.h[0] \n"// i0k0, i1k0, i2k0, i3k0 "smlal v21.4s, v0.4h, v1.h[1] \n"// i0k1, i1k1, i2k1, i3k1 "smlal v22.4s, v0.4h, v1.h[2] \n"// i0k2, i1k2, i2k2, i3k2 "smlal v23.4s, v0.4h, v1.h[3] \n"// i0k3, i1k3, i2k3, i3k3 "subs w4, w4, #1 \n" "bne 2b \n" 
"5: \n" "st1 {v20.4s}, [%0] \n" "st1 {v21.4s}, [%1] \n" "st1 {v22.4s}, [%2] \n" "st1 {v23.4s}, [%3] \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int q=0; for (; q+1<inch; q=q+2) { sum0_0 += tmpptr[0] * kptr[0]; sum0_0 += tmpptr[1] * kptr[1]; sum0_1 += tmpptr[2] * kptr[0]; sum0_1 += tmpptr[3] * kptr[1]; sum0_2 += tmpptr[4] * kptr[0]; sum0_2 += tmpptr[5] * kptr[1]; sum0_3 += tmpptr[6] * kptr[0]; sum0_3 += tmpptr[7] * kptr[1]; sum1_0 += tmpptr[0] * kptr[2]; sum1_0 += tmpptr[1] * kptr[3]; sum1_1 += tmpptr[2] * kptr[2]; sum1_1 += tmpptr[3] * kptr[3]; sum1_2 += tmpptr[4] * kptr[2]; sum1_2 += tmpptr[5] * kptr[3]; sum1_3 += tmpptr[6] * kptr[2]; sum1_3 += tmpptr[7] * kptr[3]; sum2_0 += tmpptr[0] * kptr[4]; sum2_0 += tmpptr[1] * kptr[5]; sum2_1 += tmpptr[2] * kptr[4]; sum2_1 += tmpptr[3] * kptr[5]; sum2_2 += tmpptr[4] * kptr[4]; sum2_2 += tmpptr[5] * kptr[5]; sum2_3 += tmpptr[6] * kptr[4]; sum2_3 += tmpptr[7] * kptr[5]; sum3_0 += tmpptr[0] * kptr[6]; sum3_0 += tmpptr[1] * kptr[7]; sum3_1 += tmpptr[2] * kptr[6]; sum3_1 += tmpptr[3] * kptr[7]; sum3_2 += tmpptr[4] * kptr[6]; sum3_2 += tmpptr[5] * kptr[7]; sum3_3 += tmpptr[6] * kptr[6]; sum3_3 += tmpptr[7] * kptr[7]; tmpptr += 8; kptr += 8; } for (; q<inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; #endif outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; } for (; i<size; i++) { signed char* tmpptr = bottom_tm.channel(i/4 + i%4); const signed char* kptr = kernel.channel(p/4); #if __ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q=0; for (; q+3<inch; q=q+4) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3] int8x8x2_t _k = vld2_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1];k0[2-3], k1[2-3], k2[2-3], k3[2-3] int16x8_t _r0_s16 = vmovl_s8(_r0); // i0[0],i0[1],i0[2],i0[3] int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); // k0[0],k1[0],k2[0],k3[0],k0[2],k1[2],k2[2],k3[2] int16x8_t _k13_s16 = vmovl_s8(_k.val[1]); // k0[1],k1[1],k2[1],k3[1],k0[3],k1[3],k2[3],k3[3] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0); // i0[0]*k[0-3][0] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1); // 
i0[1]*k[0-3][1] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); // i0[2]*k[0-3][2] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); // i0[3]*k[0-3][3] tmpptr += 4; kptr += 16; } for (; q+1<inch; q=q+2) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3] int8x8_t _k = vld1_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1] _r0[2] = _r0[0]; _r0[3] = _r0[1]; _r0[4] = _r0[0]; _r0[5] = _r0[1]; _r0[6] = _r0[0]; _r0[7] = _r0[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 2; kptr += 8; } for (; q<inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3] int8x8_t _k = vld1_s8(kptr); // k[0-3][0] int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vaddw_s16(_sum, vget_low_s16(_tp0)); tmpptr += 1; kptr += 4; } vst1q_lane_s32(outptr0, _sum, 0); vst1q_lane_s32(outptr1, _sum, 1); vst1q_lane_s32(outptr2, _sum, 2); vst1q_lane_s32(outptr3, _sum, 3); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q=0; for (; q+1<inch; q=q+2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[0] * kptr[2]; sum1 += tmpptr[1] * kptr[3]; sum2 += tmpptr[0] * kptr[4]; sum2 += tmpptr[1] * kptr[5]; sum3 += tmpptr[0] * kptr[6]; sum3 += tmpptr[1] * kptr[7]; tmpptr += 2; kptr += 8; } for (; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr += 1; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; #endif outptr0++; outptr1++; outptr2++; outptr3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out0 = top_blob.channel(p); int* outptr0 = out0; int i = 0; for (; i+3<size; i+=4) { signed char* tmpptr = bottom_tm.channel(i/4); const signed char* kptr = kernel.channel(p/4 + p%4); #if __ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q=0; for (; q+1<inch; q=q+2) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-1], i1[0-1], i2[0-1], i3[0-1] int8x8_t _k = vld1_s8(kptr); // k0[0-1] _k[2] = _k[0]; _k[3] = _k[1]; _k[4] = _k[0]; _k[5] = _k[1]; _k[6] = _k[0]; _k[7] = _k[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 8; kptr += 2; } for (; q<inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0], i1[0], i2[0], i3[0] int8x8_t _k = vld1_s8(kptr); // k[0][0] int16x8_t _r0_s16 = vmovl_s8(_r0); int16x8_t _k_s16 = vmovl_s8(_k); _sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); // i0k0, i1k0, i2k0, i3k0 tmpptr += 4; kptr += 1; } vst1q_s32(outptr0, _sum); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q=0; for (; q+1<inch; q=q+2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[2] * kptr[0]; sum1 += tmpptr[3] * kptr[1]; sum2 += tmpptr[4] * kptr[0]; sum2 += tmpptr[5] * kptr[1]; sum3 += tmpptr[6] * kptr[0]; sum3 += tmpptr[7] * kptr[1]; tmpptr += 8; kptr += 2; } for (; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; #endif outptr0 += 4; } for (; i<size; i++) { signed char* tmpptr = bottom_tm.channel(i/4 + i%4); const signed char* kptr = kernel.channel(p/4 + p%4); int q = 0; int sum0 = 0; for (; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } } static void conv1x1s1_sgemm_int8_requant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat 
&kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; const float* bias = _bias; // bottom_tm memory packed 4 x 4 ncnn::Mat bottom_tm(4, inch, size/4 + size%4, (size_t)1u, opt.workspace_allocator); { int nn_size = size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 4; const signed char* img0 = bottom_blob.channel(0); const signed char* img1 = bottom_blob.channel(1); img0 += i; img1 += i; signed char* tmpptr = bottom_tm.channel(i/4); int q = 0; for (; q+1<inch; q=q+2) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img0[1]; tmpptr[3] = img1[1]; tmpptr[4] = img0[2]; tmpptr[5] = img1[2]; tmpptr[6] = img0[3]; tmpptr[7] = img1[3]; tmpptr += 8; img0 += bottom_blob.cstep; img0 += bottom_blob.cstep; img1 += bottom_blob.cstep; img1 += bottom_blob.cstep; } for (; q<inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<size; i++) { const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = bottom_tm.channel(i/4 + i%4); for (int q=0; q<inch; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += bottom_blob.cstep; } } } // sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; signed char* outptr0 = top_blob.channel(p); signed char* outptr1 = top_blob.channel(p+1); signed char* outptr2 = top_blob.channel(p+2); signed char* outptr3 = top_blob.channel(p+3); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p+1] : 0.f; const float bias2 = bias ? bias[p+2] : 0.f; const float bias3 = bias ? 
bias[p+3] : 0.f; const float scale_requant_in0 = scales_requant[2*p]; const float scale_requant_out0 = scales_requant[2*p+1]; const float scale_requant_in1 = scales_requant[2*(p+1)]; const float scale_requant_out1 = scales_requant[2*(p+1)+1]; const float scale_requant_in2 = scales_requant[2*(p+2)]; const float scale_requant_out2 = scales_requant[2*(p+2)+1]; const float scale_requant_in3 = scales_requant[2*(p+3)]; const float scale_requant_out3 = scales_requant[2*(p+3)+1]; float32x4_t _bias03, _scale_in03, _scale_out03; float32x4_t _bias0 = vdupq_n_f32(bias0); float32x4_t _bias1 = vdupq_n_f32(bias1); float32x4_t _bias2 = vdupq_n_f32(bias2); float32x4_t _bias3 = vdupq_n_f32(bias3); _bias03[0] = bias0; _bias03[1] = bias1; _bias03[2] = bias2; _bias03[3] = bias3; _scale_in03[0] = scale_requant_in0; _scale_in03[1] = scale_requant_in1; _scale_in03[2] = scale_requant_in2; _scale_in03[3] = scale_requant_in3; _scale_out03[0] = scale_requant_out0; _scale_out03[1] = scale_requant_out1; _scale_out03[2] = scale_requant_out2; _scale_out03[3] = scale_requant_out3; int i = 0; for (; i+3<size; i+=4) { signed char* tmpptr = bottom_tm.channel(i/4); const signed char* kptr = kernel.channel(p/4); #if 1 //__ARM_NEON asm volatile( "prfm pldl1keep, [%4, #128] \n" "prfm pldl1keep, [%5, #128] \n" "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "lsr w4, %w12, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "ld1 {v0.16b}, [%4] \n"// i0, i1, i2, i3 "ld1 {v4.16b}, [%5] \n"// k0, k1, k2, k3 "add %4, %4, #16 \n" "add %5, %5, #16 \n" "rev32 v1.8h, v0.8h \n"// i1, i0, i3, i2 "rev64 v2.4s, v0.4s \n"// i2, i3, i0, i1 "rev64 v3.8h, v0.8h \n"// i3, i2, i1, i0 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "prfm pldl1keep, [%4, #1024] \n" "prfm pldl1keep, [%5, #1024] \n" "smlal2 v8.8h, v4.16b, v0.16b \n" "smlal2 v9.8h, v4.16b, v1.16b \n" "smlal2 v10.8h, v4.16b, v2.16b \n" "smlal2 v11.8h, v4.16b, v3.16b \n" "sadalp v16.4s, v8.8h \n"// i0k0, i1k1, i2k2, i3k3 "sadalp v17.4s, v9.8h \n"// i1k0, i0k1, i3k2, i2k3 "sadalp v18.4s, v10.8h \n"// i2k0, i3k1, i0k2, i1k3 "sadalp v19.4s, v11.8h \n"// i3k0, i2k1, i1k2, i0k3 "subs w4, w4, #1 \n" "bne 0b \n" "1: \n"// for (; k+1<L; k=k+2) // remain loop "and w4, %w12, #3 \n"// w4 = remain = K & 3; "cmp w4, #0 \n" "beq 3f \n" "lsr w4, w4, #1 \n"// r4 = nn = L >> 1 "cmp w4, #0 \n" "beq 3f \n" "2: \n"// for (; k+1<L; k=k+2) "ld1 {v0.8b}, [%4] \n"// i0, i1, i2, i3 "ld1 {v4.8b}, [%5] \n"// k0, k1, k2, k3 "add %4, %4, #8 \n" "add %5, %5, #8 \n" "rev32 v1.4h, v0.4h \n"// i2, i3, i0, i1 "rev64 v2.2s, v0.2s \n"// i1, i0, i3, i2 "rev64 v3.4h, v0.4h \n"// i0, i1, i2, i3 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "sadalp v16.4s, v8.8h \n" "sadalp v17.4s, v9.8h \n" "sadalp v18.4s,v10.8h \n" "sadalp v19.4s,v11.8h \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n"// realloc "mov v20.s[0], v16.s[0] \n" "mov v20.s[1], v17.s[0] \n" "mov v20.s[2], v18.s[0] \n" "mov v20.s[3], v19.s[0] \n" "mov v21.s[0], v17.s[1] \n" "mov v21.s[1], v16.s[1] \n" "mov v21.s[2], v19.s[1] \n" "mov v21.s[3], v18.s[1] \n" "mov v22.s[0], v18.s[2] \n" "mov v22.s[1], v19.s[2] \n" "mov v22.s[2], v16.s[2] \n" "mov v22.s[3], v17.s[2] \n" "mov v23.s[0], v19.s[3] \n" "mov v23.s[1], v18.s[3] \n" "mov v23.s[2], v17.s[3] \n" "mov v23.s[3], 
v16.s[3] \n" "and w4, %w12, #1 \n"// w4 = remain = K & 1; "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v0.8b}, [%4] \n" "ld1 {v1.8b}, [%5] \n" "add %4, %4, #4 \n" "add %5, %5, #4 \n" "sshll v0.8h, v0.8b, #0 \n"// i0[0], i1[0], i2[0], i3[0] "sshll v1.8h, v1.8b, #0 \n"// k0[0], k1[0], k2[0], k3[0] "smlal v20.4s, v0.4h, v1.h[0] \n"// i0k0, i1k0, i2k0, i3k0 "smlal v21.4s, v0.4h, v1.h[1] \n"// i0k1, i1k1, i2k1, i3k1 "smlal v22.4s, v0.4h, v1.h[2] \n"// i0k2, i1k2, i2k2, i3k2 "smlal v23.4s, v0.4h, v1.h[3] \n"// i0k3, i1k3, i2k3, i3k3 "subs w4, w4, #1 \n" "bne 2b \n" "5: \n" // top_s32 -> top_f32 "scvtf v20.4s, v20.4s \n" "scvtf v21.4s, v21.4s \n" "scvtf v22.4s, v22.4s \n" "scvtf v23.4s, v23.4s \n" // top_f32 = top_f32 * scale_in "fmul v20.4s, v20.4s, %17.s[0] \n" "fmul v21.4s, v21.4s, %17.s[1] \n" "fmul v22.4s, v22.4s, %17.s[2] \n" "fmul v23.4s, v23.4s, %17.s[3] \n" // top_f32 = top_f32 + bias "fadd v20.4s, v20.4s, %13.4s \n" "fadd v21.4s, v21.4s, %14.4s \n" "fadd v22.4s, v22.4s, %15.4s \n" "fadd v23.4s, v23.4s, %16.4s \n" // top_f32 = top_f32 * scale_out "fmul v20.4s, v20.4s, %18.s[0] \n" "fmul v21.4s, v21.4s, %18.s[1] \n" "fmul v22.4s, v22.4s, %18.s[2] \n" "fmul v23.4s, v23.4s, %18.s[3] \n" // top_f32 -> top_s32 "fcvtas v20.4s, v20.4s \n" "fcvtas v21.4s, v21.4s \n" "fcvtas v22.4s, v22.4s \n" "fcvtas v23.4s, v23.4s \n" // top_s32 -> top_s16 "sqxtn v7.4h, v20.4s \n" "sqxtn2 v7.8h, v21.4s \n" "sqxtn v8.4h, v22.4s \n" "sqxtn2 v8.8h, v23.4s \n" // top_s16 -> top_s8 "sqxtn v0.8b, v7.8h \n" "sqxtn v1.8b, v8.8h \n" // save top_s8 "st1 {v0.s}[0], [%0] \n" "st1 {v0.s}[1], [%1] \n" "st1 {v1.s}[0], [%2] \n" "st1 {v1.s}[1], [%3] \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), // %12 "w"(_bias0), // %13 "w"(_bias1), // %14 "w"(_bias2), // %15 "w"(_bias3), // %16 "w"(_scale_in03), // %17 "w"(_scale_out03) // %18 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int q=0; for (; q+1<inch; q=q+2) { sum0_0 += tmpptr[0] * kptr[0]; sum0_0 += tmpptr[1] * kptr[1]; sum0_1 += tmpptr[2] * kptr[0]; sum0_1 += tmpptr[3] * kptr[1]; sum0_2 += tmpptr[4] * kptr[0]; sum0_2 += tmpptr[5] * kptr[1]; sum0_3 += tmpptr[6] * kptr[0]; sum0_3 += tmpptr[7] * kptr[1]; sum1_0 += tmpptr[0] * kptr[2]; sum1_0 += tmpptr[1] * kptr[3]; sum1_1 += tmpptr[2] * kptr[2]; sum1_1 += tmpptr[3] * kptr[3]; sum1_2 += tmpptr[4] * kptr[2]; sum1_2 += tmpptr[5] * kptr[3]; sum1_3 += tmpptr[6] * kptr[2]; sum1_3 += tmpptr[7] * kptr[3]; sum2_0 += tmpptr[0] * kptr[4]; sum2_0 += tmpptr[1] * kptr[5]; sum2_1 += tmpptr[2] * kptr[4]; sum2_1 += tmpptr[3] * kptr[5]; sum2_2 += tmpptr[4] * kptr[4]; sum2_2 += tmpptr[5] * kptr[5]; sum2_3 += tmpptr[6] * kptr[4]; sum2_3 += tmpptr[7] * kptr[5]; sum3_0 += tmpptr[0] * kptr[6]; sum3_0 += tmpptr[1] * kptr[7]; sum3_1 += tmpptr[2] * kptr[6]; sum3_1 += tmpptr[3] * kptr[7]; sum3_2 += tmpptr[4] * kptr[6]; sum3_2 += tmpptr[5] * kptr[7]; sum3_3 += tmpptr[6] * kptr[6]; sum3_3 += tmpptr[7] * kptr[7]; tmpptr += 8; kptr += 8; } for (; q<inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += 
tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = float2int8(((float)sum0_0 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[1] = float2int8(((float)sum0_1 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[2] = float2int8(((float)sum0_2 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[3] = float2int8(((float)sum0_3 * scale_requant_in0 + bias0) * scale_requant_out0); outptr1[0] = float2int8(((float)sum1_0 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[1] = float2int8(((float)sum1_1 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[2] = float2int8(((float)sum1_2 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[3] = float2int8(((float)sum1_3 * scale_requant_in1 + bias1) * scale_requant_out1); outptr2[0] = float2int8(((float)sum2_0 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[1] = float2int8(((float)sum2_1 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[2] = float2int8(((float)sum2_2 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[3] = float2int8(((float)sum2_3 * scale_requant_in2 + bias2) * scale_requant_out2); outptr3[0] = float2int8(((float)sum3_0 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[1] = float2int8(((float)sum3_1 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[2] = float2int8(((float)sum3_2 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[3] = float2int8(((float)sum3_3 * scale_requant_in3 + bias3) * scale_requant_out3); #endif outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; } for (; i<size; i++) { signed char* tmpptr = bottom_tm.channel(i/4 + i%4); const signed char* kptr = kernel.channel(p/4); #if 1 //__ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q=0; for (; q+3<inch; q=q+4) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3] int8x8x2_t _k = vld2_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1];k0[2-3], k1[2-3], k2[2-3], k3[2-3] int16x8_t _r0_s16 = vmovl_s8(_r0); // i0[0],i0[1],i0[2],i0[3] int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); // k0[0],k1[0],k2[0],k3[0],k0[2],k1[2],k2[2],k3[2] int16x8_t _k13_s16 = vmovl_s8(_k.val[1]); // k0[1],k1[1],k2[1],k3[1],k0[3],k1[3],k2[3],k3[3] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0); // i0[0]*k[0-3][0] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1); // i0[1]*k[0-3][1] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); // i0[2]*k[0-3][2] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); // i0[3]*k[0-3][3] tmpptr += 4; kptr += 16; } for (; q+1<inch; q=q+2) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3] int8x8_t _k = vld1_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1] _r0[2] = _r0[0]; _r0[3] = _r0[1]; _r0[4] = _r0[0]; _r0[5] = _r0[1]; _r0[6] = _r0[0]; _r0[7] = _r0[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 2; kptr += 8; } for (; q<inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3] int8x8_t _k = vld1_s8(kptr); // k[0-3][0] int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vaddw_s16(_sum, vget_low_s16(_tp0)); tmpptr += 
1; kptr += 4; } // top_s32 -> top_f32 float32x4_t _sum_f32 = vcvtq_f32_s32(_sum); // top_f32 = top_f32 * scale_in _sum_f32 = vmulq_f32(_sum_f32, _scale_in03); // top_f32 = top_f32 + bias _sum_f32 = vaddq_f32(_sum_f32, _bias03); // top_f32 = top_f32 * scale_out _sum_f32 = vmulq_f32(_sum_f32, _scale_out03); // top_f32 -> top_s32 _sum = vcvtaq_s32_f32(_sum_f32); // top_s32 -> top_s16 int16x4_t _sum_s16 = vqmovn_s32(_sum); int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16); // top_s16 -> top_s8 int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp); // save top_s8 vst1_lane_s8(outptr0, _sum_s8, 0); vst1_lane_s8(outptr1, _sum_s8, 1); vst1_lane_s8(outptr2, _sum_s8, 2); vst1_lane_s8(outptr3, _sum_s8, 3); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q=0; for (; q+1<inch; q=q+2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[0] * kptr[2]; sum1 += tmpptr[1] * kptr[3]; sum2 += tmpptr[0] * kptr[4]; sum2 += tmpptr[1] * kptr[5]; sum3 += tmpptr[0] * kptr[6]; sum3 += tmpptr[1] * kptr[7]; tmpptr += 2; kptr += 8; } for (; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr += 1; kptr += 4; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0); outptr1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1); outptr2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2); outptr3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3); #endif outptr0++; outptr1++; outptr2++; outptr3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out0 = top_blob.channel(p); signed char* outptr0 = out0; const float bias0 = bias ? 
bias[p] : 0.f; const float scale_requant_in = scales_requant[2*p]; const float scale_requant_out = scales_requant[2*p+1]; float32x4_t _bias0 = vdupq_n_f32(bias0); float32x4_t _scale_in = vdupq_n_f32(scale_requant_in); float32x4_t _scale_out = vdupq_n_f32(scale_requant_out); int i = 0; for (; i+3<size; i+=4) { signed char* tmpptr = bottom_tm.channel(i/4); const signed char* kptr = kernel.channel(p/4 + p%4); #if 1 //__ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q=0; for (; q+1<inch; q=q+2) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-1], i1[0-1], i2[0-1], i3[0-1] int8x8_t _k = vld1_s8(kptr); // k0[0-1] _k[2] = _k[0]; _k[3] = _k[1]; _k[4] = _k[0]; _k[5] = _k[1]; _k[6] = _k[0]; _k[7] = _k[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 8; kptr += 2; } for (; q<inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0], i1[0], i2[0], i3[0] int8x8_t _k = vld1_s8(kptr); // k[0][0] int16x8_t _r0_s16 = vmovl_s8(_r0); int16x8_t _k_s16 = vmovl_s8(_k); _sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); // i0k0, i1k0, i2k0, i3k0 tmpptr += 4; kptr += 1; } // top_s32 -> top_f32 float32x4_t _sum_f32 = vcvtq_f32_s32(_sum); // top_f32 = top_f32 * scale_in _sum_f32 = vmulq_f32(_sum_f32, _scale_in); // top_f32 = top_f32 + bias _sum_f32 = vaddq_f32(_sum_f32, _bias0); // top_f32 = top_f32 * scale_out _sum_f32 = vmulq_f32(_sum_f32, _scale_out); // top_f32 -> top_s32 _sum = vcvtaq_s32_f32(_sum_f32); // top_s32 -> top_s16 int16x4_t _sum_s16 = vqmovn_s32(_sum); int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16); // top_s16 -> top_s8 int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp); // save top_s8 vst1_s8(outptr0, _sum_s8); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q=0; for (; q+1<inch; q=q+2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[2] * kptr[0]; sum1 += tmpptr[3] * kptr[1]; sum2 += tmpptr[4] * kptr[0]; sum2 += tmpptr[5] * kptr[1]; sum3 += tmpptr[6] * kptr[0]; sum3 += tmpptr[7] * kptr[1]; tmpptr += 8; kptr += 2; } for (; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out); outptr0[1] = float2int8(((float)sum1 * scale_requant_in + bias0) * scale_requant_out); outptr0[2] = float2int8(((float)sum2 * scale_requant_in + bias0) * scale_requant_out); outptr0[3] = float2int8(((float)sum3 * scale_requant_in + bias0) * scale_requant_out); #endif outptr0 += 4; } for (; i<size; i++) { signed char* tmpptr = bottom_tm.channel(i/4 + i%4); const signed char* kptr = kernel.channel(p/4 + p%4); int q = 0; int sum0 = 0; for (; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out); outptr0++; } } } #else static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch) { const signed char* kernel = _kernel; kernel_tm.create(4*4, inch/4 + inch%4, outch/4 + outch%4, (size_t)1u); int p = 0; for (; p+3<outch; p+=4) { const signed char* kernel0 = kernel + (p+0)*inch; const signed char* kernel1 = kernel + (p+1)*inch; const signed char* kernel2 = kernel + (p+2)*inch; const signed char* kernel3 = kernel + (p+3)*inch; signed char* ktmp = kernel_tm.channel(p/4); for (int q=0; q<inch; q++) { // kernel0...3 0 ktmp[0] = kernel0[0]; ktmp[1] = kernel1[0]; ktmp[2] = kernel2[0]; ktmp[3] = kernel3[0]; ktmp += 4; kernel0 
+= 1; kernel1 += 1; kernel2 += 1; kernel3 += 1; } } for (; p<outch; p++) { const signed char* kernel0 = kernel + p*inch; signed char* ktmp = kernel_tm.channel(p/4 + p%4); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[0]; ktmp++; kernel0++; } } } /* * Convolution 1x1 quantized with sgemm int8 */ static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; // interleave Mat tmp(8*4, inch/4+inch%4, size/8 + (size%8)/4 + size%4, 1u, opt.workspace_allocator); { int nn_size = size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 8; const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i/8); for (int q=0; q<inch; q++) { #if __ARM_NEON asm volatile( "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "vst1.s8 {d0}, [%1]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0" ); img0 += bottom_blob.cstep; #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif // __ARM_NEON__ } } nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = remain_size_start + ii * 4; const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i/8 + (i%8)/4); for (int q=0; q<inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<size; i++) { const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); for (int q=0; q<inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } // sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* outptr0 = top_blob.channel(p); int* outptr1 = top_blob.channel(p+1); int* outptr2 = top_blob.channel(p+2); int* outptr3 = top_blob.channel(p+3); int i = 0; for (; i+7<size; i+=8) { const signed char* tmpptr = tmp.channel(i/8); const signed char* kptr = kernel.channel(p/4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "vmov.s32 q10, #0 \n" "vmov.s32 q11, #0 \n" "vmov.s32 q12, #0 \n" "vmov.s32 q13, #0 \n" "lsr r4, %12, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4-d7}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data) "vmovl.s8 q5, d7 \n"// a30-a37 "vmovl.s8 q4, d6 \n"// a20-a27 "vmovl.s8 q3, d5 \n"// a10-a17 "vmovl.s8 q2, d4 \n"// a00-a07 "vld1.s8 {d0-d1}, [%5]! 
\n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n"// k02-k32,k03-k33 "vmovl.s8 q0, d0 \n"// k00-k30,k01-k31 "vmlal.s16 q6, d4, d0[0] \n"// sum0 = (a00-a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q8, d4, d0[1] \n"// sum1 = (a00-a07) * k10 "vmlal.s16 q9, d5, d0[1] \n" "vmlal.s16 q10, d4, d0[2] \n"// sum2 = (a00-a07) * k20 "vmlal.s16 q11, d5, d0[2] \n" "vmlal.s16 q12, d4, d0[3] \n"// sum3 = (a00-a07) * k30 "vmlal.s16 q13, d5, d0[3] \n" "vmlal.s16 q6, d6, d1[0] \n"// sum0 += (a10-a17) * k01 "vmlal.s16 q7, d7, d1[0] \n" "vmlal.s16 q8, d6, d1[1] \n"// sum1 += (a10-a17) * k11 "vmlal.s16 q9, d7, d1[1] \n" "vmlal.s16 q10, d6, d1[2] \n"// sum2 += (a10-a17) * k21 "vmlal.s16 q11, d7, d1[2] \n" "vmlal.s16 q12, d6, d1[3] \n"// sum3 += (a10-a17) * k31 "vmlal.s16 q13, d7, d1[3] \n" "vmlal.s16 q6, d8, d2[0] \n"// sum0 += (a20-a27) * k02 "vmlal.s16 q7, d9, d2[0] \n" "vmlal.s16 q8, d8, d2[1] \n"// sum1 += (a20-a27) * k12 "vmlal.s16 q9, d9, d2[1] \n" "vmlal.s16 q10, d8, d2[2] \n"// sum2 += (a20-a27) * k22 "vmlal.s16 q11, d9, d2[2] \n" "vmlal.s16 q12, d8, d2[3] \n"// sum3 += (a20-a27) * k32 "vmlal.s16 q13, d9, d2[3] \n" "vmlal.s16 q6, d10, d3[0] \n"// sum0 += (a30-a37) * k03 "vmlal.s16 q7, d11, d3[0] \n" "vmlal.s16 q8, d10, d3[1] \n"// sum1 += (a30-a37) * k13 "vmlal.s16 q9, d11, d3[1] \n" "vmlal.s16 q10, d10, d3[2] \n"// sum2 += (a30-a37) * k23 "vmlal.s16 q11, d11, d3[2] \n" "vmlal.s16 q12, d10, d3[3] \n"// sum3 += (a30-a37) * k33 "vmlal.s16 q13, d11, d3[3] \n" "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%4]! \n"// tmpr a00-a07 a(inch)(data) "vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "vmlal.s16 q8, d2, d0[1] \n"// sum1 += (a00-a07) * k10 "vmlal.s16 q9, d3, d0[1] \n" "vmlal.s16 q10, d2, d0[2] \n"// sum2 += (a00-a07) * k20 "vmlal.s16 q11, d3, d0[2] \n" "vmlal.s16 q12, d2, d0[3] \n"// sum3 += (a00-a07) * k30 "vmlal.s16 q13, d3, d0[3] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.s32 {d12-d15}, [%0]! \n" "vst1.s32 {d16-d19}, [%1]! \n" "vst1.s32 {d20-d23}, [%2]! \n" "vst1.s32 {d24-d27}, [%3]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum0_4 = 0; int sum0_5 = 0; int sum0_6 = 0; int sum0_7 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum1_4 = 0; int sum1_5 = 0; int sum1_6 = 0; int sum1_7 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum2_4 = 0; int sum2_5 = 0; int sum2_6 = 0; int sum2_7 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int sum3_4 = 0; int sum3_5 = 0; int sum3_6 = 0; int sum3_7 = 0; for (int q=0; q<inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum0_4 += tmpptr[4] * kptr[0]; sum0_5 += tmpptr[5] * kptr[0]; sum0_6 += tmpptr[6] * kptr[0]; sum0_7 += tmpptr[7] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum1_4 += tmpptr[4] * kptr[1]; sum1_5 += tmpptr[5] * kptr[1]; sum1_6 += tmpptr[6] * kptr[1]; sum1_7 += tmpptr[7] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum2_4 += tmpptr[4] * kptr[2]; sum2_5 += tmpptr[5] * kptr[2]; sum2_6 += tmpptr[6] * kptr[2]; sum2_7 += tmpptr[7] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; sum3_4 += tmpptr[4] * kptr[3]; sum3_5 += tmpptr[5] * kptr[3]; sum3_6 += tmpptr[6] * kptr[3]; sum3_7 += tmpptr[7] * kptr[3]; tmpptr += 8; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr0[4] = sum0_4; outptr0[5] = sum0_5; outptr0[6] = sum0_6; outptr0[7] = sum0_7; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr1[4] = sum1_4; outptr1[5] = sum1_5; outptr1[6] = sum1_6; outptr1[7] = sum1_7; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr2[4] = sum2_4; outptr2[5] = sum2_5; outptr2[6] = sum2_6; outptr2[7] = sum2_7; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr3[4] = sum3_4; outptr3[5] = sum3_5; outptr3[6] = sum3_6; outptr3[7] = sum3_7; outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif // __ARM_NEON } for (; i+3<size; i+=4) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4); const signed char* kptr = kernel.channel(p/4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "lsr r4, %12, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4-d5}, [%4]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data) "vmovl.s8 q3, d5 \n"// a20-a23,a30-a33 "vmovl.s8 q2, d4 \n"// a00-a04,a10-a14 "vld1.s8 {d0-d1}, [%5]! 
\n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n"// k02-k32,k03-k33 "vmovl.s8 q0, d0 \n"// k00-k30,k01-k31 "vmlal.s16 q6, d4, d0[0] \n"// sum0 = (a00-a03) * k00 "vmlal.s16 q7, d4, d0[1] \n"// sum1 = (a00-a03) * k10 "vmlal.s16 q8, d4, d0[2] \n"// sum2 = (a00-a03) * k20 "vmlal.s16 q9, d4, d0[3] \n"// sum3 = (a00-a03) * k30 "vmlal.s16 q6, d5, d1[0] \n"// sum0 += (a10-a13) * k01 "vmlal.s16 q7, d5, d1[1] \n"// sum1 += (a10-a13) * k11 "vmlal.s16 q8, d5, d1[2] \n"// sum2 += (a10-a13) * k21 "vmlal.s16 q9, d5, d1[3] \n"// sum3 += (a10-a13) * k31 "vmlal.s16 q6, d6, d2[0] \n"// sum0 += (a20-a23) * k02 "vmlal.s16 q7, d6, d2[1] \n"// sum1 += (a20-a23) * k12 "vmlal.s16 q8, d6, d2[2] \n"// sum2 += (a20-a23) * k22 "vmlal.s16 q9, d6, d2[3] \n"// sum3 += (a20-a23) * k32 "vmlal.s16 q6, d7, d3[0] \n"// sum0 += (a30-a33) * k03 "vmlal.s16 q7, d7, d3[1] \n"// sum1 += (a30-a33) * k13 "vmlal.s16 q8, d7, d3[2] \n"// sum2 += (a30-a33) * k23 "vmlal.s16 q9, d7, d3[3] \n"// sum3 += (a30-a33) * k33 "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n"// tmpr a00-a03 a(inch)(data) "vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #4 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a03) * k00 "vmlal.s16 q7, d2, d0[1] \n"// sum1 += (a00-a03) * k10 "vmlal.s16 q8, d2, d0[2] \n"// sum2 += (a00-a03) * k20 "vmlal.s16 q9, d2, d0[3] \n"// sum3 += (a00-a03) * k30 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.s32 {d12-d13}, [%0]! \n" "vst1.s32 {d14-d15}, [%1]! \n" "vst1.s32 {d16-d17}, [%2]! \n" "vst1.s32 {d18-d19}, [%3]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; for (int q=0; q<inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif // __ARM_NEON } for (; i<size; i++) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); const signed char* kptr = kernel.channel(p/4); #if __ARM_NEON asm volatile( // inch loop "veor q6, q6, q6 \n" "veor q7, q7, q7 \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "vmov.s32 q10, #0 \n" "lsr r4, %12, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4}, [%4] \n"// tmpr a00,a10,a20,a30 a(inch)(data) "add %4, #4 \n" "vmovl.s8 q2, d4 \n"// a00,a10,a20,a30 "vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n"// k02-k32,k03-k33 "vmovl.s8 q0, d0 \n"// k00-k30,k01-k31 "vmlal.s16 q6, d0, d4[0] \n"// (k00-k30) * a00 "vmlal.s16 q7, d1, d4[1] \n"// (k01-k31) * a10 "vmlal.s16 q8, d2, d4[2] \n"// (k02-k32) * a20 "vmlal.s16 q9, d3, d4[3] \n"// (k03-k33) * a30 "subs r4, r4, #1 \n" "bne 0b \n"// end for "vadd.s32 q6, q6, q7 \n" "vadd.s32 q9, q9, q8 \n" "vadd.s32 q10, q6, q9 \n" "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n"// tmpr a00 a(inch)(data) "vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #1 \n" "add %5, #4 \n" "vmlal.s16 q10, d0, d2[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.s32 {d20[0]}, [%0]! \n" "vst1.s32 {d20[1]}, [%1]! \n" "vst1.s32 {d21[0]}, [%2]! \n" "vst1.s32 {d21[1]}, [%3]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr++; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; outptr0++; outptr1++; outptr2++; outptr3++; #endif // __ARM_NEON } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out0 = top_blob.channel(p); int* outptr0 = out0; int i = 0; for (; i+7<size; i+=8) { const signed char* tmpptr = tmp.channel(i/8); const signed char* kptr = kernel.channel(p/4 + p%4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "lsr r4, %6, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%1, #128] \n" "vld1.s8 {d4-d7}, [%1]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data) "vmovl.s8 q5, d7 \n"// a30-a37 "vmovl.s8 q4, d6 \n"// a20-a27 "vmovl.s8 q3, d5 \n"// a10-a17 "vmovl.s8 q2, d4 \n"// a00-a07 "vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch) "vmovl.s8 q0, d0 \n"// k00,k01,k02,k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n"// (a00-a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q6, d6, d0[1] \n"// (a10-a17) * k01 "vmlal.s16 q7, d7, d0[1] \n" "vmlal.s16 q6, d8, d0[2] \n"// (a20-a27) * k02 "vmlal.s16 q7, d9, d0[2] \n" "vmlal.s16 q6, d10, d0[3] \n"// (a30-a37) * k03 "vmlal.s16 q7, d11, d0[3] \n" "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %6, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%1]! \n"// tmpr a00-a07 a(inch)(data) "vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n"// (a00-a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.s32 {d12-d15}, [%0]! \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; sum4 += tmpptr[4] * kptr[0]; sum5 += tmpptr[5] * kptr[0]; sum6 += tmpptr[6] * kptr[0]; sum7 += tmpptr[7] * kptr[0]; tmpptr += 8; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0[4] = sum4; outptr0[5] = sum5; outptr0[6] = sum6; outptr0[7] = sum7; outptr0 += 8; #endif // __ARM_NEON } for (; i+3<size; i+=4) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4); const signed char* kptr = kernel.channel(p/4 + p%4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "lsr r4, %6, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%2, #128] \n" "vld1.s8 {d4-d5}, [%1]! 
\n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data) "vmovl.s8 q3, d5 \n"// a20-a23,a30-a33 "vmovl.s8 q2, d4 \n"// a00-a03,a10-a13 "vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch) "vmovl.s8 q0, d0 \n"// k00,k01,k02,k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n"// (a00-a03) * k00 "vmlal.s16 q6, d5, d0[1] \n"// (a10-a13) * k01 "vmlal.s16 q6, d6, d0[2] \n"// (a20-a23) * k02 "vmlal.s16 q6, d7, d0[3] \n"// (a30-a33) * k03 "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %6, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%1] \n"// tmpr a00-a03 a(inch)(data) "vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %1, #4 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n"// (a00-a03) * k00 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.s32 {d12-d13}, [%0]! \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0 += 4; #endif // __ARM_NEON } for (; i<size; i++) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); const signed char* kptr = kernel.channel(p/4 + p%4); int q = 0; int sum0 = 0; for (; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } // // NOTE sgemm int8 // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // int* outptr0 = out0; // // for (int i=0; i<size; i++) // { // int sum = 0; // // const signed char* kptr = _kernel.channel(p/8 + p%8); // // for (int q=0; q<inch; q++) // { // const signed char* img0 = bottom_blob.channel(q); // // sum += img0[i] * kptr[0]; // kptr ++; // } // // outptr0[i] = sum; // } // } } static void conv1x1s1_sgemm_int8_requant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; const float* bias = _bias; // interleave Mat tmp(8*4, inch/4+inch%4, size/8 + (size%8)/4 + size%4, 1u, opt.workspace_allocator); { int nn_size = size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 8; const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i/8); for (int q=0; q<inch; q++) { #if __ARM_NEON asm volatile( "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "vst1.s8 {d0}, [%1]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0" ); img0 += bottom_blob.cstep; #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif // __ARM_NEON__ } } nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = remain_size_start + ii * 4; const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i/8 + (i%8)/4); for (int q=0; q<inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<size; i++) { const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); for (int q=0; q<inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } // sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; signed char* outptr0 = top_blob.channel(p); signed char* outptr1 = top_blob.channel(p+1); signed char* outptr2 = top_blob.channel(p+2); signed char* outptr3 = top_blob.channel(p+3); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p+1] : 0.f; const float bias2 = bias ? bias[p+2] : 0.f; const float bias3 = bias ? bias[p+3] : 0.f; const float scale_requant_in0 = scales_requant[2*p]; const float scale_requant_out0 = scales_requant[2*p+1]; const float scale_requant_in1 = scales_requant[2*(p+1)]; const float scale_requant_out1 = scales_requant[2*(p+1)+1]; const float scale_requant_in2 = scales_requant[2*(p+2)]; const float scale_requant_out2 = scales_requant[2*(p+2)+1]; const float scale_requant_in3 = scales_requant[2*(p+3)]; const float scale_requant_out3 = scales_requant[2*(p+3)+1]; float32x4_t _bias03, _scale_in03, _scale_out03; _bias03[0] = bias0; _bias03[1] = bias1; _bias03[2] = bias2; _bias03[3] = bias3; _scale_in03[0] = scale_requant_in0; _scale_in03[1] = scale_requant_in1; _scale_in03[2] = scale_requant_in2; _scale_in03[3] = scale_requant_in3; _scale_out03[0] = scale_requant_out0; _scale_out03[1] = scale_requant_out1; _scale_out03[2] = scale_requant_out2; _scale_out03[3] = scale_requant_out3; int i = 0; for (; i+7<size; i+=8) { const signed char* tmpptr = tmp.channel(i/8); const signed char* kptr = kernel.channel(p/4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "vmov.s32 q10, #0 \n" "vmov.s32 q11, #0 \n" "vmov.s32 q12, #0 \n" "vmov.s32 q13, #0 \n" "lsr r4, %12, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d28-d31}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data) "vmovl.s8 q5, d31 \n"// a30-a37 "vmovl.s8 q4, d30 \n"// a20-a27 "vmovl.s8 q15, d29 \n"// a10-a17 "vmovl.s8 q14, d28 \n"// a00-a07 "vld1.s8 {d0-d1}, [%5]! 
\n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n"// k02-k32,k03-k33 "vmovl.s8 q0, d0 \n"// k00-k30,k01-k31 "vmlal.s16 q6, d28, d0[0] \n"// sum0 = (a00-a07) * k00 "vmlal.s16 q7, d29, d0[0] \n" "vmlal.s16 q8, d28, d0[1] \n"// sum1 = (a00-a07) * k10 "vmlal.s16 q9, d29, d0[1] \n" "vmlal.s16 q10, d28, d0[2] \n"// sum2 = (a00-a07) * k20 "vmlal.s16 q11, d29, d0[2] \n" "vmlal.s16 q12, d28, d0[3] \n"// sum3 = (a00-a07) * k30 "vmlal.s16 q13, d29, d0[3] \n" "vmlal.s16 q6, d30, d1[0] \n"// sum0 += (a10-a17) * k01 "vmlal.s16 q7, d31, d1[0] \n" "vmlal.s16 q8, d30, d1[1] \n"// sum1 += (a10-a17) * k11 "vmlal.s16 q9, d31, d1[1] \n" "vmlal.s16 q10, d30, d1[2] \n"// sum2 += (a10-a17) * k21 "vmlal.s16 q11, d31, d1[2] \n" "vmlal.s16 q12, d30, d1[3] \n"// sum3 += (a10-a17) * k31 "vmlal.s16 q13, d31, d1[3] \n" "vmlal.s16 q6, d8, d2[0] \n"// sum0 += (a20-a27) * k02 "vmlal.s16 q7, d9, d2[0] \n" "vmlal.s16 q8, d8, d2[1] \n"// sum1 += (a20-a27) * k12 "vmlal.s16 q9, d9, d2[1] \n" "vmlal.s16 q10, d8, d2[2] \n"// sum2 += (a20-a27) * k22 "vmlal.s16 q11, d9, d2[2] \n" "vmlal.s16 q12, d8, d2[3] \n"// sum3 += (a20-a27) * k32 "vmlal.s16 q13, d9, d2[3] \n" "vmlal.s16 q6, d10, d3[0] \n"// sum0 += (a30-a37) * k03 "vmlal.s16 q7, d11, d3[0] \n" "vmlal.s16 q8, d10, d3[1] \n"// sum1 += (a30-a37) * k13 "vmlal.s16 q9, d11, d3[1] \n" "vmlal.s16 q10, d10, d3[2] \n"// sum2 += (a30-a37) * k23 "vmlal.s16 q11, d11, d3[2] \n" "vmlal.s16 q12, d10, d3[3] \n"// sum3 += (a30-a37) * k33 "vmlal.s16 q13, d11, d3[3] \n" "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%4]! \n"// tmpr a00-a07 a(inch)(data) "vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "vmlal.s16 q8, d2, d0[1] \n"// sum1 += (a00-a07) * k10 "vmlal.s16 q9, d3, d0[1] \n" "vmlal.s16 q10, d2, d0[2] \n"// sum2 += (a00-a07) * k20 "vmlal.s16 q11, d3, d0[2] \n" "vmlal.s16 q12, d2, d0[3] \n"// sum3 += (a00-a07) * k30 "vmlal.s16 q13, d3, d0[3] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vdup.f32 q14, %13 \n" // bias "vdup.f32 q15, %14 \n" // bias "vdup.f32 q4, %15 \n" // bias "vdup.f32 q5, %16 \n" // bias // sum0 // top_s32 -> top_f32 "vcvt.f32.s32 q6, q6 \n" "vcvt.f32.s32 q7, q7 \n" "vcvt.f32.s32 q8, q8 \n" "vcvt.f32.s32 q9, q9 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q6, q6, %e17[0] \n" "vmul.f32 q7, q7, %e17[0] \n" "vmul.f32 q8, q8, %e17[1] \n" "vmul.f32 q9, q9, %e17[1] \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, q14 \n" "vadd.f32 q7, q7, q14 \n" "vadd.f32 q8, q8, q15 \n" "vadd.f32 q9, q9, q15 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %e18[0] \n" "vmul.f32 q1, q7, %e18[0] \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d12, q0 \n" "vqmovn.s32 d13, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.8 {d12}, [%0]! 
\n" // sum1 // top_f32 = top_f32 * scale_out "vmul.f32 q0, q8, %e18[1] \n" "vmul.f32 q1, q9, %e18[1] \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d16, q0 \n" "vqmovn.s32 d17, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d16, q8 \n" // save top_s8 "vst1.8 {d16}, [%1]! \n" // sum2 // top_s32 -> top_f32 "vcvt.f32.s32 q10, q10 \n" "vcvt.f32.s32 q11, q11 \n" "vcvt.f32.s32 q12, q12 \n" "vcvt.f32.s32 q13, q13 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q10, q10, %f17[0] \n" "vmul.f32 q11, q11, %f17[0] \n" "vmul.f32 q12, q12, %f17[1] \n" "vmul.f32 q13, q13, %f17[1] \n" // top_f32 = top_f32 + bias "vadd.f32 q10, q10, q4 \n" "vadd.f32 q11, q11, q4 \n" "vadd.f32 q12, q12, q5 \n" "vadd.f32 q13, q13, q5 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q10, %f18[0] \n" "vmul.f32 q1, q11, %f18[0] \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d20, q0 \n" "vqmovn.s32 d21, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d20, q10 \n" // save top_s8 "vst1.8 {d20}, [%2]! \n" // sum3 // top_f32 = top_f32 * scale_out "vmul.f32 q0, q12, %f18[1] \n" "vmul.f32 q1, q13, %f18[1] \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d24, q0 \n" "vqmovn.s32 d25, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d24, q12 \n" // save top_s8 "vst1.8 {d24}, [%3]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), // %12 "r"(bias0), // %13 "r"(bias1), // %14 "r"(bias2), // %15 "r"(bias3), // %16 "w"(_scale_in03), // %17 "w"(_scale_out03) // %18 : "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13" ,"q14" ,"q15" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum0_4 = 0; int sum0_5 = 0; int sum0_6 = 0; int sum0_7 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum1_4 = 0; int sum1_5 = 0; int sum1_6 = 0; int sum1_7 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum2_4 = 0; int sum2_5 = 0; int sum2_6 = 0; int sum2_7 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int sum3_4 = 0; int sum3_5 = 0; int sum3_6 = 0; int sum3_7 = 0; for (int q=0; q<inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum0_4 += tmpptr[4] * kptr[0]; sum0_5 += tmpptr[5] * kptr[0]; sum0_6 += tmpptr[6] * kptr[0]; sum0_7 += tmpptr[7] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum1_4 += tmpptr[4] * kptr[1]; sum1_5 += tmpptr[5] * kptr[1]; sum1_6 += tmpptr[6] * kptr[1]; sum1_7 += tmpptr[7] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum2_4 += tmpptr[4] * kptr[2]; sum2_5 += tmpptr[5] * kptr[2]; sum2_6 += tmpptr[6] * kptr[2]; sum2_7 += tmpptr[7] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; sum3_4 += tmpptr[4] * kptr[3]; sum3_5 += tmpptr[5] * kptr[3]; sum3_6 += tmpptr[6] * kptr[3]; sum3_7 += tmpptr[7] * kptr[3]; tmpptr += 8; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr0[4] = sum0_4; outptr0[5] = sum0_5; outptr0[6] = sum0_6; outptr0[7] = sum0_7; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr1[4] = sum1_4; outptr1[5] = sum1_5; outptr1[6] = sum1_6; outptr1[7] = sum1_7; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr2[4] = sum2_4; outptr2[5] = sum2_5; outptr2[6] = sum2_6; outptr2[7] = sum2_7; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr3[4] = sum3_4; outptr3[5] = sum3_5; outptr3[6] = sum3_6; outptr3[7] = sum3_7; outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif // __ARM_NEON } for (; i+3<size; i+=4) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4); const signed char* kptr = kernel.channel(p/4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "lsr r4, %12, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d28-d29}, [%4]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data) "vmovl.s8 q15, d29 \n"// a20-a23,a30-a33 "vmovl.s8 q14, d28 \n"// a00-a04,a10-a14 "vld1.s8 {d0-d1}, [%5]! 
\n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n"// k02-k32,k03-k33 "vmovl.s8 q0, d0 \n"// k00-k30,k01-k31 "vmlal.s16 q6, d28, d0[0] \n"// sum0 = (a00-a03) * k00 "vmlal.s16 q7, d28, d0[1] \n"// sum1 = (a00-a03) * k10 "vmlal.s16 q8, d28, d0[2] \n"// sum2 = (a00-a03) * k20 "vmlal.s16 q9, d28, d0[3] \n"// sum3 = (a00-a03) * k30 "vmlal.s16 q6, d29, d1[0] \n"// sum0 += (a10-a13) * k01 "vmlal.s16 q7, d29, d1[1] \n"// sum1 += (a10-a13) * k11 "vmlal.s16 q8, d29, d1[2] \n"// sum2 += (a10-a13) * k21 "vmlal.s16 q9, d29, d1[3] \n"// sum3 += (a10-a13) * k31 "vmlal.s16 q6, d30, d2[0] \n"// sum0 += (a20-a23) * k02 "vmlal.s16 q7, d30, d2[1] \n"// sum1 += (a20-a23) * k12 "vmlal.s16 q8, d30, d2[2] \n"// sum2 += (a20-a23) * k22 "vmlal.s16 q9, d30, d2[3] \n"// sum3 += (a20-a23) * k32 "vmlal.s16 q6, d31, d3[0] \n"// sum0 += (a30-a33) * k03 "vmlal.s16 q7, d31, d3[1] \n"// sum1 += (a30-a33) * k13 "vmlal.s16 q8, d31, d3[2] \n"// sum2 += (a30-a33) * k23 "vmlal.s16 q9, d31, d3[3] \n"// sum3 += (a30-a33) * k33 "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n"// tmpr a00-a03 a(inch)(data) "vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #4 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a03) * k00 "vmlal.s16 q7, d2, d0[1] \n"// sum1 += (a00-a03) * k10 "vmlal.s16 q8, d2, d0[2] \n"// sum2 += (a00-a03) * k20 "vmlal.s16 q9, d2, d0[3] \n"// sum3 += (a00-a03) * k30 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vdup.f32 q14, %13 \n" // bias "vdup.f32 q15, %14 \n" // bias "vdup.f32 q4, %15 \n" // bias "vdup.f32 q5, %16 \n" // bias // sum0-1 // top_s32 -> top_f32 "vcvt.f32.s32 q6, q6 \n" "vcvt.f32.s32 q7, q7 \n" "vcvt.f32.s32 q8, q8 \n" "vcvt.f32.s32 q9, q9 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q6, q6, %e17[0] \n" "vmul.f32 q7, q7, %e17[1] \n" "vmul.f32 q8, q8, %f17[0] \n" "vmul.f32 q9, q9, %f17[1] \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, q14 \n" "vadd.f32 q7, q7, q15 \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %e18[0] \n" "vmul.f32 q1, q7, %e18[1] \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d12, q0 \n" "vqmovn.s32 d13, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.s32 {d12[0]}, [%0]! \n" "vst1.s32 {d12[1]}, [%1]! \n" // sum1-2 // top_f32 = top_f32 * scale_out "vmul.f32 q0, q8, %f18[0] \n" "vmul.f32 q1, q9, %f18[1] \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d16, q0 \n" "vqmovn.s32 d17, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d16, q8 \n" // save top_s8 "vst1.s32 {d16[0]}, [%2]! \n" "vst1.s32 {d16[1]}, [%3]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), // %12 "r"(bias0), // %13 "r"(bias1), // %14 "r"(bias2), // %15 "r"(bias3), // %16 "w"(_scale_in03), // %17 "w"(_scale_out03) // %18 : "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; for (int q=0; q<inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif // __ARM_NEON } for (; i<size; i++) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); const signed char* kptr = kernel.channel(p/4); #if __ARM_NEON asm volatile( // inch loop "veor q6, q6, q6 \n" "veor q7, q7, q7 \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "vmov.s32 q10, #0 \n" "lsr r4, %12, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4}, [%4] \n"// tmpr a00,a10,a20,a30 a(inch)(data) "add %4, #4 \n" "vmovl.s8 q2, d4 \n"// a00,a10,a20,a30 "vld1.s8 {d0-d1}, [%5]! 
\n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n"// k02-k32,k03-k33 "vmovl.s8 q0, d0 \n"// k00-k30,k01-k31 "vmlal.s16 q6, d0, d4[0] \n"// (k00-k30) * a00 "vmlal.s16 q7, d1, d4[1] \n"// (k01-k31) * a10 "vmlal.s16 q8, d2, d4[2] \n"// (k02-k32) * a20 "vmlal.s16 q9, d3, d4[3] \n"// (k03-k33) * a30 "subs r4, r4, #1 \n" "bne 0b \n"// end for "vadd.s32 q6, q6, q7 \n" "vadd.s32 q9, q9, q8 \n" "vadd.s32 q10, q6, q9 \n" "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n"// tmpr a00 a(inch)(data) "vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #1 \n" "add %5, #4 \n" "vmlal.s16 q10, d0, d2[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory // top_s32 -> top_f32 "vcvt.f32.s32 q10, q10 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q10, q10, %q14 \n" // top_f32 = top_f32 + bias "vadd.f32 q10, q10, %q13 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q10, %q15 \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" // top_s32 -> top_s16 "vqmovn.s32 d12, q0 \n" // top_s16 -> top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.8 {d12[0]}, [%0]! \n" "vst1.8 {d12[1]}, [%1]! \n" "vst1.8 {d12[2]}, [%2]! \n" "vst1.8 {d12[3]}, [%3]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), // %12 "w"(_bias03), // %13 "w"(_scale_in03), // %14 "w"(_scale_out03) // %15 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr++; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; outptr0++; outptr1++; outptr2++; outptr3++; #endif // __ARM_NEON } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out0 = top_blob.channel(p); signed char* outptr0 = out0; const float bias0 = bias ? bias[p] : 0.f; const float scale_requant_in = scales_requant[2*p]; const float scale_requant_out = scales_requant[2*p+1]; float32x4_t _bias0 = vdupq_n_f32(bias0); float32x4_t _scale_in = vdupq_n_f32(scale_requant_in); float32x4_t _scale_out = vdupq_n_f32(scale_requant_out); int i = 0; for (; i+7<size; i+=8) { const signed char* tmpptr = tmp.channel(i/8); #if __ARM_NEON const signed char* kptr = kernel.channel(p/4 + p%4); #endif // __ARM_NEON #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "lsr r4, %6, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%1, #128] \n" "vld1.s8 {d4-d7}, [%1]! 
\n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data) "vmovl.s8 q5, d7 \n"// a30-a37 "vmovl.s8 q4, d6 \n"// a20-a27 "vmovl.s8 q3, d5 \n"// a10-a17 "vmovl.s8 q2, d4 \n"// a00-a07 "vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch) "vmovl.s8 q0, d0 \n"// k00,k01,k02,k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n"// (a00-a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q6, d6, d0[1] \n"// (a10-a17) * k01 "vmlal.s16 q7, d7, d0[1] \n" "vmlal.s16 q6, d8, d0[2] \n"// (a20-a27) * k02 "vmlal.s16 q7, d9, d0[2] \n" "vmlal.s16 q6, d10, d0[3] \n"// (a30-a37) * k03 "vmlal.s16 q7, d11, d0[3] \n" "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %6, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%1]! \n"// tmpr a00-a07 a(inch)(data) "vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n"// (a00-a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory // top_s32 -> top_f32 "vcvt.f32.s32 q6, q6 \n" "vcvt.f32.s32 q7, q7 \n" // top_f32 = top_f32 * scale_in "vmul.f32 q6, q6, %q8 \n" "vmul.f32 q7, q7, %q8 \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, %q7 \n" "vadd.f32 q7, q7, %q7 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %q9 \n" "vmul.f32 q1, q7, %q9 \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d12, q0 \n" "vqmovn.s32 d13, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.8 {d12}, [%0]! \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch), // %6 "w"(_bias0), // %7 "w"(_scale_in), // %8 "w"(_scale_out) // %9 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; sum4 += tmpptr[4] * kptr[0]; sum5 += tmpptr[5] * kptr[0]; sum6 += tmpptr[6] * kptr[0]; sum7 += tmpptr[7] * kptr[0]; tmpptr += 8; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0[4] = sum4; outptr0[5] = sum5; outptr0[6] = sum6; outptr0[7] = sum7; outptr0 += 8; #endif // __ARM_NEON } for (; i+3<size; i+=4) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4); #if __ARM_NEON const signed char* kptr = kernel.channel(p/4 + p%4); #endif // __ARM_NEON #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "lsr r4, %6, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%2, #128] \n" "vld1.s8 {d4-d5}, [%1]! 
\n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data) "vmovl.s8 q3, d5 \n"// a20-a23,a30-a33 "vmovl.s8 q2, d4 \n"// a00-a03,a10-a13 "vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch) "vmovl.s8 q0, d0 \n"// k00,k01,k02,k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n"// (a00-a03) * k00 "vmlal.s16 q6, d5, d0[1] \n"// (a10-a13) * k01 "vmlal.s16 q6, d6, d0[2] \n"// (a20-a23) * k02 "vmlal.s16 q6, d7, d0[3] \n"// (a30-a33) * k03 "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %6, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%1] \n"// tmpr a00-a03 a(inch)(data) "vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %1, #4 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n"// (a00-a03) * k00 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory // top_s32 -> top_f32 "vcvt.f32.s32 q6, q6 \n" // top_f32 = top_f32 * scale_in "vmul.f32 q6, q6, %q8 \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, %q7 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %q9 \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" // top_s32 -> top_s16 "vqmovn.s32 d12, q0 \n" // top_s16 -> top_s8 "vqmovn.s16 d12, q6 \n" "vst1.s32 {d12[0]}, [%0]! \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch), // %6 "w"(_bias0), // %7 "w"(_scale_in), // %8 "w"(_scale_out) // %9 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0 += 4; #endif // __ARM_NEON } for (; i<size; i++) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); #if __ARM_NEON const signed char* kptr = kernel.channel(p/4 + p%4); #endif // __ARM_NEON int q = 0; int sum0 = 0; for (; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } } #endif static void conv1x1s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int kernel_w = 1; int kernel_h = 1; int stride_w = 1; int stride_h = 1; conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt); } static void conv1x1s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int kernel_w = 1; int kernel_h = 1; int stride_w = 2; int stride_h = 2; conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt); }
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

static inline signed char float2int8(float v)
{
    int int32 = round(v);
    if (int32 > 127) return 127;
    if (int32 < -128) return -128;
    return (signed char)int32;
}

#if __aarch64__
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const signed char* kernel = _kernel;

    // kernel memory packed 4 x 4
    kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t)1u);

    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 2;
    remain_outch_start = nn_outch << 2;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 4;

        const signed char* k0 = kernel + (p + 0) * inch;
        const signed char* k1 = kernel + (p + 1) * inch;
        const signed char* k2 = kernel + (p + 2) * inch;
        const signed char* k3 = kernel + (p + 3) * inch;

        signed char* ktmp = kernel_tm.channel(p / 4);

        int q = 0;
        for (; q + 1 < inch; q += 2)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k0[1];
            ktmp[2] = k1[0];
            ktmp[3] = k1[1];
            ktmp[4] = k2[0];
            ktmp[5] = k2[1];
            ktmp[6] = k3[0];
            ktmp[7] = k3[1];
            ktmp += 8;

            k0 += 2;
            k1 += 2;
            k2 += 2;
            k3 += 2;
        }
        for (; q < inch; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    for (int p = remain_outch_start; p < outch; p++)
    {
        const signed char* k0 = kernel + (p + 0) * inch;

        signed char* ktmp = kernel_tm.channel(p / 4 + p % 4);

        int q = 0;
        for (; q + 1 < inch; q = q + 2)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k0[1];
            ktmp += 2;
            k0 += 2;
        }
        for (; q < inch; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;

    const int size = w * h;

    // bottom_tm memory packed 4 x 4
    ncnn::Mat bottom_tm(4, inch, size / 4 + size % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = size >> 2;
        int remain_size_start = nn_size << 2;

        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_blob.channel(0);
            const signed char* img1 = bottom_blob.channel(1);
            img0 += i;
            img1 += i;

            signed char* tmpptr = bottom_tm.channel(i / 4);

            int q = 0;
            for (; q + 1 < inch; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img0[1];
                tmpptr[3] = img1[1];
                tmpptr[4] = img0[2];
                tmpptr[5] = img1[2];
                tmpptr[6] = img0[3];
                tmpptr[7] = img1[3];

                tmpptr += 8;
                img0 += bottom_blob.cstep;
                img0 += bottom_blob.cstep;
                img1 += bottom_blob.cstep;
                img1 += bottom_blob.cstep;
            }
            for (; q < inch; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img0[2];
                tmpptr[3] = img0[3];

                tmpptr += 4;
                img0 += bottom_blob.cstep;
            }
        }

        for (int i = remain_size_start; i < size; i++)
        {
            const signed char* img0 = bottom_blob.channel(0);
            img0 += i;

            signed char* tmpptr =
bottom_tm.channel(i / 4 + i % 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += bottom_blob.cstep; } } } //sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int *outptr0 = top_blob.channel(p); int *outptr1 = top_blob.channel(p + 1); int *outptr2 = top_blob.channel(p + 2); int *outptr3 = top_blob.channel(p + 3); int i = 0; for (; i + 3 < size; i += 4) { signed char *tmpptr = bottom_tm.channel(i / 4); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( "prfm pldl1keep, [%4, #128] \n" "prfm pldl1keep, [%5, #128] \n" "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "lsr w4, %w12, #2 \n" // r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" // for (; k + 3 < L; k = k + 4) "ld1 {v0.16b}, [%4] \n" // i0, i1, i2, i3 "ld1 {v4.16b}, [%5] \n" // k0, k1, k2, k3 "add %4, %4, #16 \n" "add %5, %5, #16 \n" "rev32 v1.8h, v0.8h \n" // i1, i0, i3, i2 "rev64 v2.4s, v0.4s \n" // i2, i3, i0, i1 "rev64 v3.8h, v0.8h \n" // i3, i2, i1, i0 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "prfm pldl1keep, [%4, #1024] \n" "prfm pldl1keep, [%5, #1024] \n" "smlal2 v8.8h, v4.16b, v0.16b \n" "smlal2 v9.8h, v4.16b, v1.16b \n" "smlal2 v10.8h, v4.16b, v2.16b \n" "smlal2 v11.8h, v4.16b, v3.16b \n" "sadalp v16.4s, v8.8h \n" // i0k0, i1k1, i2k2, i3k3 "sadalp v17.4s, v9.8h \n" // i1k0, i0k1, i3k2, i2k3 "sadalp v18.4s, v10.8h \n" // i2k0, i3k1, i0k2, i1k3 "sadalp v19.4s, v11.8h \n" // i3k0, i2k1, i1k2, i0k3 "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // for (; k + 1 < L; k = k + 2) // remain loop "and w4, %w12, #3 \n" // w4 = remain = K & 3; "cmp w4, #0 \n" "beq 3f \n" "lsr w4, w4, #1 \n" // r4 = nn = L >> 1 "cmp w4, #0 \n" "beq 3f \n" "2: \n" // for (; k + 1 < L; k = k + 2) "ld1 {v0.8b}, [%4] \n" // i0, i1, i2, i3 "ld1 {v4.8b}, [%5] \n" // k0, k1, k2, k3 "add %4, %4, #8 \n" "add %5, %5, #8 \n" "rev32 v1.4h, v0.4h \n" // i2, i3, i0, i1 "rev64 v2.2s, v0.2s \n" // i1, i0, i3, i2 "rev64 v3.4h, v0.4h \n" // i0, i1, i2, i3 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "sadalp v16.4s, v8.8h \n" "sadalp v17.4s, v9.8h \n" "sadalp v18.4s,v10.8h \n" "sadalp v19.4s,v11.8h \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" // realloc "mov v20.s[0], v16.s[0] \n" "mov v20.s[1], v17.s[0] \n" "mov v20.s[2], v18.s[0] \n" "mov v20.s[3], v19.s[0] \n" "mov v21.s[0], v17.s[1] \n" "mov v21.s[1], v16.s[1] \n" "mov v21.s[2], v19.s[1] \n" "mov v21.s[3], v18.s[1] \n" "mov v22.s[0], v18.s[2] \n" "mov v22.s[1], v19.s[2] \n" "mov v22.s[2], v16.s[2] \n" "mov v22.s[3], v17.s[2] \n" "mov v23.s[0], v19.s[3] \n" "mov v23.s[1], v18.s[3] \n" "mov v23.s[2], v17.s[3] \n" "mov v23.s[3], v16.s[3] \n" "and w4, %w12, #1 \n" // w4 = remain = K & 1; "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v0.8b}, [%4] \n" "ld1 {v1.8b}, [%5] \n" "add %4, %4, #4 \n" "add %5, %5, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // i0[0], i1[0], i2[0], i3[0] "sshll v1.8h, v1.8b, #0 \n" // k0[0], k1[0], k2[0], k3[0] "smlal v20.4s, v0.4h, v1.h[0] \n" // i0k0, i1k0, i2k0, i3k0 "smlal v21.4s, v0.4h, v1.h[1] \n" // i0k1, i1k1, i2k1, i3k1 "smlal v22.4s, v0.4h, v1.h[2] \n" // i0k2, i1k2, i2k2, i3k2 "smlal v23.4s, v0.4h, v1.h[3] \n" // i0k3, i1k3, i2k3, i3k3 "subs 
w4, w4, #1 \n" "bne 2b \n" "5: \n" "st1 {v20.4s}, [%0] \n" "st1 {v21.4s}, [%1] \n" "st1 {v22.4s}, [%2] \n" "st1 {v23.4s}, [%3] \n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0_0 += tmpptr[0] * kptr[0]; sum0_0 += tmpptr[1] * kptr[1]; sum0_1 += tmpptr[2] * kptr[0]; sum0_1 += tmpptr[3] * kptr[1]; sum0_2 += tmpptr[4] * kptr[0]; sum0_2 += tmpptr[5] * kptr[1]; sum0_3 += tmpptr[6] * kptr[0]; sum0_3 += tmpptr[7] * kptr[1]; sum1_0 += tmpptr[0] * kptr[2]; sum1_0 += tmpptr[1] * kptr[3]; sum1_1 += tmpptr[2] * kptr[2]; sum1_1 += tmpptr[3] * kptr[3]; sum1_2 += tmpptr[4] * kptr[2]; sum1_2 += tmpptr[5] * kptr[3]; sum1_3 += tmpptr[6] * kptr[2]; sum1_3 += tmpptr[7] * kptr[3]; sum2_0 += tmpptr[0] * kptr[4]; sum2_0 += tmpptr[1] * kptr[5]; sum2_1 += tmpptr[2] * kptr[4]; sum2_1 += tmpptr[3] * kptr[5]; sum2_2 += tmpptr[4] * kptr[4]; sum2_2 += tmpptr[5] * kptr[5]; sum2_3 += tmpptr[6] * kptr[4]; sum2_3 += tmpptr[7] * kptr[5]; sum3_0 += tmpptr[0] * kptr[6]; sum3_0 += tmpptr[1] * kptr[7]; sum3_1 += tmpptr[2] * kptr[6]; sum3_1 += tmpptr[3] * kptr[7]; sum3_2 += tmpptr[4] * kptr[6]; sum3_2 += tmpptr[5] * kptr[7]; sum3_3 += tmpptr[6] * kptr[6]; sum3_3 += tmpptr[7] * kptr[7]; tmpptr += 8; kptr += 8; } for (; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; #endif outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; } for (; i < size; i++) { signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q = 0; for (; q + 3 < inch; q = q + 4) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 3] int8x8x2_t _k = vld2_s8(kptr); //k0[0 - 1], k1[0 - 1], k2[0 - 1], k3[0 - 1]; k0[2 - 3], k1[2 - 3], k2[2 - 3], k3[2 - 3] int16x8_t _r0_s16 = vmovl_s8(_r0); //i0[0], i0[1], i0[2], i0[3] int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); //k0[0], k1[0], k2[0], k3[0], k0[2], k1[2], k2[2], k3[2] int16x8_t _k13_s16 = vmovl_s8(_k.val[1]); //k0[1], k1[1], k2[1], k3[1], k0[3], k1[3], k2[3], k3[3] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0); //i0[0] * k[0 - 3][0] 
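// (The three vmlal_lane_s16 calls below repeat the same broadcast trick for
// i0[1], i0[2] and i0[3]: vld2_s8 de-interleaved the packed kernel bytes so
// that _k02_s16 holds k[0-3][0] / k[0-3][2] and _k13_s16 holds
// k[0-3][1] / k[0-3][3]; each lane multiply-accumulate adds one input sample
// times four output-channel weights into the four int32 lanes of _sum.)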
_sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1); //i0[1] * k[0 - 3][1] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); //i0[2] * k[0 - 3][2] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); //i0[3] * k[0 - 3][3] tmpptr += 4; kptr += 16; } for (; q + 1 < inch; q = q + 2) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 3] int8x8_t _k = vld1_s8(kptr); //k0[0 - 1], k1[0 - 1], k2[0 - 1], k3[0 - 1] _r0[2] = _r0[0]; _r0[3] = _r0[1]; _r0[4] = _r0[0]; _r0[5] = _r0[1]; _r0[6] = _r0[0]; _r0[7] = _r0[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 2; kptr += 8; } for (; q < inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 3] int8x8_t _k = vld1_s8(kptr); //k[0 - 3][0] int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vaddw_s16(_sum, vget_low_s16(_tp0)); tmpptr += 1; kptr += 4; } vst1q_lane_s32(outptr0, _sum, 0); vst1q_lane_s32(outptr1, _sum, 1); vst1q_lane_s32(outptr2, _sum, 2); vst1q_lane_s32(outptr3, _sum, 3); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[0] * kptr[2]; sum1 += tmpptr[1] * kptr[3]; sum2 += tmpptr[0] * kptr[4]; sum2 += tmpptr[1] * kptr[5]; sum3 += tmpptr[0] * kptr[6]; sum3 += tmpptr[1] * kptr[7]; tmpptr += 2; kptr += 8; } for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr += 1; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; #endif outptr0++; outptr1++; outptr2++; outptr3++; } } for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); int *outptr0 = out0; int i = 0; for (; i + 3 < size; i += 4) { signed char *tmpptr = bottom_tm.channel(i / 4); const signed char *kptr = kernel.channel(p / 4 + p % 4); #if __ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q = 0; for (; q + 1 < inch; q = q + 2) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 1], i1[0 - 1], i2[0 - 1], i3[0 - 1] int8x8_t _k = vld1_s8(kptr); //k0[0 - 1] _k[2] = _k[0]; _k[3] = _k[1]; _k[4] = _k[0]; _k[5] = _k[1]; _k[6] = _k[0]; _k[7] = _k[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 8; kptr += 2; } for (; q < inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0], i1[0], i2[0], i3[0] int8x8_t _k = vld1_s8(kptr); //k[0][0] int16x8_t _r0_s16 = vmovl_s8(_r0); int16x8_t _k_s16 = vmovl_s8(_k); _sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); //i0k0, i1k0, i2k0, i3k0 tmpptr += 4; kptr += 1; } vst1q_s32(outptr0, _sum); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[2] * kptr[0]; sum1 += tmpptr[3] * kptr[1]; sum2 += tmpptr[4] * kptr[0]; sum2 += tmpptr[5] * kptr[1]; sum3 += tmpptr[6] * kptr[0]; sum3 += tmpptr[7] * kptr[1]; tmpptr += 8; kptr += 2; } for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; #endif outptr0 += 4; } for (; i < size; i++) { signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4 + p % 4); int q = 0; int sum0 = 0; for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } 
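            // Last remainder: a single pixel of a single output channel is a
            // plain int32 dot product over inch, stored directly below.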
            outptr0[0] = sum0;
            outptr0++;
        }
    }
}

static void conv1x1s1_sgemm_int8_requant_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, std::vector<float> scales_requant, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;

    const int size = w * h;

    const float* bias = _bias;

    // bottom_tm memory packed 4 x 4
    ncnn::Mat bottom_tm(4, inch, size / 4 + size % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = size >> 2;
        int remain_size_start = nn_size << 2;

        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_blob.channel(0);
            const signed char* img1 = bottom_blob.channel(1);
            img0 += i;
            img1 += i;

            signed char* tmpptr = bottom_tm.channel(i / 4);

            int q = 0;
            for (; q + 1 < inch; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img0[1];
                tmpptr[3] = img1[1];
                tmpptr[4] = img0[2];
                tmpptr[5] = img1[2];
                tmpptr[6] = img0[3];
                tmpptr[7] = img1[3];

                tmpptr += 8;
                img0 += bottom_blob.cstep;
                img0 += bottom_blob.cstep;
                img1 += bottom_blob.cstep;
                img1 += bottom_blob.cstep;
            }
            for (; q < inch; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img0[2];
                tmpptr[3] = img0[3];

                tmpptr += 4;
                img0 += bottom_blob.cstep;
            }
        }

        for (int i = remain_size_start; i < size; i++)
        {
            const signed char* img0 = bottom_blob.channel(0);
            img0 += i;

            signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);

            for (int q = 0; q < inch; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr += 1;
                img0 += bottom_blob.cstep;
            }
        }
    }

    // sgemm process
    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 2;
    remain_outch_start = nn_outch << 2;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 4;

        signed char* outptr0 = top_blob.channel(p);
        signed char* outptr1 = top_blob.channel(p + 1);
        signed char* outptr2 = top_blob.channel(p + 2);
        signed char* outptr3 = top_blob.channel(p + 3);

        const float bias0 = bias ? bias[p] : 0.f;
        const float bias1 = bias ? bias[p + 1] : 0.f;
        const float bias2 = bias ? bias[p + 2] : 0.f;
        const float bias3 = bias ? bias[p + 3] : 0.
f; const float scale_requant_in0 = scales_requant[2 * p]; const float scale_requant_out0 = scales_requant[2 * p + 1]; const float scale_requant_in1 = scales_requant[2 * (p + 1)]; const float scale_requant_out1 = scales_requant[2 * (p + 1) + 1]; const float scale_requant_in2 = scales_requant[2 * (p + 2)]; const float scale_requant_out2 = scales_requant[2 * (p + 2) + 1]; const float scale_requant_in3 = scales_requant[2 * (p + 3)]; const float scale_requant_out3 = scales_requant[2 * (p + 3) + 1]; float32x4_t _bias03, _scale_in03, _scale_out03; float32x4_t _bias0 = vdupq_n_f32(bias0); float32x4_t _bias1 = vdupq_n_f32(bias1); float32x4_t _bias2 = vdupq_n_f32(bias2); float32x4_t _bias3 = vdupq_n_f32(bias3); _bias03[0] = bias0; _bias03[1] = bias1; _bias03[2] = bias2; _bias03[3] = bias3; _scale_in03[0] = scale_requant_in0; _scale_in03[1] = scale_requant_in1; _scale_in03[2] = scale_requant_in2; _scale_in03[3] = scale_requant_in3; _scale_out03[0] = scale_requant_out0; _scale_out03[1] = scale_requant_out1; _scale_out03[2] = scale_requant_out2; _scale_out03[3] = scale_requant_out3; int i = 0; for (; i + 3 < size; i += 4) { signed char *tmpptr = bottom_tm.channel(i / 4); const signed char *kptr = kernel.channel(p / 4); #if 1 //__ARM_NEON asm volatile ( "prfm pldl1keep, [%4, #128] \n" "prfm pldl1keep, [%5, #128] \n" "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "lsr w4, %w12, #2 \n" // r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" // for (; k + 3 < L; k = k + 4) "ld1 {v0.16b}, [%4] \n" // i0, i1, i2, i3 "ld1 {v4.16b}, [%5] \n" // k0, k1, k2, k3 "add %4, %4, #16 \n" "add %5, %5, #16 \n" "rev32 v1.8h, v0.8h \n" // i1, i0, i3, i2 "rev64 v2.4s, v0.4s \n" // i2, i3, i0, i1 "rev64 v3.8h, v0.8h \n" // i3, i2, i1, i0 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "prfm pldl1keep, [%4, #1024] \n" "prfm pldl1keep, [%5, #1024] \n" "smlal2 v8.8h, v4.16b, v0.16b \n" "smlal2 v9.8h, v4.16b, v1.16b \n" "smlal2 v10.8h, v4.16b, v2.16b \n" "smlal2 v11.8h, v4.16b, v3.16b \n" "sadalp v16.4s, v8.8h \n" // i0k0, i1k1, i2k2, i3k3 "sadalp v17.4s, v9.8h \n" // i1k0, i0k1, i3k2, i2k3 "sadalp v18.4s, v10.8h \n" // i2k0, i3k1, i0k2, i1k3 "sadalp v19.4s, v11.8h \n" // i3k0, i2k1, i1k2, i0k3 "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // for (; k + 1 < L; k = k + 2) // remain loop "and w4, %w12, #3 \n" // w4 = remain = K & 3; "cmp w4, #0 \n" "beq 3f \n" "lsr w4, w4, #1 \n" // r4 = nn = L >> 1 "cmp w4, #0 \n" "beq 3f \n" "2: \n" // for (; k + 1 < L; k = k + 2) "ld1 {v0.8b}, [%4] \n" // i0, i1, i2, i3 "ld1 {v4.8b}, [%5] \n" // k0, k1, k2, k3 "add %4, %4, #8 \n" "add %5, %5, #8 \n" "rev32 v1.4h, v0.4h \n" // i2, i3, i0, i1 "rev64 v2.2s, v0.2s \n" // i1, i0, i3, i2 "rev64 v3.4h, v0.4h \n" // i0, i1, i2, i3 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "sadalp v16.4s, v8.8h \n" "sadalp v17.4s, v9.8h \n" "sadalp v18.4s,v10.8h \n" "sadalp v19.4s,v11.8h \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" // realloc "mov v20.s[0], v16.s[0] \n" "mov v20.s[1], v17.s[0] \n" "mov v20.s[2], v18.s[0] \n" "mov v20.s[3], v19.s[0] \n" "mov v21.s[0], v17.s[1] \n" "mov v21.s[1], v16.s[1] \n" "mov v21.s[2], v19.s[1] \n" "mov v21.s[3], v18.s[1] \n" "mov v22.s[0], v18.s[2] \n" "mov v22.s[1], v19.s[2] \n" "mov v22.s[2], v16.s[2] \n" "mov v22.s[3], v17.s[2] \n" "mov v23.s[0], 
v19.s[3] \n" "mov v23.s[1], v18.s[3] \n" "mov v23.s[2], v17.s[3] \n" "mov v23.s[3], v16.s[3] \n" "and w4, %w12, #1 \n" // w4 = remain = K & 1; "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v0.8b}, [%4] \n" "ld1 {v1.8b}, [%5] \n" "add %4, %4, #4 \n" "add %5, %5, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // i0[0], i1[0], i2[0], i3[0] "sshll v1.8h, v1.8b, #0 \n" // k0[0], k1[0], k2[0], k3[0] "smlal v20.4s, v0.4h, v1.h[0] \n" // i0k0, i1k0, i2k0, i3k0 "smlal v21.4s, v0.4h, v1.h[1] \n" // i0k1, i1k1, i2k1, i3k1 "smlal v22.4s, v0.4h, v1.h[2] \n" // i0k2, i1k2, i2k2, i3k2 "smlal v23.4s, v0.4h, v1.h[3] \n" // i0k3, i1k3, i2k3, i3k3 "subs w4, w4, #1 \n" "bne 2b \n" "5: \n" // top_s32->top_f32 "scvtf v20.4s, v20.4s \n" "scvtf v21.4s, v21.4s \n" "scvtf v22.4s, v22.4s \n" "scvtf v23.4s, v23.4s \n" // top_f32 = top_f32 * scale_in "fmul v20.4s, v20.4s, %17.s[0] \n" "fmul v21.4s, v21.4s, %17.s[1] \n" "fmul v22.4s, v22.4s, %17.s[2] \n" "fmul v23.4s, v23.4s, %17.s[3] \n" // top_f32 = top_f32 + bias "fadd v20.4s, v20.4s, %13.4s \n" "fadd v21.4s, v21.4s, %14.4s \n" "fadd v22.4s, v22.4s, %15.4s \n" "fadd v23.4s, v23.4s, %16.4s \n" // top_f32 = top_f32 * scale_out "fmul v20.4s, v20.4s, %18.s[0] \n" "fmul v21.4s, v21.4s, %18.s[1] \n" "fmul v22.4s, v22.4s, %18.s[2] \n" "fmul v23.4s, v23.4s, %18.s[3] \n" // top_f32->top_s32 "fcvtas v20.4s, v20.4s \n" "fcvtas v21.4s, v21.4s \n" "fcvtas v22.4s, v22.4s \n" "fcvtas v23.4s, v23.4s \n" // top_s32->top_s16 "sqxtn v7.4h, v20.4s \n" "sqxtn2 v7.8h, v21.4s \n" "sqxtn v8.4h, v22.4s \n" "sqxtn2 v8.8h, v23.4s \n" // top_s16->top_s8 "sqxtn v0.8b, v7.8h \n" "sqxtn v1.8b, v8.8h \n" // save top_s8 "st1 {v0.s}[0], [%0] \n" "st1 {v0.s}[1], [%1] \n" "st1 {v1.s}[0], [%2] \n" "st1 {v1.s}[1], [%3] \n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), //%12 "w"(_bias0), //%13 "w"(_bias1), //%14 "w"(_bias2), //%15 "w"(_bias3), //%16 "w"(_scale_in03), //%17 "w"(_scale_out03) // %18 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0_0 += tmpptr[0] * kptr[0]; sum0_0 += tmpptr[1] * kptr[1]; sum0_1 += tmpptr[2] * kptr[0]; sum0_1 += tmpptr[3] * kptr[1]; sum0_2 += tmpptr[4] * kptr[0]; sum0_2 += tmpptr[5] * kptr[1]; sum0_3 += tmpptr[6] * kptr[0]; sum0_3 += tmpptr[7] * kptr[1]; sum1_0 += tmpptr[0] * kptr[2]; sum1_0 += tmpptr[1] * kptr[3]; sum1_1 += tmpptr[2] * kptr[2]; sum1_1 += tmpptr[3] * kptr[3]; sum1_2 += tmpptr[4] * kptr[2]; sum1_2 += tmpptr[5] * kptr[3]; sum1_3 += tmpptr[6] * kptr[2]; sum1_3 += tmpptr[7] * kptr[3]; sum2_0 += tmpptr[0] * kptr[4]; sum2_0 += tmpptr[1] * kptr[5]; sum2_1 += tmpptr[2] * kptr[4]; sum2_1 += tmpptr[3] * kptr[5]; sum2_2 += tmpptr[4] * kptr[4]; sum2_2 += tmpptr[5] * kptr[5]; sum2_3 += tmpptr[6] * kptr[4]; sum2_3 += tmpptr[7] * kptr[5]; sum3_0 += tmpptr[0] * kptr[6]; sum3_0 += tmpptr[1] * kptr[7]; sum3_1 += tmpptr[2] * kptr[6]; sum3_1 += tmpptr[3] * kptr[7]; sum3_2 += tmpptr[4] * kptr[6]; sum3_2 += tmpptr[5] * kptr[7]; sum3_3 += tmpptr[6] * kptr[6]; sum3_3 += tmpptr[7] * kptr[7]; tmpptr 
+= 8; kptr += 8; } for (; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = float2int8(((float)sum0_0 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[1] = float2int8(((float)sum0_1 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[2] = float2int8(((float)sum0_2 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[3] = float2int8(((float)sum0_3 * scale_requant_in0 + bias0) * scale_requant_out0); outptr1[0] = float2int8(((float)sum1_0 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[1] = float2int8(((float)sum1_1 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[2] = float2int8(((float)sum1_2 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[3] = float2int8(((float)sum1_3 * scale_requant_in1 + bias1) * scale_requant_out1); outptr2[0] = float2int8(((float)sum2_0 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[1] = float2int8(((float)sum2_1 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[2] = float2int8(((float)sum2_2 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[3] = float2int8(((float)sum2_3 * scale_requant_in2 + bias2) * scale_requant_out2); outptr3[0] = float2int8(((float)sum3_0 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[1] = float2int8(((float)sum3_1 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[2] = float2int8(((float)sum3_2 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[3] = float2int8(((float)sum3_3 * scale_requant_in3 + bias3) * scale_requant_out3); #endif outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; } for (; i < size; i++) { signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4); #if 1 //__ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q = 0; for (; q + 3 < inch; q = q + 4) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 3] int8x8x2_t _k = vld2_s8(kptr); //k0[0 - 1], k1[0 - 1], k2[0 - 1], k3[0 - 1]; k0[2 - 3], k1[2 - 3], k2[2 - 3], k3[2 - 3] int16x8_t _r0_s16 = vmovl_s8(_r0); //i0[0], i0[1], i0[2], i0[3] int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); //k0[0], k1[0], k2[0], k3[0], k0[2], k1[2], k2[2], k3[2] int16x8_t _k13_s16 = vmovl_s8(_k.val[1]); //k0[1], k1[1], k2[1], k3[1], k0[3], k1[3], k2[3], k3[3] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0); //i0[0] * k[0 - 3][0] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1); //i0[1] * k[0 - 3][1] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); //i0[2] * k[0 - 3][2] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); //i0[3] * k[0 - 3][3] tmpptr += 4; kptr += 16; } for (; q + 1 < inch; q = q + 2) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 3] int8x8_t _k = vld1_s8(kptr); //k0[0 - 1], k1[0 - 1], k2[0 - 1], k3[0 - 1] _r0[2] = _r0[0]; _r0[3] = _r0[1]; _r0[4] = _r0[0]; _r0[5] = _r0[1]; _r0[6] = _r0[0]; _r0[7] = _r0[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 2; kptr += 8; } for (; q < inch; q++) { int8x8_t 
_r0 = vld1_s8(tmpptr); //i0[0 - 3] int8x8_t _k = vld1_s8(kptr); //k[0 - 3][0] int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vaddw_s16(_sum, vget_low_s16(_tp0)); tmpptr += 1; kptr += 4; } //top_s32->top_f32 float32x4_t _sum_f32 = vcvtq_f32_s32(_sum); //top_f32 = top_f32 * scale_in _sum_f32 = vmulq_f32(_sum_f32, _scale_in03); //top_f32 = top_f32 + bias _sum_f32 = vaddq_f32(_sum_f32, _bias03); //top_f32 = top_f32 * scale_out _sum_f32 = vmulq_f32(_sum_f32, _scale_out03); //top_f32->top_s32 _sum = vcvtaq_s32_f32(_sum_f32); //top_s32->top_s16 int16x4_t _sum_s16 = vqmovn_s32(_sum); int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16); //top_s16->top_s8 int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp); //save top_s8 vst1_lane_s8(outptr0, _sum_s8, 0); vst1_lane_s8(outptr1, _sum_s8, 1); vst1_lane_s8(outptr2, _sum_s8, 2); vst1_lane_s8(outptr3, _sum_s8, 3); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[0] * kptr[2]; sum1 += tmpptr[1] * kptr[3]; sum2 += tmpptr[0] * kptr[4]; sum2 += tmpptr[1] * kptr[5]; sum3 += tmpptr[0] * kptr[6]; sum3 += tmpptr[1] * kptr[7]; tmpptr += 2; kptr += 8; } for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr += 1; kptr += 4; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0); outptr1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1); outptr2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2); outptr3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3); #endif outptr0++; outptr1++; outptr2++; outptr3++; } } for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); signed char *outptr0 = out0; const float bias0 = bias ? bias[p] : 0. 
f; const float scale_requant_in = scales_requant[2 * p]; const float scale_requant_out = scales_requant[2 * p + 1]; float32x4_t _bias0 = vdupq_n_f32(bias0); float32x4_t _scale_in = vdupq_n_f32(scale_requant_in); float32x4_t _scale_out = vdupq_n_f32(scale_requant_out); int i = 0; for (; i + 3 < size; i += 4) { signed char *tmpptr = bottom_tm.channel(i / 4); const signed char *kptr = kernel.channel(p / 4 + p % 4); #if 1 //__ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q = 0; for (; q + 1 < inch; q = q + 2) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 1], i1[0 - 1], i2[0 - 1], i3[0 - 1] int8x8_t _k = vld1_s8(kptr); //k0[0 - 1] _k[2] = _k[0]; _k[3] = _k[1]; _k[4] = _k[0]; _k[5] = _k[1]; _k[6] = _k[0]; _k[7] = _k[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 8; kptr += 2; } for (; q < inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0], i1[0], i2[0], i3[0] int8x8_t _k = vld1_s8(kptr); //k[0][0] int16x8_t _r0_s16 = vmovl_s8(_r0); int16x8_t _k_s16 = vmovl_s8(_k); _sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); //i0k0, i1k0, i2k0, i3k0 tmpptr += 4; kptr += 1; } //top_s32->top_f32 float32x4_t _sum_f32 = vcvtq_f32_s32(_sum); //top_f32 = top_f32 * scale_in _sum_f32 = vmulq_f32(_sum_f32, _scale_in); //top_f32 = top_f32 + bias _sum_f32 = vaddq_f32(_sum_f32, _bias0); //top_f32 = top_f32 * scale_out _sum_f32 = vmulq_f32(_sum_f32, _scale_out); //top_f32->top_s32 _sum = vcvtaq_s32_f32(_sum_f32); //top_s32->top_s16 int16x4_t _sum_s16 = vqmovn_s32(_sum); int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16); //top_s16->top_s8 int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp); //save top_s8 vst1_s8(outptr0, _sum_s8); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[2] * kptr[0]; sum1 += tmpptr[3] * kptr[1]; sum2 += tmpptr[4] * kptr[0]; sum2 += tmpptr[5] * kptr[1]; sum3 += tmpptr[6] * kptr[0]; sum3 += tmpptr[7] * kptr[1]; tmpptr += 8; kptr += 2; } for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out); outptr0[1] = float2int8(((float)sum1 * scale_requant_in + bias0) * scale_requant_out); outptr0[2] = float2int8(((float)sum2 * scale_requant_in + bias0) * scale_requant_out); outptr0[3] = float2int8(((float)sum3 * scale_requant_in + bias0) * scale_requant_out); #endif outptr0 += 4; } for (; i < size; i++) { signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4 + p % 4); int q = 0; int sum0 = 0; for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out); outptr0++; } } } #else static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat & _kernel, Mat & kernel_tm, int inch, int outch) { const signed char *kernel = _kernel; kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t) 1u); int p = 0; for (; p + 3 < outch; p += 4) { const signed char *kernel0 = kernel + (p + 0) * inch; const signed char *kernel1 = kernel + (p + 1) * inch; const signed char *kernel2 = kernel + (p + 2) * inch; const signed char *kernel3 = kernel + (p + 3) * inch; signed char *ktmp = kernel_tm.channel(p / 4); for (int q = 0; q < inch; q++) { //kernel0...3 0 ktmp[0] = kernel0[0]; ktmp[1] = 
kernel1[0]; ktmp[2] = kernel2[0]; ktmp[3] = kernel3[0]; ktmp += 4; kernel0 += 1; kernel1 += 1; kernel2 += 1; kernel3 += 1; } } for (; p < outch; p++) { const signed char *kernel0 = kernel + p * inch; signed char *ktmp = kernel_tm.channel(p / 4 + p % 4); for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[0]; ktmp++; kernel0++; } } } /* * Convolution 1x1 quantized with sgemm int8 */ static void conv1x1s1_sgemm_int8_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & kernel, const Option & opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; //interleave Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 1u, opt.workspace_allocator); { int nn_size = size >> 3; int remain_size_start = nn_size << 3; for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { #if __ARM_NEON asm volatile ( "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "vst1.s8 {d0}, [%1]! \n" : "=r" (img0), //%0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0" ); img0 += bottom_blob.cstep; #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif /* // __ARM_NEON__ */ } } nn_size = (size - remain_size_start) >> 2; for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } remain_size_start += nn_size << 2; for (int i = remain_size_start; i < size; i++) { const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } //sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = (outch - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; int *outptr0 = top_blob.channel(p); int *outptr1 = top_blob.channel(p + 1); int *outptr2 = top_blob.channel(p + 2); int *outptr3 = top_blob.channel(p + 3); int i = 0; for (; i + 7 < size; i += 8) { const signed char *tmpptr = tmp.channel(i / 8); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "vmov.s32 q10, #0 \n" "vmov.s32 q11, #0 \n" "vmov.s32 q12, #0 \n" "vmov.s32 q13, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4-d7}, [%4]! \n" // tmpr a00 - a07, a10 - a17, a20 - a27, a30 - a37 a(inch) (data) "vmovl.s8 q5, d7 \n" // a30 - a37 "vmovl.s8 q4, d6 \n" // a20 - a27 "vmovl.s8 q3, d5 \n" // a10 - a17 "vmovl.s8 q2, d4 \n" // a00 - a07 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00 - k30, k01 - k31, k02 - k32, k03 - k33 k(outch) (inch) "vmovl.s8 q1, d1 \n" // k02 - k32, k03 - k33 "vmovl.s8 q0, d0 \n" // k00 - k30, k01 - k31 "vmlal.s16 q6, d4, d0[0] \n" // sum0 = (a00 - a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q8, d4, d0[1] \n" // sum1 = (a00 - a07) * k10 "vmlal.s16 q9, d5, d0[1] \n" "vmlal.s16 q10, d4, d0[2] \n" // sum2 = (a00 - a07) * k20 "vmlal.s16 q11, d5, d0[2] \n" "vmlal.s16 q12, d4, d0[3] \n" // sum3 = (a00 - a07) * k30 "vmlal.s16 q13, d5, d0[3] \n" "vmlal.s16 q6, d6, d1[0] \n" // sum0 += (a10 - a17) * k01 "vmlal.s16 q7, d7, d1[0] \n" "vmlal.s16 q8, d6, d1[1] \n" // sum1 += (a10 - a17) * k11 "vmlal.s16 q9, d7, d1[1] \n" "vmlal.s16 q10, d6, d1[2] \n" // sum2 += (a10 - a17) * k21 "vmlal.s16 q11, d7, d1[2] \n" "vmlal.s16 q12, d6, d1[3] \n" // sum3 += (a10 - a17) * k31 "vmlal.s16 q13, d7, d1[3] \n" "vmlal.s16 q6, d8, d2[0] \n" // sum0 += (a20 - a27) * k02 "vmlal.s16 q7, d9, d2[0] \n" "vmlal.s16 q8, d8, d2[1] \n" // sum1 += (a20 - a27) * k12 "vmlal.s16 q9, d9, d2[1] \n" "vmlal.s16 q10, d8, d2[2] \n" // sum2 += (a20 - a27) * k22 "vmlal.s16 q11, d9, d2[2] \n" "vmlal.s16 q12, d8, d2[3] \n" // sum3 += (a20 - a27) * k32 "vmlal.s16 q13, d9, d2[3] \n" "vmlal.s16 q6, d10, d3[0] \n" // sum0 += (a30 - a37) * k03 "vmlal.s16 q7, d11, d3[0] \n" "vmlal.s16 q8, d10, d3[1] \n" // sum1 += (a30 - a37) * k13 "vmlal.s16 q9, d11, d3[1] \n" "vmlal.s16 q10, d10, d3[2] \n" // sum2 += (a30 - a37) * k23 "vmlal.s16 q11, d11, d3[2] \n" "vmlal.s16 q12, d10, d3[3] \n" // sum3 += (a30 - a37) * k33 "vmlal.s16 q13, d11, d3[3] \n" "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%4]! \n" // tmpr a00 - a07 a(inch) (data) "vld1.s8 {d0}, [%5] \n" // kptr k00 - k30 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00 - a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "vmlal.s16 q8, d2, d0[1] \n" // sum1 += (a00 - a07) * k10 "vmlal.s16 q9, d3, d0[1] \n" "vmlal.s16 q10, d2, d0[2] \n" // sum2 += (a00 - a07) * k20 "vmlal.s16 q11, d3, d0[2] \n" "vmlal.s16 q12, d2, d0[3] \n" // sum3 += (a00 - a07) * k30 "vmlal.s16 q13, d3, d0[3] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d12-d15}, [%0]! \n" "vst1.s32 {d16-d19}, [%1]! \n" "vst1.s32 {d20-d23}, [%2]! \n" "vst1.s32 {d24-d27}, [%3]! 
\n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum0_4 = 0; int sum0_5 = 0; int sum0_6 = 0; int sum0_7 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum1_4 = 0; int sum1_5 = 0; int sum1_6 = 0; int sum1_7 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum2_4 = 0; int sum2_5 = 0; int sum2_6 = 0; int sum2_7 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int sum3_4 = 0; int sum3_5 = 0; int sum3_6 = 0; int sum3_7 = 0; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum0_4 += tmpptr[4] * kptr[0]; sum0_5 += tmpptr[5] * kptr[0]; sum0_6 += tmpptr[6] * kptr[0]; sum0_7 += tmpptr[7] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum1_4 += tmpptr[4] * kptr[1]; sum1_5 += tmpptr[5] * kptr[1]; sum1_6 += tmpptr[6] * kptr[1]; sum1_7 += tmpptr[7] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum2_4 += tmpptr[4] * kptr[2]; sum2_5 += tmpptr[5] * kptr[2]; sum2_6 += tmpptr[6] * kptr[2]; sum2_7 += tmpptr[7] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; sum3_4 += tmpptr[4] * kptr[3]; sum3_5 += tmpptr[5] * kptr[3]; sum3_6 += tmpptr[6] * kptr[3]; sum3_7 += tmpptr[7] * kptr[3]; tmpptr += 8; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr0[4] = sum0_4; outptr0[5] = sum0_5; outptr0[6] = sum0_6; outptr0[7] = sum0_7; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr1[4] = sum1_4; outptr1[5] = sum1_5; outptr1[6] = sum1_6; outptr1[7] = sum1_7; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr2[4] = sum2_4; outptr2[5] = sum2_5; outptr2[6] = sum2_6; outptr2[7] = sum2_7; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr3[4] = sum3_4; outptr3[5] = sum3_5; outptr3[6] = sum3_6; outptr3[7] = sum3_7; outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif /* // __ARM_NEON */ } for (; i + 3 < size; i += 4) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4-d5}, [%4]! \n" // tmpr a00 - a03, a10 - a13, a20 - a23, a30 - a33 a(inch) (data) "vmovl.s8 q3, d5 \n" // a20 - a23, a30 - a33 "vmovl.s8 q2, d4 \n" // a00 - a04, a10 - a14 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00 - k30, k01 - k31, k02 - k32, k03 - k33 k(outch) (inch) "vmovl.s8 q1, d1 \n" // k02 - k32, k03 - k33 "vmovl.s8 q0, d0 \n" // k00 - k30, k01 - k31 "vmlal.s16 q6, d4, d0[0] \n" // sum0 = (a00 - a03) * k00 "vmlal.s16 q7, d4, d0[1] \n" // sum1 = (a00 - a03) * k10 "vmlal.s16 q8, d4, d0[2] \n" // sum2 = (a00 - a03) * k20 "vmlal.s16 q9, d4, d0[3] \n" // sum3 = (a00 - a03) * k30 "vmlal.s16 q6, d5, d1[0] \n" // sum0 += (a10 - a13) * k01 "vmlal.s16 q7, d5, d1[1] \n" // sum1 += (a10 - a13) * k11 "vmlal.s16 q8, d5, d1[2] \n" // sum2 += (a10 - a13) * k21 "vmlal.s16 q9, d5, d1[3] \n" // sum3 += (a10 - a13) * k31 "vmlal.s16 q6, d6, d2[0] \n" // sum0 += (a20 - a23) * k02 "vmlal.s16 q7, d6, d2[1] \n" // sum1 += (a20 - a23) * k12 "vmlal.s16 q8, d6, d2[2] \n" // sum2 += (a20 - a23) * k22 "vmlal.s16 q9, d6, d2[3] \n" // sum3 += (a20 - a23) * k32 "vmlal.s16 q6, d7, d3[0] \n" // sum0 += (a30 - a33) * k03 "vmlal.s16 q7, d7, d3[1] \n" // sum1 += (a30 - a33) * k13 "vmlal.s16 q8, d7, d3[2] \n" // sum2 += (a30 - a33) * k23 "vmlal.s16 q9, d7, d3[3] \n" // sum3 += (a30 - a33) * k33 "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n" // tmpr a00 - a03 a(inch) (data) "vld1.s8 {d0}, [%5] \n" // kptr k00 - k30 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #4 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00 - a03) * k00 "vmlal.s16 q7, d2, d0[1] \n" // sum1 += (a00 - a03) * k10 "vmlal.s16 q8, d2, d0[2] \n" // sum2 += (a00 - a03) * k20 "vmlal.s16 q9, d2, d0[3] \n" // sum3 += (a00 - a03) * k30 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d12-d13}, [%0]! \n" "vst1.s32 {d14-d15}, [%1]! \n" "vst1.s32 {d16-d17}, [%2]! \n" "vst1.s32 {d18-d19}, [%3]! 
\n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif /* // __ARM_NEON */ } for (; i < size; i++) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( //inch loop "veor q6, q6, q6 \n" "veor q7, q7, q7 \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "vmov.s32 q10, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4}, [%4] \n" // tmpr a00, a10, a20, a30 a(inch) (data) "add %4, #4 \n" "vmovl.s8 q2, d4 \n" // a00, a10, a20, a30 "vld1.s8 {d0-d1}, [%5]! \n" // kptr k00 - k30, k01 - k31, k02 - k32, k03 - k33 k(outch) (inch) "vmovl.s8 q1, d1 \n" // k02 - k32, k03 - k33 "vmovl.s8 q0, d0 \n" // k00 - k30, k01 - k31 "vmlal.s16 q6, d0, d4[0] \n" // (k00 - k30) * a00 "vmlal.s16 q7, d1, d4[1] \n" // (k01 - k31) * a10 "vmlal.s16 q8, d2, d4[2] \n" // (k02 - k32) * a20 "vmlal.s16 q9, d3, d4[3] \n" // (k03 - k33) * a30 "subs r4, r4, #1 \n" "bne 0b \n" // end for "vadd.s32 q6, q6, q7 \n" "vadd.s32 q9, q9, q8 \n" "vadd.s32 q10, q6, q9 \n" "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n" // tmpr a00 a(inch) (data) "vld1.s8 {d0}, [%5] \n" // kptr k00 - k30 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #1 \n" "add %5, #4 \n" "vmlal.s16 q10, d0, d2[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d20[0]}, [%0]! \n" "vst1.s32 {d20[1]}, [%1]! \n" "vst1.s32 {d21[0]}, [%2]! \n" "vst1.s32 {d21[1]}, [%3]! 
\n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr++; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; outptr0++; outptr1++; outptr2++; outptr3++; #endif /* // __ARM_NEON */ } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); int *outptr0 = out0; int i = 0; for (; i + 7 < size; i += 8) { const signed char *tmpptr = tmp.channel(i / 8); const signed char *kptr = kernel.channel(p / 4 + p % 4); #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%1, #128] \n" "vld1.s8 {d4-d7}, [%1]! \n" // tmpr a00 - a07, a10 - a17, a20 - a27, a30 - a37 a(inch) (data) "vmovl.s8 q5, d7 \n" // a30 - a37 "vmovl.s8 q4, d6 \n" // a20 - a27 "vmovl.s8 q3, d5 \n" // a10 - a17 "vmovl.s8 q2, d4 \n" // a00 - a07 "vld1.s8 {d0}, [%2] \n" // kptr k00, k01, k02, k03 k(outch) (inch) "vmovl.s8 q0, d0 \n" // k00, k01, k02, k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n" // (a00 - a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q6, d6, d0[1] \n" // (a10 - a17) * k01 "vmlal.s16 q7, d7, d0[1] \n" "vmlal.s16 q6, d8, d0[2] \n" // (a20 - a27) * k02 "vmlal.s16 q7, d9, d0[2] \n" "vmlal.s16 q6, d10, d0[3] \n" // (a30 - a37) * k03 "vmlal.s16 q7, d11, d0[3] \n" "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%1]! \n" // tmpr a00 - a07 a(inch) (data) "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n" // (a00 - a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d12-d15}, [%0]! 
\n" : "=r"(outptr0), //%0 "=r"(tmpptr), //%1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; sum4 += tmpptr[4] * kptr[0]; sum5 += tmpptr[5] * kptr[0]; sum6 += tmpptr[6] * kptr[0]; sum7 += tmpptr[7] * kptr[0]; tmpptr += 8; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0[4] = sum4; outptr0[5] = sum5; outptr0[6] = sum6; outptr0[7] = sum7; outptr0 += 8; #endif /* // __ARM_NEON */ } for (; i + 3 < size; i += 4) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const signed char *kptr = kernel.channel(p / 4 + p % 4); #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%2, #128] \n" "vld1.s8 {d4-d5}, [%1]! \n" // tmpr a00 - a03, a10 - a13, a20 - a23, a30 - a33 a(inch) (data) "vmovl.s8 q3, d5 \n" // a20 - a23, a30 - a33 "vmovl.s8 q2, d4 \n" // a00 - a03, a10 - a13 "vld1.s8 {d0}, [%2] \n" // kptr k00, k01, k02, k03 k(outch) (inch) "vmovl.s8 q0, d0 \n" // k00, k01, k02, k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n" // (a00 - a03) * k00 "vmlal.s16 q6, d5, d0[1] \n" // (a10 - a13) * k01 "vmlal.s16 q6, d6, d0[2] \n" // (a20 - a23) * k02 "vmlal.s16 q6, d7, d0[3] \n" // (a30 - a33) * k03 "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%1] \n" // tmpr a00 - a03 a(inch) (data) "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %1, #4 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n" // (a00 - a03) * k00 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d12-d13}, [%0]! 
\n" : "=r"(outptr0), //%0 "=r"(tmpptr), //%1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0 += 4; #endif /* // __ARM_NEON */ } for (; i < size; i++) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4 + p % 4); int q = 0; int sum0 = 0; for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } ////NOTE sgemm int8 // for (; p < outch; p++) // { //Mat out0 = top_blob.channel(p); // //int *outptr0 = out0; // //for (int i = 0; i < size; i++) // { //int sum = 0; // //const signed char *kptr = _kernel.channel(p / 8 + p % 8); // //for (int q = 0; q < inch; q++) // { //const signed char *img0 = bottom_blob.channel(q); // //sum += img0[i] * kptr[0]; //kptr++; // } // //outptr0[i] = sum; // } // } } static void conv1x1s1_sgemm_int8_requant_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & kernel, const Mat & _bias, std: :vector < float >scales_requant, const Option & opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; const float *bias = _bias; //interleave Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 1u, opt.workspace_allocator); { int nn_size = size >> 3; int remain_size_start = nn_size << 3; for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { #if __ARM_NEON asm volatile ( "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "vst1.s8 {d0}, [%1]! \n" : "=r" (img0), //%0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0" ); img0 += bottom_blob.cstep; #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif /* // __ARM_NEON__ */ } } nn_size = (size - remain_size_start) >> 2; for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } remain_size_start += nn_size << 2; for (int i = remain_size_start; i < size; i++) { const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } //sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = (outch - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; signed char *outptr0 = top_blob.channel(p); signed char *outptr1 = top_blob.channel(p + 1); signed char *outptr2 = top_blob.channel(p + 2); signed char *outptr3 = top_blob.channel(p + 3); const float bias0 = bias ? bias[p] : 0. f; const float bias1 = bias ? bias[p + 1] : 0. 
f; const float bias2 = bias ? bias[p + 2] : 0. f; const float bias3 = bias ? bias[p + 3] : 0. f; const float scale_requant_in0 = scales_requant[2 * p]; const float scale_requant_out0 = scales_requant[2 * p + 1]; const float scale_requant_in1 = scales_requant[2 * (p + 1)]; const float scale_requant_out1 = scales_requant[2 * (p + 1) + 1]; const float scale_requant_in2 = scales_requant[2 * (p + 2)]; const float scale_requant_out2 = scales_requant[2 * (p + 2) + 1]; const float scale_requant_in3 = scales_requant[2 * (p + 3)]; const float scale_requant_out3 = scales_requant[2 * (p + 3) + 1]; float32x4_t _bias03, _scale_in03, _scale_out03; _bias03[0] = bias0; _bias03[1] = bias1; _bias03[2] = bias2; _bias03[3] = bias3; _scale_in03[0] = scale_requant_in0; _scale_in03[1] = scale_requant_in1; _scale_in03[2] = scale_requant_in2; _scale_in03[3] = scale_requant_in3; _scale_out03[0] = scale_requant_out0; _scale_out03[1] = scale_requant_out1; _scale_out03[2] = scale_requant_out2; _scale_out03[3] = scale_requant_out3; int i = 0; for (; i + 7 < size; i += 8) { const signed char *tmpptr = tmp.channel(i / 8); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "vmov.s32 q10, #0 \n" "vmov.s32 q11, #0 \n" "vmov.s32 q12, #0 \n" "vmov.s32 q13, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d28-d31}, [%4]! \n" // tmpr a00 - a07, a10 - a17, a20 - a27, a30 - a37 a(inch) (data) "vmovl.s8 q5, d31 \n" // a30 - a37 "vmovl.s8 q4, d30 \n" // a20 - a27 "vmovl.s8 q15, d29 \n" // a10 - a17 "vmovl.s8 q14, d28 \n" // a00 - a07 "vld1.s8 {d0-d1}, [%5]! \n" // kptr k00 - k30, k01 - k31, k02 - k32, k03 - k33 k(outch) (inch) "vmovl.s8 q1, d1 \n" // k02 - k32, k03 - k33 "vmovl.s8 q0, d0 \n" // k00 - k30, k01 - k31 "vmlal.s16 q6, d28, d0[0] \n" // sum0 = (a00 - a07) * k00 "vmlal.s16 q7, d29, d0[0] \n" "vmlal.s16 q8, d28, d0[1] \n" // sum1 = (a00 - a07) * k10 "vmlal.s16 q9, d29, d0[1] \n" "vmlal.s16 q10, d28, d0[2] \n" // sum2 = (a00 - a07) * k20 "vmlal.s16 q11, d29, d0[2] \n" "vmlal.s16 q12, d28, d0[3] \n" // sum3 = (a00 - a07) * k30 "vmlal.s16 q13, d29, d0[3] \n" "vmlal.s16 q6, d30, d1[0] \n" // sum0 += (a10 - a17) * k01 "vmlal.s16 q7, d31, d1[0] \n" "vmlal.s16 q8, d30, d1[1] \n" // sum1 += (a10 - a17) * k11 "vmlal.s16 q9, d31, d1[1] \n" "vmlal.s16 q10, d30, d1[2] \n" // sum2 += (a10 - a17) * k21 "vmlal.s16 q11, d31, d1[2] \n" "vmlal.s16 q12, d30, d1[3] \n" // sum3 += (a10 - a17) * k31 "vmlal.s16 q13, d31, d1[3] \n" "vmlal.s16 q6, d8, d2[0] \n" // sum0 += (a20 - a27) * k02 "vmlal.s16 q7, d9, d2[0] \n" "vmlal.s16 q8, d8, d2[1] \n" // sum1 += (a20 - a27) * k12 "vmlal.s16 q9, d9, d2[1] \n" "vmlal.s16 q10, d8, d2[2] \n" // sum2 += (a20 - a27) * k22 "vmlal.s16 q11, d9, d2[2] \n" "vmlal.s16 q12, d8, d2[3] \n" // sum3 += (a20 - a27) * k32 "vmlal.s16 q13, d9, d2[3] \n" "vmlal.s16 q6, d10, d3[0] \n" // sum0 += (a30 - a37) * k03 "vmlal.s16 q7, d11, d3[0] \n" "vmlal.s16 q8, d10, d3[1] \n" // sum1 += (a30 - a37) * k13 "vmlal.s16 q9, d11, d3[1] \n" "vmlal.s16 q10, d10, d3[2] \n" // sum2 += (a30 - a37) * k23 "vmlal.s16 q11, d11, d3[2] \n" "vmlal.s16 q12, d10, d3[3] \n" // sum3 += (a30 - a37) * k33 "vmlal.s16 q13, d11, d3[3] \n" "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%4]! 
\n" // tmpr a00 - a07 a(inch) (data) "vld1.s8 {d0}, [%5] \n" // kptr k00 - k30 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00 - a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "vmlal.s16 q8, d2, d0[1] \n" // sum1 += (a00 - a07) * k10 "vmlal.s16 q9, d3, d0[1] \n" "vmlal.s16 q10, d2, d0[2] \n" // sum2 += (a00 - a07) * k20 "vmlal.s16 q11, d3, d0[2] \n" "vmlal.s16 q12, d2, d0[3] \n" // sum3 += (a00 - a07) * k30 "vmlal.s16 q13, d3, d0[3] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vdup.f32 q14, %13 \n" // bias "vdup.f32 q15, %14 \n" // bias "vdup.f32 q4, %15 \n" // bias "vdup.f32 q5, %16 \n" // bias // sum0 // top_s32->top_f32 "vcvt.f32.s32 q6, q6 \n" "vcvt.f32.s32 q7, q7 \n" "vcvt.f32.s32 q8, q8 \n" "vcvt.f32.s32 q9, q9 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q6, q6, %e17[0] \n" "vmul.f32 q7, q7, %e17[0] \n" "vmul.f32 q8, q8, %e17[1] \n" "vmul.f32 q9, q9, %e17[1] \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, q14 \n" "vadd.f32 q7, q7, q14 \n" "vadd.f32 q8, q8, q15 \n" "vadd.f32 q9, q9, q15 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %e18[0] \n" "vmul.f32 q1, q7, %e18[0] \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d12, q0 \n" "vqmovn.s32 d13, q1 \n" // top_s16->top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.8 {d12}, [%0]! \n" // sum1 // top_f32 = top_f32 * scale_out "vmul.f32 q0, q8, %e18[1] \n" "vmul.f32 q1, q9, %e18[1] \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d16, q0 \n" "vqmovn.s32 d17, q1 \n" // top_s16->top_s8 "vqmovn.s16 d16, q8 \n" // save top_s8 "vst1.8 {d16}, [%1]! \n" // sum2 // top_s32->top_f32 "vcvt.f32.s32 q10, q10 \n" "vcvt.f32.s32 q11, q11 \n" "vcvt.f32.s32 q12, q12 \n" "vcvt.f32.s32 q13, q13 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q10, q10, %f17[0] \n" "vmul.f32 q11, q11, %f17[0] \n" "vmul.f32 q12, q12, %f17[1] \n" "vmul.f32 q13, q13, %f17[1] \n" // top_f32 = top_f32 + bias "vadd.f32 q10, q10, q4 \n" "vadd.f32 q11, q11, q4 \n" "vadd.f32 q12, q12, q5 \n" "vadd.f32 q13, q13, q5 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q10, %f18[0] \n" "vmul.f32 q1, q11, %f18[0] \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d20, q0 \n" "vqmovn.s32 d21, q1 \n" // top_s16->top_s8 "vqmovn.s16 d20, q10 \n" // save top_s8 "vst1.8 {d20}, [%2]! \n" // sum3 // top_f32 = top_f32 * scale_out "vmul.f32 q0, q12, %f18[1] \n" "vmul.f32 q1, q13, %f18[1] \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d24, q0 \n" "vqmovn.s32 d25, q1 \n" // top_s16->top_s8 "vqmovn.s16 d24, q12 \n" // save top_s8 "vst1.8 {d24}, [%3]! 
\n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), //%12 "r"(bias0), //%13 "r"(bias1), //%14 "r"(bias2), //%15 "r"(bias3), //%16 "w"(_scale_in03), //%17 "w"(_scale_out03) // %18 : "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum0_4 = 0; int sum0_5 = 0; int sum0_6 = 0; int sum0_7 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum1_4 = 0; int sum1_5 = 0; int sum1_6 = 0; int sum1_7 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum2_4 = 0; int sum2_5 = 0; int sum2_6 = 0; int sum2_7 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int sum3_4 = 0; int sum3_5 = 0; int sum3_6 = 0; int sum3_7 = 0; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum0_4 += tmpptr[4] * kptr[0]; sum0_5 += tmpptr[5] * kptr[0]; sum0_6 += tmpptr[6] * kptr[0]; sum0_7 += tmpptr[7] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum1_4 += tmpptr[4] * kptr[1]; sum1_5 += tmpptr[5] * kptr[1]; sum1_6 += tmpptr[6] * kptr[1]; sum1_7 += tmpptr[7] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum2_4 += tmpptr[4] * kptr[2]; sum2_5 += tmpptr[5] * kptr[2]; sum2_6 += tmpptr[6] * kptr[2]; sum2_7 += tmpptr[7] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; sum3_4 += tmpptr[4] * kptr[3]; sum3_5 += tmpptr[5] * kptr[3]; sum3_6 += tmpptr[6] * kptr[3]; sum3_7 += tmpptr[7] * kptr[3]; tmpptr += 8; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr0[4] = sum0_4; outptr0[5] = sum0_5; outptr0[6] = sum0_6; outptr0[7] = sum0_7; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr1[4] = sum1_4; outptr1[5] = sum1_5; outptr1[6] = sum1_6; outptr1[7] = sum1_7; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr2[4] = sum2_4; outptr2[5] = sum2_5; outptr2[6] = sum2_6; outptr2[7] = sum2_7; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr3[4] = sum3_4; outptr3[5] = sum3_5; outptr3[6] = sum3_6; outptr3[7] = sum3_7; outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif /* // __ARM_NEON */ } for (; i + 3 < size; i += 4) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d28-d29}, [%4]! \n" // tmpr a00 - a03, a10 - a13, a20 - a23, a30 - a33 a(inch) (data) "vmovl.s8 q15, d29 \n" // a20 - a23, a30 - a33 "vmovl.s8 q14, d28 \n" // a00 - a04, a10 - a14 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00 - k30, k01 - k31, k02 - k32, k03 - k33 k(outch) (inch) "vmovl.s8 q1, d1 \n" // k02 - k32, k03 - k33 "vmovl.s8 q0, d0 \n" // k00 - k30, k01 - k31 "vmlal.s16 q6, d28, d0[0] \n" // sum0 = (a00 - a03) * k00 "vmlal.s16 q7, d28, d0[1] \n" // sum1 = (a00 - a03) * k10 "vmlal.s16 q8, d28, d0[2] \n" // sum2 = (a00 - a03) * k20 "vmlal.s16 q9, d28, d0[3] \n" // sum3 = (a00 - a03) * k30 "vmlal.s16 q6, d29, d1[0] \n" // sum0 += (a10 - a13) * k01 "vmlal.s16 q7, d29, d1[1] \n" // sum1 += (a10 - a13) * k11 "vmlal.s16 q8, d29, d1[2] \n" // sum2 += (a10 - a13) * k21 "vmlal.s16 q9, d29, d1[3] \n" // sum3 += (a10 - a13) * k31 "vmlal.s16 q6, d30, d2[0] \n" // sum0 += (a20 - a23) * k02 "vmlal.s16 q7, d30, d2[1] \n" // sum1 += (a20 - a23) * k12 "vmlal.s16 q8, d30, d2[2] \n" // sum2 += (a20 - a23) * k22 "vmlal.s16 q9, d30, d2[3] \n" // sum3 += (a20 - a23) * k32 "vmlal.s16 q6, d31, d3[0] \n" // sum0 += (a30 - a33) * k03 "vmlal.s16 q7, d31, d3[1] \n" // sum1 += (a30 - a33) * k13 "vmlal.s16 q8, d31, d3[2] \n" // sum2 += (a30 - a33) * k23 "vmlal.s16 q9, d31, d3[3] \n" // sum3 += (a30 - a33) * k33 "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n" // tmpr a00 - a03 a(inch) (data) "vld1.s8 {d0}, [%5] \n" // kptr k00 - k30 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #4 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00 - a03) * k00 "vmlal.s16 q7, d2, d0[1] \n" // sum1 += (a00 - a03) * k10 "vmlal.s16 q8, d2, d0[2] \n" // sum2 += (a00 - a03) * k20 "vmlal.s16 q9, d2, d0[3] \n" // sum3 += (a00 - a03) * k30 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vdup.f32 q14, %13 \n" // bias "vdup.f32 q15, %14 \n" // bias "vdup.f32 q4, %15 \n" // bias "vdup.f32 q5, %16 \n" // bias // sum0 - 1 // top_s32->top_f32 "vcvt.f32.s32 q6, q6 \n" "vcvt.f32.s32 q7, q7 \n" "vcvt.f32.s32 q8, q8 \n" "vcvt.f32.s32 q9, q9 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q6, q6, %e17[0] \n" "vmul.f32 q7, q7, %e17[1] \n" "vmul.f32 q8, q8, %f17[0] \n" "vmul.f32 q9, q9, %f17[1] \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, q14 \n" "vadd.f32 q7, q7, q15 \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %e18[0] \n" "vmul.f32 q1, q7, %e18[1] \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d12, q0 \n" "vqmovn.s32 d13, q1 \n" // top_s16->top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.s32 {d12[0]}, [%0]! \n" "vst1.s32 {d12[1]}, [%1]! \n" // sum1 - 2 // top_f32 = top_f32 * scale_out "vmul.f32 q0, q8, %f18[0] \n" "vmul.f32 q1, q9, %f18[1] \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d16, q0 \n" "vqmovn.s32 d17, q1 \n" // top_s16->top_s8 "vqmovn.s16 d16, q8 \n" // save top_s8 "vst1.s32 {d16[0]}, [%2]! \n" "vst1.s32 {d16[1]}, [%3]! 
\n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), //%12 "r"(bias0), //%13 "r"(bias1), //%14 "r"(bias2), //%15 "r"(bias3), //%16 "w"(_scale_in03), //%17 "w"(_scale_out03) // %18 : "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif /* // __ARM_NEON */ } for (; i < size; i++) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( //inch loop "veor q6, q6, q6 \n" "veor q7, q7, q7 \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "vmov.s32 q10, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4}, [%4] \n" // tmpr a00, a10, a20, a30 a(inch) (data) "add %4, #4 \n" "vmovl.s8 q2, d4 \n" // a00, a10, a20, a30 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00 - k30, k01 - k31, k02 - k32, k03 - k33 k(outch) (inch) "vmovl.s8 q1, d1 \n" // k02 - k32, k03 - k33 "vmovl.s8 q0, d0 \n" // k00 - k30, k01 - k31 "vmlal.s16 q6, d0, d4[0] \n" // (k00 - k30) * a00 "vmlal.s16 q7, d1, d4[1] \n" // (k01 - k31) * a10 "vmlal.s16 q8, d2, d4[2] \n" // (k02 - k32) * a20 "vmlal.s16 q9, d3, d4[3] \n" // (k03 - k33) * a30 "subs r4, r4, #1 \n" "bne 0b \n" // end for "vadd.s32 q6, q6, q7 \n" "vadd.s32 q9, q9, q8 \n" "vadd.s32 q10, q6, q9 \n" "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n" // tmpr a00 a(inch) (data) "vld1.s8 {d0}, [%5] \n" // kptr k00 - k30 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #1 \n" "add %5, #4 \n" "vmlal.s16 q10, d0, d2[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory // top_s32->top_f32 "vcvt.f32.s32 q10, q10 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q10, q10, %q14 \n" // top_f32 = top_f32 + bias "vadd.f32 q10, q10, %q13 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q10, %q15 \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" // top_s32->top_s16 "vqmovn.s32 d12, q0 \n" // top_s16->top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.8 {d12[0]}, [%0]! \n" "vst1.8 {d12[1]}, [%1]! \n" "vst1.8 {d12[2]}, [%2]! \n" "vst1.8 {d12[3]}, [%3]! \n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), //%12 "w"(_bias03), //%13 "w"(_scale_in03), //%14 "w"(_scale_out03) // %15 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr++; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; outptr0++; outptr1++; outptr2++; outptr3++; #endif /* // __ARM_NEON */ } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); signed char *outptr0 = out0; const float bias0 = bias ? bias[p] : 0. f; const float scale_requant_in = scales_requant[2 * p]; const float scale_requant_out = scales_requant[2 * p + 1]; float32x4_t _bias0 = vdupq_n_f32(bias0); float32x4_t _scale_in = vdupq_n_f32(scale_requant_in); float32x4_t _scale_out = vdupq_n_f32(scale_requant_out); int i = 0; for (; i + 7 < size; i += 8) { const signed char *tmpptr = tmp.channel(i / 8); #if __ARM_NEON const signed char *kptr = kernel.channel(p / 4 + p % 4); #endif /* // __ARM_NEON */ #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%1, #128] \n" "vld1.s8 {d4-d7}, [%1]! 
\n" // tmpr a00 - a07, a10 - a17, a20 - a27, a30 - a37 a(inch) (data) "vmovl.s8 q5, d7 \n" // a30 - a37 "vmovl.s8 q4, d6 \n" // a20 - a27 "vmovl.s8 q3, d5 \n" // a10 - a17 "vmovl.s8 q2, d4 \n" // a00 - a07 "vld1.s8 {d0}, [%2] \n" // kptr k00, k01, k02, k03 k(outch) (inch) "vmovl.s8 q0, d0 \n" // k00, k01, k02, k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n" // (a00 - a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q6, d6, d0[1] \n" // (a10 - a17) * k01 "vmlal.s16 q7, d7, d0[1] \n" "vmlal.s16 q6, d8, d0[2] \n" // (a20 - a27) * k02 "vmlal.s16 q7, d9, d0[2] \n" "vmlal.s16 q6, d10, d0[3] \n" // (a30 - a37) * k03 "vmlal.s16 q7, d11, d0[3] \n" "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%1]! \n" // tmpr a00 - a07 a(inch) (data) "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n" // (a00 - a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory // top_s32->top_f32 "vcvt.f32.s32 q6, q6 \n" "vcvt.f32.s32 q7, q7 \n" // top_f32 = top_f32 * scale_in "vmul.f32 q6, q6, %q8 \n" "vmul.f32 q7, q7, %q8 \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, %q7 \n" "vadd.f32 q7, q7, %q7 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %q9 \n" "vmul.f32 q1, q7, %q9 \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d12, q0 \n" "vqmovn.s32 d13, q1 \n" // top_s16->top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.8 {d12}, [%0]! \n" : "=r"(outptr0), //%0 "=r"(tmpptr), //%1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch), //%6 "w"(_bias0), //%7 "w"(_scale_in), //%8 "w"(_scale_out) // %9 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; sum4 += tmpptr[4] * kptr[0]; sum5 += tmpptr[5] * kptr[0]; sum6 += tmpptr[6] * kptr[0]; sum7 += tmpptr[7] * kptr[0]; tmpptr += 8; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0[4] = sum4; outptr0[5] = sum5; outptr0[6] = sum6; outptr0[7] = sum7; outptr0 += 8; #endif /* // __ARM_NEON */ } for (; i + 3 < size; i += 4) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #if __ARM_NEON const signed char *kptr = kernel.channel(p / 4 + p % 4); #endif /* // __ARM_NEON */ #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%2, #128] \n" "vld1.s8 {d4-d5}, [%1]! 
\n" // tmpr a00 - a03, a10 - a13, a20 - a23, a30 - a33 a(inch) (data) "vmovl.s8 q3, d5 \n" // a20 - a23, a30 - a33 "vmovl.s8 q2, d4 \n" // a00 - a03, a10 - a13 "vld1.s8 {d0}, [%2] \n" // kptr k00, k01, k02, k03 k(outch) (inch) "vmovl.s8 q0, d0 \n" // k00, k01, k02, k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n" // (a00 - a03) * k00 "vmlal.s16 q6, d5, d0[1] \n" // (a10 - a13) * k01 "vmlal.s16 q6, d6, d0[2] \n" // (a20 - a23) * k02 "vmlal.s16 q6, d7, d0[3] \n" // (a30 - a33) * k03 "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%1] \n" // tmpr a00 - a03 a(inch) (data) "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %1, #4 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n" // (a00 - a03) * k00 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory // top_s32->top_f32 "vcvt.f32.s32 q6, q6 \n" // top_f32 = top_f32 * scale_in "vmul.f32 q6, q6, %q8 \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, %q7 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %q9 \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" // top_s32->top_s16 "vqmovn.s32 d12, q0 \n" // top_s16->top_s8 "vqmovn.s16 d12, q6 \n" "vst1.s32 {d12[0]}, [%0]! \n" : "=r"(outptr0), //%0 "=r"(tmpptr), //%1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch), //%6 "w"(_bias0), //%7 "w"(_scale_in), //%8 "w"(_scale_out) // %9 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0 += 4; #endif /* // __ARM_NEON */ } for (; i < size; i++) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); #if __ARM_NEON const signed char *kptr = kernel.channel(p / 4 + p % 4); #endif /* // __ARM_NEON */ int q = 0; int sum0 = 0; for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } } #endif static void conv1x1s1_int8_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, const Option & opt) { int kernel_w = 1; int kernel_h = 1; int stride_w = 1; int stride_h = 1; conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt); } static void conv1x1s2_int8_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, const Option & opt) { int kernel_w = 1; int kernel_h = 1; int stride_w = 2; int stride_h = 2; conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt); }
// BUG1989 is pleased to support the open source community by supporting ncnn available. // //Copyright(C) 2019 BUG1989.All rights reserved. // //Licensed under the BSD 3 - Clause License(the "License"); you may not use this file except // in compliance with the License.You may obtain a copy of the License at // //https://opensource.org / licenses / BSD - 3 - Clause // //Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied.See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif /* // __ARM_NEON */ static inline signed char float2int8(float v) { int int32 = round(v); if (int32 > 127) return 127; if (int32 < -128) return -128; return (signed char)int32; } #if __aarch64__ static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat & _kernel, Mat & kernel_tm, int inch, int outch) { const signed char *kernel = _kernel; //kernel memory packed 4 x 4 kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t) 1u); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const signed char *k0 = kernel + (p + 0) * inch; const signed char *k1 = kernel + (p + 1) * inch; const signed char *k2 = kernel + (p + 2) * inch; const signed char *k3 = kernel + (p + 3) * inch; signed char *ktmp = kernel_tm.channel(p / 4); int q = 0; for (; q + 1 < inch; q += 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q < inch; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } for (int p = remain_outch_start; p < outch; p++) { const signed char *k0 = kernel + (p + 0) * inch; signed char *ktmp = kernel_tm.channel(p / 4 + p % 4); int q = 0; for (; q + 1 < inch; q = q + 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q < inch; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } static void conv1x1s1_sgemm_int8_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & kernel, const Option & opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; //bottom_tm memory packed 4 x 4 ncnn: :Mat bottom_tm(4, inch, size / 4 + size % 4, (size_t) 1u, opt.workspace_allocator); { int nn_size = size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const signed char *img0 = bottom_blob.channel(0); const signed char *img1 = bottom_blob.channel(1); img0 += i; img1 += i; signed char *tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img0[1]; tmpptr[3] = img1[1]; tmpptr[4] = img0[2]; tmpptr[5] = img1[2]; tmpptr[6] = img0[3]; tmpptr[7] = img1[3]; tmpptr += 8; img0 += bottom_blob.cstep; img0 += bottom_blob.cstep; img1 += bottom_blob.cstep; img1 += bottom_blob.cstep; } for (; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = 
remain_size_start; i < size; i++) { const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += bottom_blob.cstep; } } } //sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int *outptr0 = top_blob.channel(p); int *outptr1 = top_blob.channel(p + 1); int *outptr2 = top_blob.channel(p + 2); int *outptr3 = top_blob.channel(p + 3); int i = 0; for (; i + 3 < size; i += 4) { signed char *tmpptr = bottom_tm.channel(i / 4); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( "prfm pldl1keep, [%4, #128] \n" "prfm pldl1keep, [%5, #128] \n" "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "lsr w4, %w12, #2 \n" // r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" // for (; k + 3 < L; k = k + 4) "ld1 {v0.16b}, [%4] \n" // i0, i1, i2, i3 "ld1 {v4.16b}, [%5] \n" // k0, k1, k2, k3 "add %4, %4, #16 \n" "add %5, %5, #16 \n" "rev32 v1.8h, v0.8h \n" // i1, i0, i3, i2 "rev64 v2.4s, v0.4s \n" // i2, i3, i0, i1 "rev64 v3.8h, v0.8h \n" // i3, i2, i1, i0 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "prfm pldl1keep, [%4, #1024] \n" "prfm pldl1keep, [%5, #1024] \n" "smlal2 v8.8h, v4.16b, v0.16b \n" "smlal2 v9.8h, v4.16b, v1.16b \n" "smlal2 v10.8h, v4.16b, v2.16b \n" "smlal2 v11.8h, v4.16b, v3.16b \n" "sadalp v16.4s, v8.8h \n" // i0k0, i1k1, i2k2, i3k3 "sadalp v17.4s, v9.8h \n" // i1k0, i0k1, i3k2, i2k3 "sadalp v18.4s, v10.8h \n" // i2k0, i3k1, i0k2, i1k3 "sadalp v19.4s, v11.8h \n" // i3k0, i2k1, i1k2, i0k3 "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // for (; k + 1 < L; k = k + 2) // remain loop "and w4, %w12, #3 \n" // w4 = remain = K & 3; "cmp w4, #0 \n" "beq 3f \n" "lsr w4, w4, #1 \n" // r4 = nn = L >> 1 "cmp w4, #0 \n" "beq 3f \n" "2: \n" // for (; k + 1 < L; k = k + 2) "ld1 {v0.8b}, [%4] \n" // i0, i1, i2, i3 "ld1 {v4.8b}, [%5] \n" // k0, k1, k2, k3 "add %4, %4, #8 \n" "add %5, %5, #8 \n" "rev32 v1.4h, v0.4h \n" // i2, i3, i0, i1 "rev64 v2.2s, v0.2s \n" // i1, i0, i3, i2 "rev64 v3.4h, v0.4h \n" // i0, i1, i2, i3 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "sadalp v16.4s, v8.8h \n" "sadalp v17.4s, v9.8h \n" "sadalp v18.4s,v10.8h \n" "sadalp v19.4s,v11.8h \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" // realloc "mov v20.s[0], v16.s[0] \n" "mov v20.s[1], v17.s[0] \n" "mov v20.s[2], v18.s[0] \n" "mov v20.s[3], v19.s[0] \n" "mov v21.s[0], v17.s[1] \n" "mov v21.s[1], v16.s[1] \n" "mov v21.s[2], v19.s[1] \n" "mov v21.s[3], v18.s[1] \n" "mov v22.s[0], v18.s[2] \n" "mov v22.s[1], v19.s[2] \n" "mov v22.s[2], v16.s[2] \n" "mov v22.s[3], v17.s[2] \n" "mov v23.s[0], v19.s[3] \n" "mov v23.s[1], v18.s[3] \n" "mov v23.s[2], v17.s[3] \n" "mov v23.s[3], v16.s[3] \n" "and w4, %w12, #1 \n" // w4 = remain = K & 1; "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v0.8b}, [%4] \n" "ld1 {v1.8b}, [%5] \n" "add %4, %4, #4 \n" "add %5, %5, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // i0[0], i1[0], i2[0], i3[0] "sshll v1.8h, v1.8b, #0 \n" // k0[0], k1[0], k2[0], k3[0] "smlal v20.4s, v0.4h, v1.h[0] \n" // i0k0, i1k0, i2k0, i3k0 "smlal v21.4s, 
v0.4h, v1.h[1] \n" // i0k1, i1k1, i2k1, i3k1 "smlal v22.4s, v0.4h, v1.h[2] \n" // i0k2, i1k2, i2k2, i3k2 "smlal v23.4s, v0.4h, v1.h[3] \n" // i0k3, i1k3, i2k3, i3k3 "subs w4, w4, #1 \n" "bne 2b \n" "5: \n" "st1 {v20.4s}, [%0] \n" "st1 {v21.4s}, [%1] \n" "st1 {v22.4s}, [%2] \n" "st1 {v23.4s}, [%3] \n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0_0 += tmpptr[0] * kptr[0]; sum0_0 += tmpptr[1] * kptr[1]; sum0_1 += tmpptr[2] * kptr[0]; sum0_1 += tmpptr[3] * kptr[1]; sum0_2 += tmpptr[4] * kptr[0]; sum0_2 += tmpptr[5] * kptr[1]; sum0_3 += tmpptr[6] * kptr[0]; sum0_3 += tmpptr[7] * kptr[1]; sum1_0 += tmpptr[0] * kptr[2]; sum1_0 += tmpptr[1] * kptr[3]; sum1_1 += tmpptr[2] * kptr[2]; sum1_1 += tmpptr[3] * kptr[3]; sum1_2 += tmpptr[4] * kptr[2]; sum1_2 += tmpptr[5] * kptr[3]; sum1_3 += tmpptr[6] * kptr[2]; sum1_3 += tmpptr[7] * kptr[3]; sum2_0 += tmpptr[0] * kptr[4]; sum2_0 += tmpptr[1] * kptr[5]; sum2_1 += tmpptr[2] * kptr[4]; sum2_1 += tmpptr[3] * kptr[5]; sum2_2 += tmpptr[4] * kptr[4]; sum2_2 += tmpptr[5] * kptr[5]; sum2_3 += tmpptr[6] * kptr[4]; sum2_3 += tmpptr[7] * kptr[5]; sum3_0 += tmpptr[0] * kptr[6]; sum3_0 += tmpptr[1] * kptr[7]; sum3_1 += tmpptr[2] * kptr[6]; sum3_1 += tmpptr[3] * kptr[7]; sum3_2 += tmpptr[4] * kptr[6]; sum3_2 += tmpptr[5] * kptr[7]; sum3_3 += tmpptr[6] * kptr[6]; sum3_3 += tmpptr[7] * kptr[7]; tmpptr += 8; kptr += 8; } for (; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; #endif outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; } for (; i < size; i++) { signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q = 0; for (; q + 3 < inch; q = q + 4) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 3] int8x8x2_t _k = vld2_s8(kptr); //k0[0 - 1], k1[0 - 1], k2[0 - 1], k3[0 - 1]; k0[2 - 3], k1[2 - 3], k2[2 - 3], k3[2 - 3] int16x8_t _r0_s16 = vmovl_s8(_r0); //i0[0], i0[1], i0[2], i0[3] int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); //k0[0], k1[0], k2[0], k3[0], k0[2], k1[2], k2[2], k3[2] int16x8_t _k13_s16 = 
vmovl_s8(_k.val[1]); //k0[1], k1[1], k2[1], k3[1], k0[3], k1[3], k2[3], k3[3] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0); //i0[0] * k[0 - 3][0] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1); //i0[1] * k[0 - 3][1] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); //i0[2] * k[0 - 3][2] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); //i0[3] * k[0 - 3][3] tmpptr += 4; kptr += 16; } for (; q + 1 < inch; q = q + 2) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 3] int8x8_t _k = vld1_s8(kptr); //k0[0 - 1], k1[0 - 1], k2[0 - 1], k3[0 - 1] _r0[2] = _r0[0]; _r0[3] = _r0[1]; _r0[4] = _r0[0]; _r0[5] = _r0[1]; _r0[6] = _r0[0]; _r0[7] = _r0[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 2; kptr += 8; } for (; q < inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 3] int8x8_t _k = vld1_s8(kptr); //k[0 - 3][0] int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vaddw_s16(_sum, vget_low_s16(_tp0)); tmpptr += 1; kptr += 4; } vst1q_lane_s32(outptr0, _sum, 0); vst1q_lane_s32(outptr1, _sum, 1); vst1q_lane_s32(outptr2, _sum, 2); vst1q_lane_s32(outptr3, _sum, 3); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[0] * kptr[2]; sum1 += tmpptr[1] * kptr[3]; sum2 += tmpptr[0] * kptr[4]; sum2 += tmpptr[1] * kptr[5]; sum3 += tmpptr[0] * kptr[6]; sum3 += tmpptr[1] * kptr[7]; tmpptr += 2; kptr += 8; } for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr += 1; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; #endif outptr0++; outptr1++; outptr2++; outptr3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); int *outptr0 = out0; int i = 0; for (; i + 3 < size; i += 4) { signed char *tmpptr = bottom_tm.channel(i / 4); const signed char *kptr = kernel.channel(p / 4 + p % 4); #if __ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q = 0; for (; q + 1 < inch; q = q + 2) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 1], i1[0 - 1], i2[0 - 1], i3[0 - 1] int8x8_t _k = vld1_s8(kptr); //k0[0 - 1] _k[2] = _k[0]; _k[3] = _k[1]; _k[4] = _k[0]; _k[5] = _k[1]; _k[6] = _k[0]; _k[7] = _k[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 8; kptr += 2; } for (; q < inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0], i1[0], i2[0], i3[0] int8x8_t _k = vld1_s8(kptr); //k[0][0] int16x8_t _r0_s16 = vmovl_s8(_r0); int16x8_t _k_s16 = vmovl_s8(_k); _sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); //i0k0, i1k0, i2k0, i3k0 tmpptr += 4; kptr += 1; } vst1q_s32(outptr0, _sum); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[2] * kptr[0]; sum1 += tmpptr[3] * kptr[1]; sum2 += tmpptr[4] * kptr[0]; sum2 += tmpptr[5] * kptr[1]; sum3 += tmpptr[6] * kptr[0]; sum3 += tmpptr[7] * kptr[1]; tmpptr += 8; kptr += 2; } for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; #endif outptr0 += 4; } for (; 
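// The per-pixel tail loop resuming below is the scalar form of what every path in
// this function computes: a 1x1 stride-1 convolution is just a GEMM,
// out[p][i] = sum over q of kernel[p][q] * input[q][i], over outch x (w*h) outputs.
// A minimal scalar reference under that view (illustrative only: it assumes a dense
// layout and ignores ncnn's per-channel cstep padding; the name
// conv1x1s1_int8_reference is not part of ncnn):
#if 0
static void conv1x1s1_int8_reference(const signed char* input, const signed char* kernel,
                                     int* output, int inch, int outch, int size)
{
    for (int p = 0; p < outch; p++)
        for (int i = 0; i < size; i++)
        {
            int sum = 0;
            for (int q = 0; q < inch; q++)
                sum += input[q * size + i] * kernel[p * inch + q]; // same dot product as the asm paths
            output[p * size + i] = sum;
        }
}
#endif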
i < size; i++) { signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4 + p % 4); int q = 0; int sum0 = 0; for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } } static void conv1x1s1_sgemm_int8_requant_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & kernel, const Mat & _bias, std::vector<float> scales_requant, const Option & opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; const float *bias = _bias; //bottom_tm memory packed 4 x 4 ncnn::Mat bottom_tm(4, inch, size / 4 + size % 4, (size_t) 1u, opt.workspace_allocator); { int nn_size = size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const signed char *img0 = bottom_blob.channel(0); const signed char *img1 = bottom_blob.channel(1); img0 += i; img1 += i; signed char *tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img0[1]; tmpptr[3] = img1[1]; tmpptr[4] = img0[2]; tmpptr[5] = img1[2]; tmpptr[6] = img0[3]; tmpptr[7] = img1[3]; tmpptr += 8; img0 += bottom_blob.cstep; img0 += bottom_blob.cstep; img1 += bottom_blob.cstep; img1 += bottom_blob.cstep; } for (; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += bottom_blob.cstep; } } } //sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; signed char *outptr0 = top_blob.channel(p); signed char *outptr1 = top_blob.channel(p + 1); signed char *outptr2 = top_blob.channel(p + 2); signed char *outptr3 = top_blob.channel(p + 3); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p + 1] : 0.f; const float bias2 = bias ? bias[p + 2] : 0.f; const float bias3 = bias ? bias[p + 3] : 0.
f; const float scale_requant_in0 = scales_requant[2 * p]; const float scale_requant_out0 = scales_requant[2 * p + 1]; const float scale_requant_in1 = scales_requant[2 * (p + 1)]; const float scale_requant_out1 = scales_requant[2 * (p + 1) + 1]; const float scale_requant_in2 = scales_requant[2 * (p + 2)]; const float scale_requant_out2 = scales_requant[2 * (p + 2) + 1]; const float scale_requant_in3 = scales_requant[2 * (p + 3)]; const float scale_requant_out3 = scales_requant[2 * (p + 3) + 1]; float32x4_t _bias03, _scale_in03, _scale_out03; float32x4_t _bias0 = vdupq_n_f32(bias0); float32x4_t _bias1 = vdupq_n_f32(bias1); float32x4_t _bias2 = vdupq_n_f32(bias2); float32x4_t _bias3 = vdupq_n_f32(bias3); _bias03[0] = bias0; _bias03[1] = bias1; _bias03[2] = bias2; _bias03[3] = bias3; _scale_in03[0] = scale_requant_in0; _scale_in03[1] = scale_requant_in1; _scale_in03[2] = scale_requant_in2; _scale_in03[3] = scale_requant_in3; _scale_out03[0] = scale_requant_out0; _scale_out03[1] = scale_requant_out1; _scale_out03[2] = scale_requant_out2; _scale_out03[3] = scale_requant_out3; int i = 0; for (; i + 3 < size; i += 4) { signed char *tmpptr = bottom_tm.channel(i / 4); const signed char *kptr = kernel.channel(p / 4); #if 1 //__ARM_NEON asm volatile ( "prfm pldl1keep, [%4, #128] \n" "prfm pldl1keep, [%5, #128] \n" "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "lsr w4, %w12, #2 \n" // r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" // for (; k + 3 < L; k = k + 4) "ld1 {v0.16b}, [%4] \n" // i0, i1, i2, i3 "ld1 {v4.16b}, [%5] \n" // k0, k1, k2, k3 "add %4, %4, #16 \n" "add %5, %5, #16 \n" "rev32 v1.8h, v0.8h \n" // i1, i0, i3, i2 "rev64 v2.4s, v0.4s \n" // i2, i3, i0, i1 "rev64 v3.8h, v0.8h \n" // i3, i2, i1, i0 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "prfm pldl1keep, [%4, #1024] \n" "prfm pldl1keep, [%5, #1024] \n" "smlal2 v8.8h, v4.16b, v0.16b \n" "smlal2 v9.8h, v4.16b, v1.16b \n" "smlal2 v10.8h, v4.16b, v2.16b \n" "smlal2 v11.8h, v4.16b, v3.16b \n" "sadalp v16.4s, v8.8h \n" // i0k0, i1k1, i2k2, i3k3 "sadalp v17.4s, v9.8h \n" // i1k0, i0k1, i3k2, i2k3 "sadalp v18.4s, v10.8h \n" // i2k0, i3k1, i0k2, i1k3 "sadalp v19.4s, v11.8h \n" // i3k0, i2k1, i1k2, i0k3 "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // for (; k + 1 < L; k = k + 2) // remain loop "and w4, %w12, #3 \n" // w4 = remain = K & 3; "cmp w4, #0 \n" "beq 3f \n" "lsr w4, w4, #1 \n" // r4 = nn = L >> 1 "cmp w4, #0 \n" "beq 3f \n" "2: \n" // for (; k + 1 < L; k = k + 2) "ld1 {v0.8b}, [%4] \n" // i0, i1, i2, i3 "ld1 {v4.8b}, [%5] \n" // k0, k1, k2, k3 "add %4, %4, #8 \n" "add %5, %5, #8 \n" "rev32 v1.4h, v0.4h \n" // i2, i3, i0, i1 "rev64 v2.2s, v0.2s \n" // i1, i0, i3, i2 "rev64 v3.4h, v0.4h \n" // i0, i1, i2, i3 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "sadalp v16.4s, v8.8h \n" "sadalp v17.4s, v9.8h \n" "sadalp v18.4s,v10.8h \n" "sadalp v19.4s,v11.8h \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" // realloc "mov v20.s[0], v16.s[0] \n" "mov v20.s[1], v17.s[0] \n" "mov v20.s[2], v18.s[0] \n" "mov v20.s[3], v19.s[0] \n" "mov v21.s[0], v17.s[1] \n" "mov v21.s[1], v16.s[1] \n" "mov v21.s[2], v19.s[1] \n" "mov v21.s[3], v18.s[1] \n" "mov v22.s[0], v18.s[2] \n" "mov v22.s[1], v19.s[2] \n" "mov v22.s[2], v16.s[2] \n" "mov v22.s[3], v17.s[2] \n" "mov v23.s[0], 
v19.s[3] \n" "mov v23.s[1], v18.s[3] \n" "mov v23.s[2], v17.s[3] \n" "mov v23.s[3], v16.s[3] \n" "and w4, %w12, #1 \n" // w4 = remain = K & 1; "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v0.8b}, [%4] \n" "ld1 {v1.8b}, [%5] \n" "add %4, %4, #4 \n" "add %5, %5, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // i0[0], i1[0], i2[0], i3[0] "sshll v1.8h, v1.8b, #0 \n" // k0[0], k1[0], k2[0], k3[0] "smlal v20.4s, v0.4h, v1.h[0] \n" // i0k0, i1k0, i2k0, i3k0 "smlal v21.4s, v0.4h, v1.h[1] \n" // i0k1, i1k1, i2k1, i3k1 "smlal v22.4s, v0.4h, v1.h[2] \n" // i0k2, i1k2, i2k2, i3k2 "smlal v23.4s, v0.4h, v1.h[3] \n" // i0k3, i1k3, i2k3, i3k3 "subs w4, w4, #1 \n" "bne 2b \n" "5: \n" // top_s32->top_f32 "scvtf v20.4s, v20.4s \n" "scvtf v21.4s, v21.4s \n" "scvtf v22.4s, v22.4s \n" "scvtf v23.4s, v23.4s \n" // top_f32 = top_f32 * scale_in "fmul v20.4s, v20.4s, %17.s[0] \n" "fmul v21.4s, v21.4s, %17.s[1] \n" "fmul v22.4s, v22.4s, %17.s[2] \n" "fmul v23.4s, v23.4s, %17.s[3] \n" // top_f32 = top_f32 + bias "fadd v20.4s, v20.4s, %13.4s \n" "fadd v21.4s, v21.4s, %14.4s \n" "fadd v22.4s, v22.4s, %15.4s \n" "fadd v23.4s, v23.4s, %16.4s \n" // top_f32 = top_f32 * scale_out "fmul v20.4s, v20.4s, %18.s[0] \n" "fmul v21.4s, v21.4s, %18.s[1] \n" "fmul v22.4s, v22.4s, %18.s[2] \n" "fmul v23.4s, v23.4s, %18.s[3] \n" // top_f32->top_s32 "fcvtas v20.4s, v20.4s \n" "fcvtas v21.4s, v21.4s \n" "fcvtas v22.4s, v22.4s \n" "fcvtas v23.4s, v23.4s \n" // top_s32->top_s16 "sqxtn v7.4h, v20.4s \n" "sqxtn2 v7.8h, v21.4s \n" "sqxtn v8.4h, v22.4s \n" "sqxtn2 v8.8h, v23.4s \n" // top_s16->top_s8 "sqxtn v0.8b, v7.8h \n" "sqxtn v1.8b, v8.8h \n" // save top_s8 "st1 {v0.s}[0], [%0] \n" "st1 {v0.s}[1], [%1] \n" "st1 {v1.s}[0], [%2] \n" "st1 {v1.s}[1], [%3] \n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), //%12 "w"(_bias0), //%13 "w"(_bias1), //%14 "w"(_bias2), //%15 "w"(_bias3), //%16 "w"(_scale_in03), //%17 "w"(_scale_out03) // %18 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0_0 += tmpptr[0] * kptr[0]; sum0_0 += tmpptr[1] * kptr[1]; sum0_1 += tmpptr[2] * kptr[0]; sum0_1 += tmpptr[3] * kptr[1]; sum0_2 += tmpptr[4] * kptr[0]; sum0_2 += tmpptr[5] * kptr[1]; sum0_3 += tmpptr[6] * kptr[0]; sum0_3 += tmpptr[7] * kptr[1]; sum1_0 += tmpptr[0] * kptr[2]; sum1_0 += tmpptr[1] * kptr[3]; sum1_1 += tmpptr[2] * kptr[2]; sum1_1 += tmpptr[3] * kptr[3]; sum1_2 += tmpptr[4] * kptr[2]; sum1_2 += tmpptr[5] * kptr[3]; sum1_3 += tmpptr[6] * kptr[2]; sum1_3 += tmpptr[7] * kptr[3]; sum2_0 += tmpptr[0] * kptr[4]; sum2_0 += tmpptr[1] * kptr[5]; sum2_1 += tmpptr[2] * kptr[4]; sum2_1 += tmpptr[3] * kptr[5]; sum2_2 += tmpptr[4] * kptr[4]; sum2_2 += tmpptr[5] * kptr[5]; sum2_3 += tmpptr[6] * kptr[4]; sum2_3 += tmpptr[7] * kptr[5]; sum3_0 += tmpptr[0] * kptr[6]; sum3_0 += tmpptr[1] * kptr[7]; sum3_1 += tmpptr[2] * kptr[6]; sum3_1 += tmpptr[3] * kptr[7]; sum3_2 += tmpptr[4] * kptr[6]; sum3_2 += tmpptr[5] * kptr[7]; sum3_3 += tmpptr[6] * kptr[6]; sum3_3 += tmpptr[7] * kptr[7]; tmpptr 
+= 8; kptr += 8; } for (; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = float2int8(((float)sum0_0 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[1] = float2int8(((float)sum0_1 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[2] = float2int8(((float)sum0_2 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[3] = float2int8(((float)sum0_3 * scale_requant_in0 + bias0) * scale_requant_out0); outptr1[0] = float2int8(((float)sum1_0 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[1] = float2int8(((float)sum1_1 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[2] = float2int8(((float)sum1_2 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[3] = float2int8(((float)sum1_3 * scale_requant_in1 + bias1) * scale_requant_out1); outptr2[0] = float2int8(((float)sum2_0 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[1] = float2int8(((float)sum2_1 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[2] = float2int8(((float)sum2_2 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[3] = float2int8(((float)sum2_3 * scale_requant_in2 + bias2) * scale_requant_out2); outptr3[0] = float2int8(((float)sum3_0 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[1] = float2int8(((float)sum3_1 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[2] = float2int8(((float)sum3_2 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[3] = float2int8(((float)sum3_3 * scale_requant_in3 + bias3) * scale_requant_out3); #endif outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; } for (; i < size; i++) { signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4); #if 1 //__ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q = 0; for (; q + 3 < inch; q = q + 4) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 3] int8x8x2_t _k = vld2_s8(kptr); //k0[0 - 1], k1[0 - 1], k2[0 - 1], k3[0 - 1]; k0[2 - 3], k1[2 - 3], k2[2 - 3], k3[2 - 3] int16x8_t _r0_s16 = vmovl_s8(_r0); //i0[0], i0[1], i0[2], i0[3] int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); //k0[0], k1[0], k2[0], k3[0], k0[2], k1[2], k2[2], k3[2] int16x8_t _k13_s16 = vmovl_s8(_k.val[1]); //k0[1], k1[1], k2[1], k3[1], k0[3], k1[3], k2[3], k3[3] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0); //i0[0] * k[0 - 3][0] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1); //i0[1] * k[0 - 3][1] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); //i0[2] * k[0 - 3][2] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); //i0[3] * k[0 - 3][3] tmpptr += 4; kptr += 16; } for (; q + 1 < inch; q = q + 2) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 3] int8x8_t _k = vld1_s8(kptr); //k0[0 - 1], k1[0 - 1], k2[0 - 1], k3[0 - 1] _r0[2] = _r0[0]; _r0[3] = _r0[1]; _r0[4] = _r0[0]; _r0[5] = _r0[1]; _r0[6] = _r0[0]; _r0[7] = _r0[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 2; kptr += 8; } for (; q < inch; q++) { int8x8_t 
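// The intrinsic tail below uses the standard NEON int8 dot-product idiom: widen
// s8 to s16 (vmovl_s8 / vmull_s8), multiply-accumulate into s32 lanes
// (vmlal_lane_s16 / vpadalq_s16), and only narrow back with saturation at the very
// end (vqmovn_s32, then vqmovn_s16). A self-contained sketch of the idiom, not the
// kernel's exact data layout:
#if 0
static int32x4_t dot_s8_sketch(const signed char* a, const signed char* b) // needs <arm_neon.h>
{
    int8x8_t va = vld1_s8(a);           // 8 signed bytes of input
    int8x8_t vb = vld1_s8(b);           // 8 signed bytes of weights
    int16x8_t prod = vmull_s8(va, vb);  // widening multiply: s8 * s8 -> s16, cannot overflow
    int32x4_t acc = vdupq_n_s32(0);
    acc = vpadalq_s16(acc, prod);       // pairwise add-accumulate s16 pairs into s32 lanes
    return acc;                         // four partial sums of two products each
}
#endif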
_r0 = vld1_s8(tmpptr); //i0[0 - 3] int8x8_t _k = vld1_s8(kptr); //k[0 - 3][0] int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vaddw_s16(_sum, vget_low_s16(_tp0)); tmpptr += 1; kptr += 4; } //top_s32->top_f32 float32x4_t _sum_f32 = vcvtq_f32_s32(_sum); //top_f32 = top_f32 * scale_in _sum_f32 = vmulq_f32(_sum_f32, _scale_in03); //top_f32 = top_f32 + bias _sum_f32 = vaddq_f32(_sum_f32, _bias03); //top_f32 = top_f32 * scale_out _sum_f32 = vmulq_f32(_sum_f32, _scale_out03); //top_f32->top_s32 _sum = vcvtaq_s32_f32(_sum_f32); //top_s32->top_s16 int16x4_t _sum_s16 = vqmovn_s32(_sum); int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16); //top_s16->top_s8 int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp); //save top_s8 vst1_lane_s8(outptr0, _sum_s8, 0); vst1_lane_s8(outptr1, _sum_s8, 1); vst1_lane_s8(outptr2, _sum_s8, 2); vst1_lane_s8(outptr3, _sum_s8, 3); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[0] * kptr[2]; sum1 += tmpptr[1] * kptr[3]; sum2 += tmpptr[0] * kptr[4]; sum2 += tmpptr[1] * kptr[5]; sum3 += tmpptr[0] * kptr[6]; sum3 += tmpptr[1] * kptr[7]; tmpptr += 2; kptr += 8; } for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr += 1; kptr += 4; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0); outptr1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1); outptr2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2); outptr3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3); #endif outptr0++; outptr1++; outptr2++; outptr3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); signed char *outptr0 = out0; const float bias0 = bias ? bias[p] : 0. 
f; const float scale_requant_in = scales_requant[2 * p]; const float scale_requant_out = scales_requant[2 * p + 1]; float32x4_t _bias0 = vdupq_n_f32(bias0); float32x4_t _scale_in = vdupq_n_f32(scale_requant_in); float32x4_t _scale_out = vdupq_n_f32(scale_requant_out); int i = 0; for (; i + 3 < size; i += 4) { signed char *tmpptr = bottom_tm.channel(i / 4); const signed char *kptr = kernel.channel(p / 4 + p % 4); #if 1 //__ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q = 0; for (; q + 1 < inch; q = q + 2) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0 - 1], i1[0 - 1], i2[0 - 1], i3[0 - 1] int8x8_t _k = vld1_s8(kptr); //k0[0 - 1] _k[2] = _k[0]; _k[3] = _k[1]; _k[4] = _k[0]; _k[5] = _k[1]; _k[6] = _k[0]; _k[7] = _k[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 8; kptr += 2; } for (; q < inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); //i0[0], i1[0], i2[0], i3[0] int8x8_t _k = vld1_s8(kptr); //k[0][0] int16x8_t _r0_s16 = vmovl_s8(_r0); int16x8_t _k_s16 = vmovl_s8(_k); _sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); //i0k0, i1k0, i2k0, i3k0 tmpptr += 4; kptr += 1; } //top_s32->top_f32 float32x4_t _sum_f32 = vcvtq_f32_s32(_sum); //top_f32 = top_f32 * scale_in _sum_f32 = vmulq_f32(_sum_f32, _scale_in); //top_f32 = top_f32 + bias _sum_f32 = vaddq_f32(_sum_f32, _bias0); //top_f32 = top_f32 * scale_out _sum_f32 = vmulq_f32(_sum_f32, _scale_out); //top_f32->top_s32 _sum = vcvtaq_s32_f32(_sum_f32); //top_s32->top_s16 int16x4_t _sum_s16 = vqmovn_s32(_sum); int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16); //top_s16->top_s8 int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp); //save top_s8 vst1_s8(outptr0, _sum_s8); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[2] * kptr[0]; sum1 += tmpptr[3] * kptr[1]; sum2 += tmpptr[4] * kptr[0]; sum2 += tmpptr[5] * kptr[1]; sum3 += tmpptr[6] * kptr[0]; sum3 += tmpptr[7] * kptr[1]; tmpptr += 8; kptr += 2; } for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out); outptr0[1] = float2int8(((float)sum1 * scale_requant_in + bias0) * scale_requant_out); outptr0[2] = float2int8(((float)sum2 * scale_requant_in + bias0) * scale_requant_out); outptr0[3] = float2int8(((float)sum3 * scale_requant_in + bias0) * scale_requant_out); #endif outptr0 += 4; } for (; i < size; i++) { signed char *tmpptr = bottom_tm.channel(i / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4 + p % 4); int q = 0; int sum0 = 0; for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out); outptr0++; } } } #else static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat & _kernel, Mat & kernel_tm, int inch, int outch) { const signed char *kernel = _kernel; kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t) 1u); int p = 0; for (; p + 3 < outch; p += 4) { const signed char *kernel0 = kernel + (p + 0) * inch; const signed char *kernel1 = kernel + (p + 1) * inch; const signed char *kernel2 = kernel + (p + 2) * inch; const signed char *kernel3 = kernel + (p + 3) * inch; signed char *ktmp = kernel_tm.channel(p / 4); for (int q = 0; q < inch; q++) { //kernel0...3 0 ktmp[0] = kernel0[0]; ktmp[1] = 
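// Once this packing loop finishes, the four output channels are interleaved so the
// sgemm kernels can load k[p+0..p+3][q] as one contiguous 4-byte group:
//     ktmp: k[p+0][0] k[p+1][0] k[p+2][0] k[p+3][0] k[p+0][1] k[p+1][1] ...
// i.e. ktmp[4*q + n] == kernel[(p+n)*inch + q] for n in 0..3, which is the layout
// the vld1.s8 weight loads in the sgemm kernels that follow rely on. A
// consistency-check sketch of that invariant (assumes <assert.h>; not part of the
// kernel):
#if 0
signed char *ktmp0 = kernel_tm.channel(p / 4);
for (int qq = 0; qq < inch; qq++)
    for (int n = 0; n < 4; n++)
        assert(ktmp0[4 * qq + n] == kernel[(p + n) * inch + qq]);
#endif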
kernel1[0]; ktmp[2] = kernel2[0]; ktmp[3] = kernel3[0]; ktmp += 4; kernel0 += 1; kernel1 += 1; kernel2 += 1; kernel3 += 1; } } for (; p < outch; p++) { const signed char *kernel0 = kernel + p * inch; signed char *ktmp = kernel_tm.channel(p / 4 + p % 4); for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[0]; ktmp++; kernel0++; } } } /* * Convolution 1x1 quantized with sgemm int8 */ static void conv1x1s1_sgemm_int8_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & kernel, const Option & opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; //interleave Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 1u, opt.workspace_allocator); { int nn_size = size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { #if __ARM_NEON asm volatile ( "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "vst1.s8 {d0}, [%1]! \n" : "=r" (img0), //%0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0" ); img0 += bottom_blob.cstep; #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif /* // __ARM_NEON__ */ } } nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } //sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; int *outptr0 = top_blob.channel(p); int *outptr1 = top_blob.channel(p + 1); int *outptr2 = top_blob.channel(p + 2); int *outptr3 = top_blob.channel(p + 3); int i = 0; for (; i + 7 < size; i += 8) { const signed char *tmpptr = tmp.channel(i / 8); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "vmov.s32 q10, #0 \n" "vmov.s32 q11, #0 \n" "vmov.s32 q12, #0 \n" "vmov.s32 q13, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4-d7}, [%4]! \n" // tmpr a00 - a07, a10 - a17, a20 - a27, a30 - a37 a(inch) (data) "vmovl.s8 q5, d7 \n" // a30 - a37 "vmovl.s8 q4, d6 \n" // a20 - a27 "vmovl.s8 q3, d5 \n" // a10 - a17 "vmovl.s8 q2, d4 \n" // a00 - a07 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00 - k30, k01 - k31, k02 - k32, k03 - k33 k(outch) (inch) "vmovl.s8 q1, d1 \n" // k02 - k32, k03 - k33 "vmovl.s8 q0, d0 \n" // k00 - k30, k01 - k31 "vmlal.s16 q6, d4, d0[0] \n" // sum0 = (a00 - a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q8, d4, d0[1] \n" // sum1 = (a00 - a07) * k10 "vmlal.s16 q9, d5, d0[1] \n" "vmlal.s16 q10, d4, d0[2] \n" // sum2 = (a00 - a07) * k20 "vmlal.s16 q11, d5, d0[2] \n" "vmlal.s16 q12, d4, d0[3] \n" // sum3 = (a00 - a07) * k30 "vmlal.s16 q13, d5, d0[3] \n" "vmlal.s16 q6, d6, d1[0] \n" // sum0 += (a10 - a17) * k01 "vmlal.s16 q7, d7, d1[0] \n" "vmlal.s16 q8, d6, d1[1] \n" // sum1 += (a10 - a17) * k11 "vmlal.s16 q9, d7, d1[1] \n" "vmlal.s16 q10, d6, d1[2] \n" // sum2 += (a10 - a17) * k21 "vmlal.s16 q11, d7, d1[2] \n" "vmlal.s16 q12, d6, d1[3] \n" // sum3 += (a10 - a17) * k31 "vmlal.s16 q13, d7, d1[3] \n" "vmlal.s16 q6, d8, d2[0] \n" // sum0 += (a20 - a27) * k02 "vmlal.s16 q7, d9, d2[0] \n" "vmlal.s16 q8, d8, d2[1] \n" // sum1 += (a20 - a27) * k12 "vmlal.s16 q9, d9, d2[1] \n" "vmlal.s16 q10, d8, d2[2] \n" // sum2 += (a20 - a27) * k22 "vmlal.s16 q11, d9, d2[2] \n" "vmlal.s16 q12, d8, d2[3] \n" // sum3 += (a20 - a27) * k32 "vmlal.s16 q13, d9, d2[3] \n" "vmlal.s16 q6, d10, d3[0] \n" // sum0 += (a30 - a37) * k03 "vmlal.s16 q7, d11, d3[0] \n" "vmlal.s16 q8, d10, d3[1] \n" // sum1 += (a30 - a37) * k13 "vmlal.s16 q9, d11, d3[1] \n" "vmlal.s16 q10, d10, d3[2] \n" // sum2 += (a30 - a37) * k23 "vmlal.s16 q11, d11, d3[2] \n" "vmlal.s16 q12, d10, d3[3] \n" // sum3 += (a30 - a37) * k33 "vmlal.s16 q13, d11, d3[3] \n" "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%4]! \n" // tmpr a00 - a07 a(inch) (data) "vld1.s8 {d0}, [%5] \n" // kptr k00 - k30 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00 - a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "vmlal.s16 q8, d2, d0[1] \n" // sum1 += (a00 - a07) * k10 "vmlal.s16 q9, d3, d0[1] \n" "vmlal.s16 q10, d2, d0[2] \n" // sum2 += (a00 - a07) * k20 "vmlal.s16 q11, d3, d0[2] \n" "vmlal.s16 q12, d2, d0[3] \n" // sum3 += (a00 - a07) * k30 "vmlal.s16 q13, d3, d0[3] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d12-d15}, [%0]! \n" "vst1.s32 {d16-d19}, [%1]! \n" "vst1.s32 {d20-d23}, [%2]! \n" "vst1.s32 {d24-d27}, [%3]! 
\n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum0_4 = 0; int sum0_5 = 0; int sum0_6 = 0; int sum0_7 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum1_4 = 0; int sum1_5 = 0; int sum1_6 = 0; int sum1_7 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum2_4 = 0; int sum2_5 = 0; int sum2_6 = 0; int sum2_7 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int sum3_4 = 0; int sum3_5 = 0; int sum3_6 = 0; int sum3_7 = 0; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum0_4 += tmpptr[4] * kptr[0]; sum0_5 += tmpptr[5] * kptr[0]; sum0_6 += tmpptr[6] * kptr[0]; sum0_7 += tmpptr[7] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum1_4 += tmpptr[4] * kptr[1]; sum1_5 += tmpptr[5] * kptr[1]; sum1_6 += tmpptr[6] * kptr[1]; sum1_7 += tmpptr[7] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum2_4 += tmpptr[4] * kptr[2]; sum2_5 += tmpptr[5] * kptr[2]; sum2_6 += tmpptr[6] * kptr[2]; sum2_7 += tmpptr[7] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; sum3_4 += tmpptr[4] * kptr[3]; sum3_5 += tmpptr[5] * kptr[3]; sum3_6 += tmpptr[6] * kptr[3]; sum3_7 += tmpptr[7] * kptr[3]; tmpptr += 8; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr0[4] = sum0_4; outptr0[5] = sum0_5; outptr0[6] = sum0_6; outptr0[7] = sum0_7; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr1[4] = sum1_4; outptr1[5] = sum1_5; outptr1[6] = sum1_6; outptr1[7] = sum1_7; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr2[4] = sum2_4; outptr2[5] = sum2_5; outptr2[6] = sum2_6; outptr2[7] = sum2_7; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr3[4] = sum3_4; outptr3[5] = sum3_5; outptr3[6] = sum3_6; outptr3[7] = sum3_7; outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif /* // __ARM_NEON */ } for (; i + 3 < size; i += 4) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4-d5}, [%4]! \n" // tmpr a00 - a03, a10 - a13, a20 - a23, a30 - a33 a(inch) (data) "vmovl.s8 q3, d5 \n" // a20 - a23, a30 - a33 "vmovl.s8 q2, d4 \n" // a00 - a04, a10 - a14 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00 - k30, k01 - k31, k02 - k32, k03 - k33 k(outch) (inch) "vmovl.s8 q1, d1 \n" // k02 - k32, k03 - k33 "vmovl.s8 q0, d0 \n" // k00 - k30, k01 - k31 "vmlal.s16 q6, d4, d0[0] \n" // sum0 = (a00 - a03) * k00 "vmlal.s16 q7, d4, d0[1] \n" // sum1 = (a00 - a03) * k10 "vmlal.s16 q8, d4, d0[2] \n" // sum2 = (a00 - a03) * k20 "vmlal.s16 q9, d4, d0[3] \n" // sum3 = (a00 - a03) * k30 "vmlal.s16 q6, d5, d1[0] \n" // sum0 += (a10 - a13) * k01 "vmlal.s16 q7, d5, d1[1] \n" // sum1 += (a10 - a13) * k11 "vmlal.s16 q8, d5, d1[2] \n" // sum2 += (a10 - a13) * k21 "vmlal.s16 q9, d5, d1[3] \n" // sum3 += (a10 - a13) * k31 "vmlal.s16 q6, d6, d2[0] \n" // sum0 += (a20 - a23) * k02 "vmlal.s16 q7, d6, d2[1] \n" // sum1 += (a20 - a23) * k12 "vmlal.s16 q8, d6, d2[2] \n" // sum2 += (a20 - a23) * k22 "vmlal.s16 q9, d6, d2[3] \n" // sum3 += (a20 - a23) * k32 "vmlal.s16 q6, d7, d3[0] \n" // sum0 += (a30 - a33) * k03 "vmlal.s16 q7, d7, d3[1] \n" // sum1 += (a30 - a33) * k13 "vmlal.s16 q8, d7, d3[2] \n" // sum2 += (a30 - a33) * k23 "vmlal.s16 q9, d7, d3[3] \n" // sum3 += (a30 - a33) * k33 "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n" // tmpr a00 - a03 a(inch) (data) "vld1.s8 {d0}, [%5] \n" // kptr k00 - k30 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #4 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00 - a03) * k00 "vmlal.s16 q7, d2, d0[1] \n" // sum1 += (a00 - a03) * k10 "vmlal.s16 q8, d2, d0[2] \n" // sum2 += (a00 - a03) * k20 "vmlal.s16 q9, d2, d0[3] \n" // sum3 += (a00 - a03) * k30 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d12-d13}, [%0]! \n" "vst1.s32 {d14-d15}, [%1]! \n" "vst1.s32 {d16-d17}, [%2]! \n" "vst1.s32 {d18-d19}, [%3]! 
\n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif /* // __ARM_NEON */ } for (; i < size; i++) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( //inch loop "veor q6, q6, q6 \n" "veor q7, q7, q7 \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "vmov.s32 q10, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4}, [%4] \n" // tmpr a00, a10, a20, a30 a(inch) (data) "add %4, #4 \n" "vmovl.s8 q2, d4 \n" // a00, a10, a20, a30 "vld1.s8 {d0-d1}, [%5]! \n" // kptr k00 - k30, k01 - k31, k02 - k32, k03 - k33 k(outch) (inch) "vmovl.s8 q1, d1 \n" // k02 - k32, k03 - k33 "vmovl.s8 q0, d0 \n" // k00 - k30, k01 - k31 "vmlal.s16 q6, d0, d4[0] \n" // (k00 - k30) * a00 "vmlal.s16 q7, d1, d4[1] \n" // (k01 - k31) * a10 "vmlal.s16 q8, d2, d4[2] \n" // (k02 - k32) * a20 "vmlal.s16 q9, d3, d4[3] \n" // (k03 - k33) * a30 "subs r4, r4, #1 \n" "bne 0b \n" // end for "vadd.s32 q6, q6, q7 \n" "vadd.s32 q9, q9, q8 \n" "vadd.s32 q10, q6, q9 \n" "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n" // tmpr a00 a(inch) (data) "vld1.s8 {d0}, [%5] \n" // kptr k00 - k30 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #1 \n" "add %5, #4 \n" "vmlal.s16 q10, d0, d2[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d20[0]}, [%0]! \n" "vst1.s32 {d20[1]}, [%1]! \n" "vst1.s32 {d21[0]}, [%2]! \n" "vst1.s32 {d21[1]}, [%3]! 
\n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr++; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; outptr0++; outptr1++; outptr2++; outptr3++; #endif /* // __ARM_NEON */ } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); int *outptr0 = out0; int i = 0; for (; i + 7 < size; i += 8) { const signed char *tmpptr = tmp.channel(i / 8); const signed char *kptr = kernel.channel(p / 4 + p % 4); #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%1, #128] \n" "vld1.s8 {d4-d7}, [%1]! \n" // tmpr a00 - a07, a10 - a17, a20 - a27, a30 - a37 a(inch) (data) "vmovl.s8 q5, d7 \n" // a30 - a37 "vmovl.s8 q4, d6 \n" // a20 - a27 "vmovl.s8 q3, d5 \n" // a10 - a17 "vmovl.s8 q2, d4 \n" // a00 - a07 "vld1.s8 {d0}, [%2] \n" // kptr k00, k01, k02, k03 k(outch) (inch) "vmovl.s8 q0, d0 \n" // k00, k01, k02, k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n" // (a00 - a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q6, d6, d0[1] \n" // (a10 - a17) * k01 "vmlal.s16 q7, d7, d0[1] \n" "vmlal.s16 q6, d8, d0[2] \n" // (a20 - a27) * k02 "vmlal.s16 q7, d9, d0[2] \n" "vmlal.s16 q6, d10, d0[3] \n" // (a30 - a37) * k03 "vmlal.s16 q7, d11, d0[3] \n" "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%1]! \n" // tmpr a00 - a07 a(inch) (data) "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n" // (a00 - a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d12-d15}, [%0]! 
\n" : "=r"(outptr0), //%0 "=r"(tmpptr), //%1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; sum4 += tmpptr[4] * kptr[0]; sum5 += tmpptr[5] * kptr[0]; sum6 += tmpptr[6] * kptr[0]; sum7 += tmpptr[7] * kptr[0]; tmpptr += 8; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0[4] = sum4; outptr0[5] = sum5; outptr0[6] = sum6; outptr0[7] = sum7; outptr0 += 8; #endif /* // __ARM_NEON */ } for (; i + 3 < size; i += 4) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const signed char *kptr = kernel.channel(p / 4 + p % 4); #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%2, #128] \n" "vld1.s8 {d4-d5}, [%1]! \n" // tmpr a00 - a03, a10 - a13, a20 - a23, a30 - a33 a(inch) (data) "vmovl.s8 q3, d5 \n" // a20 - a23, a30 - a33 "vmovl.s8 q2, d4 \n" // a00 - a03, a10 - a13 "vld1.s8 {d0}, [%2] \n" // kptr k00, k01, k02, k03 k(outch) (inch) "vmovl.s8 q0, d0 \n" // k00, k01, k02, k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n" // (a00 - a03) * k00 "vmlal.s16 q6, d5, d0[1] \n" // (a10 - a13) * k01 "vmlal.s16 q6, d6, d0[2] \n" // (a20 - a23) * k02 "vmlal.s16 q6, d7, d0[3] \n" // (a30 - a33) * k03 "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%1] \n" // tmpr a00 - a03 a(inch) (data) "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %1, #4 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n" // (a00 - a03) * k00 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d12-d13}, [%0]! 
\n" : "=r"(outptr0), //%0 "=r"(tmpptr), //%1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0 += 4; #endif /* // __ARM_NEON */ } for (; i < size; i++) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4 + p % 4); int q = 0; int sum0 = 0; for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } ////NOTE sgemm int8 // for (; p < outch; p++) // { //Mat out0 = top_blob.channel(p); // //int *outptr0 = out0; // //for (int i = 0; i < size; i++) // { //int sum = 0; // //const signed char *kptr = _kernel.channel(p / 8 + p % 8); // //for (int q = 0; q < inch; q++) // { //const signed char *img0 = bottom_blob.channel(q); // //sum += img0[i] * kptr[0]; //kptr++; // } // //outptr0[i] = sum; // } // } } static void conv1x1s1_sgemm_int8_requant_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & kernel, const Mat & _bias, std: :vector < float >scales_requant, const Option & opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; const float *bias = _bias; //interleave Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 1u, opt.workspace_allocator); { int nn_size = size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { #if __ARM_NEON asm volatile ( "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "vst1.s8 {d0}, [%1]! 
\n" : "=r" (img0), //%0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0" ); img0 += bottom_blob.cstep; #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif /* // __ARM_NEON__ */ } } nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const signed char *img0 = bottom_blob.channel(0); img0 += i; signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } //sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; signed char *outptr0 = top_blob.channel(p); signed char *outptr1 = top_blob.channel(p + 1); signed char *outptr2 = top_blob.channel(p + 2); signed char *outptr3 = top_blob.channel(p + 3); const float bias0 = bias ? bias[p] : 0. f; const float bias1 = bias ? bias[p + 1] : 0. f; const float bias2 = bias ? bias[p + 2] : 0. f; const float bias3 = bias ? bias[p + 3] : 0. f; const float scale_requant_in0 = scales_requant[2 * p]; const float scale_requant_out0 = scales_requant[2 * p + 1]; const float scale_requant_in1 = scales_requant[2 * (p + 1)]; const float scale_requant_out1 = scales_requant[2 * (p + 1) + 1]; const float scale_requant_in2 = scales_requant[2 * (p + 2)]; const float scale_requant_out2 = scales_requant[2 * (p + 2) + 1]; const float scale_requant_in3 = scales_requant[2 * (p + 3)]; const float scale_requant_out3 = scales_requant[2 * (p + 3) + 1]; float32x4_t _bias03, _scale_in03, _scale_out03; _bias03[0] = bias0; _bias03[1] = bias1; _bias03[2] = bias2; _bias03[3] = bias3; _scale_in03[0] = scale_requant_in0; _scale_in03[1] = scale_requant_in1; _scale_in03[2] = scale_requant_in2; _scale_in03[3] = scale_requant_in3; _scale_out03[0] = scale_requant_out0; _scale_out03[1] = scale_requant_out1; _scale_out03[2] = scale_requant_out2; _scale_out03[3] = scale_requant_out3; int i = 0; for (; i + 7 < size; i += 8) { const signed char *tmpptr = tmp.channel(i / 8); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "vmov.s32 q10, #0 \n" "vmov.s32 q11, #0 \n" "vmov.s32 q12, #0 \n" "vmov.s32 q13, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d28-d31}, [%4]! \n" // tmpr a00 - a07, a10 - a17, a20 - a27, a30 - a37 a(inch) (data) "vmovl.s8 q5, d31 \n" // a30 - a37 "vmovl.s8 q4, d30 \n" // a20 - a27 "vmovl.s8 q15, d29 \n" // a10 - a17 "vmovl.s8 q14, d28 \n" // a00 - a07 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00 - k30, k01 - k31, k02 - k32, k03 - k33 k(outch) (inch) "vmovl.s8 q1, d1 \n" // k02 - k32, k03 - k33 "vmovl.s8 q0, d0 \n" // k00 - k30, k01 - k31 "vmlal.s16 q6, d28, d0[0] \n" // sum0 = (a00 - a07) * k00 "vmlal.s16 q7, d29, d0[0] \n" "vmlal.s16 q8, d28, d0[1] \n" // sum1 = (a00 - a07) * k10 "vmlal.s16 q9, d29, d0[1] \n" "vmlal.s16 q10, d28, d0[2] \n" // sum2 = (a00 - a07) * k20 "vmlal.s16 q11, d29, d0[2] \n" "vmlal.s16 q12, d28, d0[3] \n" // sum3 = (a00 - a07) * k30 "vmlal.s16 q13, d29, d0[3] \n" "vmlal.s16 q6, d30, d1[0] \n" // sum0 += (a10 - a17) * k01 "vmlal.s16 q7, d31, d1[0] \n" "vmlal.s16 q8, d30, d1[1] \n" // sum1 += (a10 - a17) * k11 "vmlal.s16 q9, d31, d1[1] \n" "vmlal.s16 q10, d30, d1[2] \n" // sum2 += (a10 - a17) * k21 "vmlal.s16 q11, d31, d1[2] \n" "vmlal.s16 q12, d30, d1[3] \n" // sum3 += (a10 - a17) * k31 "vmlal.s16 q13, d31, d1[3] \n" "vmlal.s16 q6, d8, d2[0] \n" // sum0 += (a20 - a27) * k02 "vmlal.s16 q7, d9, d2[0] \n" "vmlal.s16 q8, d8, d2[1] \n" // sum1 += (a20 - a27) * k12 "vmlal.s16 q9, d9, d2[1] \n" "vmlal.s16 q10, d8, d2[2] \n" // sum2 += (a20 - a27) * k22 "vmlal.s16 q11, d9, d2[2] \n" "vmlal.s16 q12, d8, d2[3] \n" // sum3 += (a20 - a27) * k32 "vmlal.s16 q13, d9, d2[3] \n" "vmlal.s16 q6, d10, d3[0] \n" // sum0 += (a30 - a37) * k03 "vmlal.s16 q7, d11, d3[0] \n" "vmlal.s16 q8, d10, d3[1] \n" // sum1 += (a30 - a37) * k13 "vmlal.s16 q9, d11, d3[1] \n" "vmlal.s16 q10, d10, d3[2] \n" // sum2 += (a30 - a37) * k23 "vmlal.s16 q11, d11, d3[2] \n" "vmlal.s16 q12, d10, d3[3] \n" // sum3 += (a30 - a37) * k33 "vmlal.s16 q13, d11, d3[3] \n" "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%4]! \n" // tmpr a00 - a07 a(inch) (data) "vld1.s8 {d0}, [%5] \n" // kptr k00 - k30 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00 - a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "vmlal.s16 q8, d2, d0[1] \n" // sum1 += (a00 - a07) * k10 "vmlal.s16 q9, d3, d0[1] \n" "vmlal.s16 q10, d2, d0[2] \n" // sum2 += (a00 - a07) * k20 "vmlal.s16 q11, d3, d0[2] \n" "vmlal.s16 q12, d2, d0[3] \n" // sum3 += (a00 - a07) * k30 "vmlal.s16 q13, d3, d0[3] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vdup.f32 q14, %13 \n" // bias "vdup.f32 q15, %14 \n" // bias "vdup.f32 q4, %15 \n" // bias "vdup.f32 q5, %16 \n" // bias // sum0 // top_s32->top_f32 "vcvt.f32.s32 q6, q6 \n" "vcvt.f32.s32 q7, q7 \n" "vcvt.f32.s32 q8, q8 \n" "vcvt.f32.s32 q9, q9 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q6, q6, %e17[0] \n" "vmul.f32 q7, q7, %e17[0] \n" "vmul.f32 q8, q8, %e17[1] \n" "vmul.f32 q9, q9, %e17[1] \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, q14 \n" "vadd.f32 q7, q7, q14 \n" "vadd.f32 q8, q8, q15 \n" "vadd.f32 q9, q9, q15 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %e18[0] \n" "vmul.f32 q1, q7, %e18[0] \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d12, q0 \n" "vqmovn.s32 d13, q1 \n" // top_s16->top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.8 {d12}, [%0]! 
\n" // sum1 // top_f32 = top_f32 * scale_out "vmul.f32 q0, q8, %e18[1] \n" "vmul.f32 q1, q9, %e18[1] \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d16, q0 \n" "vqmovn.s32 d17, q1 \n" // top_s16->top_s8 "vqmovn.s16 d16, q8 \n" // save top_s8 "vst1.8 {d16}, [%1]! \n" // sum2 // top_s32->top_f32 "vcvt.f32.s32 q10, q10 \n" "vcvt.f32.s32 q11, q11 \n" "vcvt.f32.s32 q12, q12 \n" "vcvt.f32.s32 q13, q13 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q10, q10, %f17[0] \n" "vmul.f32 q11, q11, %f17[0] \n" "vmul.f32 q12, q12, %f17[1] \n" "vmul.f32 q13, q13, %f17[1] \n" // top_f32 = top_f32 + bias "vadd.f32 q10, q10, q4 \n" "vadd.f32 q11, q11, q4 \n" "vadd.f32 q12, q12, q5 \n" "vadd.f32 q13, q13, q5 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q10, %f18[0] \n" "vmul.f32 q1, q11, %f18[0] \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d20, q0 \n" "vqmovn.s32 d21, q1 \n" // top_s16->top_s8 "vqmovn.s16 d20, q10 \n" // save top_s8 "vst1.8 {d20}, [%2]! \n" // sum3 // top_f32 = top_f32 * scale_out "vmul.f32 q0, q12, %f18[1] \n" "vmul.f32 q1, q13, %f18[1] \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d24, q0 \n" "vqmovn.s32 d25, q1 \n" // top_s16->top_s8 "vqmovn.s16 d24, q12 \n" // save top_s8 "vst1.8 {d24}, [%3]! 
\n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), //%12 "r"(bias0), //%13 "r"(bias1), //%14 "r"(bias2), //%15 "r"(bias3), //%16 "w"(_scale_in03), //%17 "w"(_scale_out03) // %18 : "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum0_4 = 0; int sum0_5 = 0; int sum0_6 = 0; int sum0_7 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum1_4 = 0; int sum1_5 = 0; int sum1_6 = 0; int sum1_7 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum2_4 = 0; int sum2_5 = 0; int sum2_6 = 0; int sum2_7 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int sum3_4 = 0; int sum3_5 = 0; int sum3_6 = 0; int sum3_7 = 0; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum0_4 += tmpptr[4] * kptr[0]; sum0_5 += tmpptr[5] * kptr[0]; sum0_6 += tmpptr[6] * kptr[0]; sum0_7 += tmpptr[7] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum1_4 += tmpptr[4] * kptr[1]; sum1_5 += tmpptr[5] * kptr[1]; sum1_6 += tmpptr[6] * kptr[1]; sum1_7 += tmpptr[7] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum2_4 += tmpptr[4] * kptr[2]; sum2_5 += tmpptr[5] * kptr[2]; sum2_6 += tmpptr[6] * kptr[2]; sum2_7 += tmpptr[7] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; sum3_4 += tmpptr[4] * kptr[3]; sum3_5 += tmpptr[5] * kptr[3]; sum3_6 += tmpptr[6] * kptr[3]; sum3_7 += tmpptr[7] * kptr[3]; tmpptr += 8; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr0[4] = sum0_4; outptr0[5] = sum0_5; outptr0[6] = sum0_6; outptr0[7] = sum0_7; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr1[4] = sum1_4; outptr1[5] = sum1_5; outptr1[6] = sum1_6; outptr1[7] = sum1_7; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr2[4] = sum2_4; outptr2[5] = sum2_5; outptr2[6] = sum2_6; outptr2[7] = sum2_7; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr3[4] = sum3_4; outptr3[5] = sum3_5; outptr3[6] = sum3_6; outptr3[7] = sum3_7; outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif /* // __ARM_NEON */ } for (; i + 3 < size; i += 4) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d28-d29}, [%4]! \n" // tmpr a00 - a03, a10 - a13, a20 - a23, a30 - a33 a(inch) (data) "vmovl.s8 q15, d29 \n" // a20 - a23, a30 - a33 "vmovl.s8 q14, d28 \n" // a00 - a04, a10 - a14 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00 - k30, k01 - k31, k02 - k32, k03 - k33 k(outch) (inch) "vmovl.s8 q1, d1 \n" // k02 - k32, k03 - k33 "vmovl.s8 q0, d0 \n" // k00 - k30, k01 - k31 "vmlal.s16 q6, d28, d0[0] \n" // sum0 = (a00 - a03) * k00 "vmlal.s16 q7, d28, d0[1] \n" // sum1 = (a00 - a03) * k10 "vmlal.s16 q8, d28, d0[2] \n" // sum2 = (a00 - a03) * k20 "vmlal.s16 q9, d28, d0[3] \n" // sum3 = (a00 - a03) * k30 "vmlal.s16 q6, d29, d1[0] \n" // sum0 += (a10 - a13) * k01 "vmlal.s16 q7, d29, d1[1] \n" // sum1 += (a10 - a13) * k11 "vmlal.s16 q8, d29, d1[2] \n" // sum2 += (a10 - a13) * k21 "vmlal.s16 q9, d29, d1[3] \n" // sum3 += (a10 - a13) * k31 "vmlal.s16 q6, d30, d2[0] \n" // sum0 += (a20 - a23) * k02 "vmlal.s16 q7, d30, d2[1] \n" // sum1 += (a20 - a23) * k12 "vmlal.s16 q8, d30, d2[2] \n" // sum2 += (a20 - a23) * k22 "vmlal.s16 q9, d30, d2[3] \n" // sum3 += (a20 - a23) * k32 "vmlal.s16 q6, d31, d3[0] \n" // sum0 += (a30 - a33) * k03 "vmlal.s16 q7, d31, d3[1] \n" // sum1 += (a30 - a33) * k13 "vmlal.s16 q8, d31, d3[2] \n" // sum2 += (a30 - a33) * k23 "vmlal.s16 q9, d31, d3[3] \n" // sum3 += (a30 - a33) * k33 "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n" // tmpr a00 - a03 a(inch) (data) "vld1.s8 {d0}, [%5] \n" // kptr k00 - k30 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #4 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00 - a03) * k00 "vmlal.s16 q7, d2, d0[1] \n" // sum1 += (a00 - a03) * k10 "vmlal.s16 q8, d2, d0[2] \n" // sum2 += (a00 - a03) * k20 "vmlal.s16 q9, d2, d0[3] \n" // sum3 += (a00 - a03) * k30 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vdup.f32 q14, %13 \n" // bias "vdup.f32 q15, %14 \n" // bias "vdup.f32 q4, %15 \n" // bias "vdup.f32 q5, %16 \n" // bias // sum0 - 1 // top_s32->top_f32 "vcvt.f32.s32 q6, q6 \n" "vcvt.f32.s32 q7, q7 \n" "vcvt.f32.s32 q8, q8 \n" "vcvt.f32.s32 q9, q9 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q6, q6, %e17[0] \n" "vmul.f32 q7, q7, %e17[1] \n" "vmul.f32 q8, q8, %f17[0] \n" "vmul.f32 q9, q9, %f17[1] \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, q14 \n" "vadd.f32 q7, q7, q15 \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %e18[0] \n" "vmul.f32 q1, q7, %e18[1] \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d12, q0 \n" "vqmovn.s32 d13, q1 \n" // top_s16->top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.s32 {d12[0]}, [%0]! \n" "vst1.s32 {d12[1]}, [%1]! \n" // sum1 - 2 // top_f32 = top_f32 * scale_out "vmul.f32 q0, q8, %f18[0] \n" "vmul.f32 q1, q9, %f18[1] \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d16, q0 \n" "vqmovn.s32 d17, q1 \n" // top_s16->top_s8 "vqmovn.s16 d16, q8 \n" // save top_s8 "vst1.s32 {d16[0]}, [%2]! \n" "vst1.s32 {d16[1]}, [%3]! 
\n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), //%12 "r"(bias0), //%13 "r"(bias1), //%14 "r"(bias2), //%15 "r"(bias3), //%16 "w"(_scale_in03), //%17 "w"(_scale_out03) // %18 : "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif /* // __ARM_NEON */ } for (; i < size; i++) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const signed char *kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile ( //inch loop "veor q6, q6, q6 \n" "veor q7, q7, q7 \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "vmov.s32 q10, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4}, [%4] \n" // tmpr a00, a10, a20, a30 a(inch) (data) "add %4, #4 \n" "vmovl.s8 q2, d4 \n" // a00, a10, a20, a30 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00 - k30, k01 - k31, k02 - k32, k03 - k33 k(outch) (inch) "vmovl.s8 q1, d1 \n" // k02 - k32, k03 - k33 "vmovl.s8 q0, d0 \n" // k00 - k30, k01 - k31 "vmlal.s16 q6, d0, d4[0] \n" // (k00 - k30) * a00 "vmlal.s16 q7, d1, d4[1] \n" // (k01 - k31) * a10 "vmlal.s16 q8, d2, d4[2] \n" // (k02 - k32) * a20 "vmlal.s16 q9, d3, d4[3] \n" // (k03 - k33) * a30 "subs r4, r4, #1 \n" "bne 0b \n" // end for "vadd.s32 q6, q6, q7 \n" "vadd.s32 q9, q9, q8 \n" "vadd.s32 q10, q6, q9 \n" "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n" // tmpr a00 a(inch) (data) "vld1.s8 {d0}, [%5] \n" // kptr k00 - k30 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #1 \n" "add %5, #4 \n" "vmlal.s16 q10, d0, d2[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory // top_s32->top_f32 "vcvt.f32.s32 q10, q10 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q10, q10, %q14 \n" // top_f32 = top_f32 + bias "vadd.f32 q10, q10, %q13 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q10, %q15 \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" // top_s32->top_s16 "vqmovn.s32 d12, q0 \n" // top_s16->top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.8 {d12[0]}, [%0]! \n" "vst1.8 {d12[1]}, [%1]! \n" "vst1.8 {d12[2]}, [%2]! \n" "vst1.8 {d12[3]}, [%3]! \n" : "=r"(outptr0), //%0 "=r"(outptr1), //%1 "=r"(outptr2), //%2 "=r"(outptr3), //%3 "=r"(tmpptr), //%4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), //%12 "w"(_bias03), //%13 "w"(_scale_in03), //%14 "w"(_scale_out03) // %15 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr++; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; outptr0++; outptr1++; outptr2++; outptr3++; #endif /* // __ARM_NEON */ } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); signed char *outptr0 = out0; const float bias0 = bias ? bias[p] : 0. f; const float scale_requant_in = scales_requant[2 * p]; const float scale_requant_out = scales_requant[2 * p + 1]; float32x4_t _bias0 = vdupq_n_f32(bias0); float32x4_t _scale_in = vdupq_n_f32(scale_requant_in); float32x4_t _scale_out = vdupq_n_f32(scale_requant_out); int i = 0; for (; i + 7 < size; i += 8) { const signed char *tmpptr = tmp.channel(i / 8); #if __ARM_NEON const signed char *kptr = kernel.channel(p / 4 + p % 4); #endif /* // __ARM_NEON */ #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%1, #128] \n" "vld1.s8 {d4-d7}, [%1]! 
\n" // tmpr a00 - a07, a10 - a17, a20 - a27, a30 - a37 a(inch) (data) "vmovl.s8 q5, d7 \n" // a30 - a37 "vmovl.s8 q4, d6 \n" // a20 - a27 "vmovl.s8 q3, d5 \n" // a10 - a17 "vmovl.s8 q2, d4 \n" // a00 - a07 "vld1.s8 {d0}, [%2] \n" // kptr k00, k01, k02, k03 k(outch) (inch) "vmovl.s8 q0, d0 \n" // k00, k01, k02, k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n" // (a00 - a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q6, d6, d0[1] \n" // (a10 - a17) * k01 "vmlal.s16 q7, d7, d0[1] \n" "vmlal.s16 q6, d8, d0[2] \n" // (a20 - a27) * k02 "vmlal.s16 q7, d9, d0[2] \n" "vmlal.s16 q6, d10, d0[3] \n" // (a30 - a37) * k03 "vmlal.s16 q7, d11, d0[3] \n" "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%1]! \n" // tmpr a00 - a07 a(inch) (data) "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n" // (a00 - a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory // top_s32->top_f32 "vcvt.f32.s32 q6, q6 \n" "vcvt.f32.s32 q7, q7 \n" // top_f32 = top_f32 * scale_in "vmul.f32 q6, q6, %q8 \n" "vmul.f32 q7, q7, %q8 \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, %q7 \n" "vadd.f32 q7, q7, %q7 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %q9 \n" "vmul.f32 q1, q7, %q9 \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32->top_s16 "vqmovn.s32 d12, q0 \n" "vqmovn.s32 d13, q1 \n" // top_s16->top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.8 {d12}, [%0]! \n" : "=r"(outptr0), //%0 "=r"(tmpptr), //%1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch), //%6 "w"(_bias0), //%7 "w"(_scale_in), //%8 "w"(_scale_out) // %9 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; sum4 += tmpptr[4] * kptr[0]; sum5 += tmpptr[5] * kptr[0]; sum6 += tmpptr[6] * kptr[0]; sum7 += tmpptr[7] * kptr[0]; tmpptr += 8; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0[4] = sum4; outptr0[5] = sum5; outptr0[6] = sum6; outptr0[7] = sum7; outptr0 += 8; #endif /* // __ARM_NEON */ } for (; i + 3 < size; i += 4) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #if __ARM_NEON const signed char *kptr = kernel.channel(p / 4 + p % 4); #endif /* // __ARM_NEON */ #if __ARM_NEON asm volatile ( //inch loop "vmov.s32 q6, #0 \n" "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for (; nn != 0; nn--) "pld [%2, #128] \n" "vld1.s8 {d4-d5}, [%1]! 
\n" // tmpr a00 - a03, a10 - a13, a20 - a23, a30 - a33 a(inch) (data) "vmovl.s8 q3, d5 \n" // a20 - a23, a30 - a33 "vmovl.s8 q2, d4 \n" // a00 - a03, a10 - a13 "vld1.s8 {d0}, [%2] \n" // kptr k00, k01, k02, k03 k(outch) (inch) "vmovl.s8 q0, d0 \n" // k00, k01, k02, k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n" // (a00 - a03) * k00 "vmlal.s16 q6, d5, d0[1] \n" // (a10 - a13) * k01 "vmlal.s16 q6, d6, d0[2] \n" // (a20 - a23) * k02 "vmlal.s16 q6, d7, d0[3] \n" // (a30 - a33) * k03 "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for (; remain != 0; remain--) "vld1.s8 {d2}, [%1] \n" // tmpr a00 - a03 a(inch) (data) "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch) (inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %1, #4 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n" // (a00 - a03) * k00 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory // top_s32->top_f32 "vcvt.f32.s32 q6, q6 \n" // top_f32 = top_f32 * scale_in "vmul.f32 q6, q6, %q8 \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, %q7 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %q9 \n" // top_f32->top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" // top_s32->top_s16 "vqmovn.s32 d12, q0 \n" // top_s16->top_s8 "vqmovn.s16 d12, q6 \n" "vst1.s32 {d12[0]}, [%0]! \n" : "=r"(outptr0), //%0 "=r"(tmpptr), //%1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch), //%6 "w"(_bias0), //%7 "w"(_scale_in), //%8 "w"(_scale_out) // %9 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0 += 4; #endif /* // __ARM_NEON */ } for (; i < size; i++) { const signed char *tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); #if __ARM_NEON const signed char *kptr = kernel.channel(p / 4 + p % 4); #endif /* // __ARM_NEON */ int q = 0; int sum0 = 0; for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } } #endif static void conv1x1s1_int8_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, const Option & opt) { int kernel_w = 1; int kernel_h = 1; int stride_w = 1; int stride_h = 1; conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt); } static void conv1x1s2_int8_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, const Option & opt) { int kernel_w = 1; int kernel_h = 1; int stride_w = 2; int stride_h = 2; conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt); }
deconvolution_pack4to1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void deconvolution_pack4to1_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4to1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1; const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1; const int maxk = kernel_w * kernel_h; const float* bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } v4f32 _sum = (v4f32)__msa_fill_w(0); const float* kptr = (const float*)weight_data_pack4to1 + maxk * channels * p * 4; // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); for (int y = 0; y < kernel_h; y++) { int sys = (i + y * dilation_h - (kernel_extent_h - 1)); if (sys < 0 || sys % stride_h != 0) continue; int sy = sys / stride_h; if (sy >= h) continue; for (int x = 0; x < kernel_w; x++) { int sxs = (j + x * dilation_w - (kernel_extent_w - 1)); if (sxs < 0 || sxs % stride_w != 0) continue; int sx = sxs / stride_w; if (sx >= w) continue; const float* sptr = m.row(sy) + sx * 4; int k = y * kernel_w + x; v4f32 _val = (v4f32)__msa_ld_w(sptr, 0); v4f32 _w = (v4f32)__msa_ld_w(kptr + k * 4, 0); _sum = __msa_fadd_w(_sum, __msa_fmul_w(_val, _w)); } } kptr += maxk * 4; } sum += __msa_fhadd_w(_sum); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void deconvolution_pack4to1_msa(const Mat & bottom_blob, Mat & top_blob, const Mat & weight_data_pack4to1, const Mat & bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat & activation_params, const Option & opt) { int w = bottom_blob.w; int h = bottom_blob.h; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1; const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1; const int maxk = kernel_w * kernel_h; const float *bias_data_ptr = bias_data; //num_output for (int p = 0; p < outch; p++) { float *outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } v4f32 _sum = (v4f32) __msa_fill_w(0); const float *kptr = (const float *)weight_data_pack4to1 + maxk * channels * p * 4; //channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); for (int y = 0; y < kernel_h; y++) { int sys = (i + y * dilation_h - (kernel_extent_h - 1)); if (sys < 0 || sys % stride_h != 0) continue; int sy = sys / stride_h; if (sy >= h) continue; for (int x = 0; x < kernel_w; x++) { int sxs = (j + x * dilation_w - (kernel_extent_w - 1)); if (sxs < 0 || sxs % stride_w != 0) continue; int sx = sxs / stride_w; if (sx >= w) continue; const float *sptr = m.row(sy) + sx * 4; int k = y * kernel_w + x; v4f32 _val = (v4f32) __msa_ld_w(sptr, 0); v4f32 _w = (v4f32) __msa_ld_w(kptr + k * 4, 0); _sum = __msa_fadd_w(_sum, __msa_fmul_w(_val, _w)); } } kptr += maxk * 4; } sum += __msa_fhadd_w(_sum); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void deconvolution_pack4to1_msa(const Mat & bottom_blob, Mat & top_blob, const Mat & weight_data_pack4to1, const Mat & bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat & activation_params, const Option & opt) { int w = bottom_blob.w; int h = bottom_blob.h; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1; const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1; const int maxk = kernel_w * kernel_h; const float *bias_data_ptr = bias_data; //num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float *outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } v4f32 _sum = (v4f32) __msa_fill_w(0); const float *kptr = (const float *)weight_data_pack4to1 + maxk * channels * p * 4; //channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); for (int y = 0; y < kernel_h; y++) { int sys = (i + y * dilation_h - (kernel_extent_h - 1)); if (sys < 0 || sys % stride_h != 0) continue; int sy = sys / stride_h; if (sy >= h) continue; for (int x = 0; x < kernel_w; x++) { int sxs = (j + x * dilation_w - (kernel_extent_w - 1)); if (sxs < 0 || sxs % stride_w != 0) continue; int sx = sxs / stride_w; if (sx >= w) continue; const float *sptr = m.row(sy) + sx * 4; int k = y * kernel_w + x; v4f32 _val = (v4f32) __msa_ld_w(sptr, 0); v4f32 _w = (v4f32) __msa_ld_w(kptr + k * 4, 0); _sum = __msa_fadd_w(_sum, __msa_fmul_w(_val, _w)); } } kptr += maxk * 4; } sum += __msa_fhadd_w(_sum); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }
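All three deconvolution variants are output-centric: for output pixel (i, j) and kernel tap (x, y) they form sxs = j + x * dilation_w - (kernel_extent_w - 1) and read input only when sxs is non-negative, divisible by the stride, and in bounds, i.e. when the tap lands on a real sample of the stride-upsampled input. A standalone scalar sketch of that coordinate test (the function name is illustrative, not from ncnn):

/* Decide whether kernel tap x contributes to output column j, and if so
 * return the input x-coordinate it reads (mirrors the sxs/sx test above). */
static int deconv_src_x(int j, int x, int dilation_w, int stride_w,
                        int kernel_extent_w, int w, int *sx)
{
    int sxs = j + x * dilation_w - (kernel_extent_w - 1);
    if (sxs < 0 || sxs % stride_w != 0)
        return 0; /* falls between upsampled samples: zero contribution */
    *sx = sxs / stride_w;
    return *sx < w; /* reject taps past the right edge of the input */
}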
GB_unaryop__identity_uint16_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint16_fp32 // op(A') function: GB_tran__identity_uint16_fp32 // C type: uint16_t // A type: float // cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ uint16_t z ; GB_CAST_UNSIGNED(z,aij,16) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint16_fp32 ( uint16_t *Cx, // Cx and Ax may be aliased float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint16_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint16_fp32 // op(A') function: GB_tran__identity_uint16_fp32 // C type: uint16_t // A type: float // cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ uint16_t z ; GB_CAST_UNSIGNED(z,aij,16) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint16_fp32 ( uint16_t *Cx, // Cx and Ax may be aliased float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint16_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint16_fp32 // op(A') function: GB_tran__identity_uint16_fp32 // C type: uint16_t // A type: float // cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ uint16_t z ; GB_CAST_UNSIGNED(z,aij,16) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint16_fp32 ( uint16_t *Cx, // Cx and Ax may be aliased float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint16_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
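Once the macros are expanded, the generated apply kernel is a flat elementwise loop; only the two types and the cast differ between the generated files. A hand-expanded sketch of the hot loop of GB_unop__identity_uint16_fp32, with a raw C cast standing in for GB_CAST_UNSIGNED (which additionally guards NaN and out-of-range values):

#include <stdint.h>

/* De-sugared form of the generated kernel: Cx [p] = (uint16_t) Ax [p].
 * schedule(static) fits here because every iteration does the same work. */
void unop_identity_uint16_fp32(uint16_t *Cx, const float *Ax,
                               int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (uint16_t) Ax [p] ;
    }
}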
interop.c
// RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: nvptx64-nvidia-cuda #include <assert.h> #include <omp.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> typedef void *cudaStream_t; int main() { int device_id = omp_get_default_device(); #pragma omp parallel master { double D0, D2; omp_interop_t interop; #pragma omp interop init(targetsync : interop) device(device_id) nowait assert(interop != NULL); int err; for (int i = omp_ipr_first; i < 0; i++) { const char *n = omp_get_interop_name(interop, (omp_interop_property_t)(i)); long int li = omp_get_interop_int(interop, (omp_interop_property_t)(i), &err); const void *p = omp_get_interop_ptr(interop, (omp_interop_property_t)(i), &err); const char *s = omp_get_interop_str(interop, (omp_interop_property_t)(i), &err); const char *n1 = omp_get_interop_type_desc(interop, (omp_interop_property_t)(i)); } #pragma omp interop use(interop) depend(in : D0, D2) cudaStream_t stream = (omp_get_interop_ptr(interop, omp_ipr_targetsync, NULL)); assert(stream != NULL); #pragma omp interop destroy(interop) depend(in : D0, D2) device(device_id) } printf("PASS\n"); } // CHECK: PASS
// RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: nvptx64-nvidia-cuda #include <assert.h> #include <omp.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> typedef void *cudaStream_t; int main() { int device_id = omp_get_default_device(); double D0, D2; omp_interop_t interop; assert(interop != NULL); int err; for (int i = omp_ipr_first; i < 0; i++) { const char *n = omp_get_interop_name(interop, (omp_interop_property_t) (i)); long int li = omp_get_interop_int(interop, (omp_interop_property_t) (i), &err); const void *p = omp_get_interop_ptr(interop, (omp_interop_property_t) (i), &err); const char *s = omp_get_interop_str(interop, (omp_interop_property_t) (i), &err); const char *n1 = omp_get_interop_type_desc(interop, (omp_interop_property_t) (i)); } cudaStream_t stream = (omp_get_interop_ptr(interop, omp_ipr_targetsync, NULL)); assert(stream != NULL); printf("PASS\n"); } // CHECK: PASS
// RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: nvptx64-nvidia-cuda #include <assert.h> #include <omp.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> typedef void *cudaStream_t; int main() { int device_id = omp_get_default_device(); #pragma omp parallel master { double D0, D2; omp_interop_t interop; #pragma omp interop init(targetsync : interop) device(device_id) nowait assert(interop != NULL); int err; for (int i = omp_ipr_first; i < 0; i++) { const char *n = omp_get_interop_name(interop, (omp_interop_property_t) (i)); long int li = omp_get_interop_int(interop, (omp_interop_property_t) (i), &err); const void *p = omp_get_interop_ptr(interop, (omp_interop_property_t) (i), &err); const char *s = omp_get_interop_str(interop, (omp_interop_property_t) (i), &err); const char *n1 = omp_get_interop_type_desc(interop, (omp_interop_property_t) (i)); } #pragma omp interop use(interop) depend(in : D0, D2) cudaStream_t stream = (omp_get_interop_ptr(interop, omp_ipr_targetsync, NULL)); assert(stream != NULL); #pragma omp interop destroy(interop) depend(in : D0, D2) device(device_id) } printf("PASS\n"); } // CHECK: PASS
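What the test exercises is that omp_get_interop_ptr(interop, omp_ipr_targetsync, ...) hands back the CUDA stream the OpenMP runtime synchronizes against, so native CUDA work can be ordered with OpenMP target work. A minimal usage sketch, assuming cuda_runtime.h is available; the async copy is illustrative, not part of the test:

#include <omp.h>
#include <cuda_runtime.h> /* cudaStream_t, cudaMemcpyAsync: assumed available */

/* Enqueue a host-to-device copy on the stream backing an OpenMP interop
 * object, then wait before tearing the object down. */
void copy_on_omp_stream(void *dst, const void *src, size_t nbytes)
{
    omp_interop_t iop = omp_interop_none;
#pragma omp interop init(targetsync : iop) device(omp_get_default_device())
    cudaStream_t s =
        (cudaStream_t)omp_get_interop_ptr(iop, omp_ipr_targetsync, NULL);
    cudaMemcpyAsync(dst, src, nbytes, cudaMemcpyHostToDevice, s);
    cudaStreamSynchronize(s); /* drain the borrowed stream first */
#pragma omp interop destroy(iop)
}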
LAGraph_cc_fastsv5.c
//------------------------------------------------------------------------------ // LAGraph_cc_fastsv5: connected components //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2020 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact [email protected] for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ /** * Code is based on the algorithm described in the following paper * Zhang, Azad, Hu. FastSV: A Distributed-Memory Connected Component * Algorithm with Fast Convergence (SIAM PP20) * * Modified by Tim Davis, Texas A&M University **/ // The input matrix A must be symmetric. Self-edges (diagonal entries) are // OK, and are ignored. The values and type of A are ignored; just its // pattern is accessed. // The matrix A must have dimension 2^32 or less. If it is larger, use the // 64-bit version of this method instead. TODO combine the two versions into a // single user-callable code. #define LAGRAPH_EXPERIMENTAL_ASK_BEFORE_BENCHMARKING #include "LAGraph.h" //------------------------------------------------------------------------------ // atomic_min_uint32: compute (*p) = min (*p, value), via atomic update //------------------------------------------------------------------------------ static inline void atomic_min_uint32 ( uint32_t *p, // input/output uint32_t value // input ) { uint32_t old, new ; do { // get the old value at (*p) // #pragma omp atomic read old = (*p) ; // compute the new minimum new = LAGRAPH_MIN (old, value) ; } while (!__sync_bool_compare_and_swap (p, old, new)) ; } //------------------------------------------------------------------------------ // Reduce_assign32: w (index) += src, using MIN as the "+=" accum operator //------------------------------------------------------------------------------ // mask = NULL, accumulator = GrB_MIN_UINT32, descriptor = NULL. // Duplicates are summed with the accumulator, which differs from how // GrB_assign works. GrB_assign states that the presence of duplicates results // in undefined behavior. SuiteSparse:GraphBLAS follows the MATLAB rule, which // discards all but the first of the duplicates. TODO: add this to GraphBLAS // as a variant of GrB_assign, either as GxB_assign_accum (or another name), // or as a GxB_* descriptor setting.
#define LAGRAPH_FREE_ALL static GrB_Info Reduce_assign32 ( GrB_Vector *w_handle, // vector of size n, all entries present GrB_Vector *s_handle, // vector of size n, all entries present uint32_t *index, // array of size n GrB_Index n, int nthreads ) { GrB_Type w_type, s_type ; GrB_Index w_n, s_n, w_nvals, s_nvals, *w_i, *s_i ; uint32_t *w_x, *s_x ; #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0) LAGr_Vector_export_Full (w_handle, &w_type, &w_n, (void **) &w_x, NULL) ; LAGr_Vector_export_Full (s_handle, &s_type, &s_n, (void **) &s_x, NULL) ; #else LAGr_Vector_export (w_handle, &w_type, &w_n, &w_nvals, &w_i, (void **) &w_x, NULL) ; LAGr_Vector_export (s_handle, &s_type, &s_n, &s_nvals, &s_i, (void **) &s_x, NULL) ; #endif #if 0 if (nthreads >= 4) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (GrB_Index k = 0 ; k < n ; k++) { uint32_t i = index [k] ; atomic_min_uint32 (&(w_x [i]), s_x [k]) ; } } else #endif { // sequential version, to avoid atomics for (GrB_Index k = 0 ; k < n ; k++) { uint32_t i = index [k] ; w_x [i] = LAGRAPH_MIN (w_x [i], s_x [k]) ; } } #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0) LAGr_Vector_import_Full (w_handle, w_type, w_n, (void **) &w_x, NULL) ; LAGr_Vector_import_Full (s_handle, s_type, s_n, (void **) &s_x, NULL) ; #else LAGr_Vector_import (w_handle, w_type, w_n, w_nvals, &w_i, (void **) &w_x, NULL) ; LAGr_Vector_import (s_handle, s_type, s_n, s_nvals, &s_i, (void **) &s_x, NULL) ; #endif return (GrB_SUCCESS) ; } #undef LAGRAPH_FREE_ALL #define LAGRAPH_FREE_ALL \ { \ LAGRAPH_FREE (I) ; \ LAGRAPH_FREE (V32) ; \ LAGr_free (&f) ; \ LAGr_free (&gp) ; \ LAGr_free (&mngp) ; \ LAGr_free (&gp_new) ; \ LAGr_free (&mod) ; \ if (sanitize) LAGr_free (&S) ; \ } //------------------------------------------------------------------------------ // LAGraph_cc_fastsv5 //------------------------------------------------------------------------------ GrB_Info LAGraph_cc_fastsv5 ( GrB_Vector *result, // output: array of component identifiers GrB_Matrix A, // input matrix bool sanitize // if true, ensure A is symmetric ) { GrB_Info info ; uint32_t *V32 = NULL ; GrB_Index n, *I = NULL ; GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL ; GrB_Matrix S = NULL ; //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- LAGr_Matrix_nrows (&n, A) ; if (n > UINT32_MAX) { LAGRAPH_ERROR ("problem too large; use 64-bit version instead", GrB_INVALID_VALUE) ; } if (sanitize) { // S = A | A' LAGr_Matrix_new (&S, GrB_BOOL, n, n) ; LAGr_eWiseAdd (S, NULL, NULL, GrB_LOR, A, A, LAGraph_desc_otoo) ; } else { // Use the input as-is, and assume it is symmetric S = A ; } //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- // determine # of threads to use for Reduce_assign int nthreads_max = LAGraph_get_nthreads ( ) ; int nthreads = n / (1024*1024) ; nthreads = LAGRAPH_MIN (nthreads, nthreads_max) ; nthreads = LAGRAPH_MAX (nthreads, 1) ; // # of threads to use for typecast int nthreads2 = n / (64*1024) ; nthreads2 = LAGRAPH_MIN (nthreads2, nthreads_max) ; nthreads2 = LAGRAPH_MAX (nthreads2, 1) ; // vectors LAGr_Vector_new (&f, GrB_UINT32, n) ; LAGr_Vector_new (&gp_new, GrB_UINT32, n) ; LAGr_Vector_new (&mod, GrB_BOOL, n) ; // temporary arrays I = LAGraph_malloc (n, sizeof (GrB_Index)) ; V32 = LAGraph_malloc (n, sizeof (uint32_t)) ; // 
prepare vectors #pragma omp parallel for num_threads(nthreads2) schedule(static) for (GrB_Index i = 0 ; i < n ; i++) { I [i] = i ; V32 [i] = (uint32_t) i ; } LAGr_Vector_build (f, I, V32, n, GrB_PLUS_UINT32) ; LAGr_Vector_dup (&gp, f) ; LAGr_Vector_dup (&mngp, f) ; //-------------------------------------------------------------------------- // main computation //-------------------------------------------------------------------------- bool diff = true ; while (diff) { // hooking & shortcutting LAGr_mxv (mngp, NULL, GrB_MIN_UINT32, GxB_MIN_SECOND_UINT32, S, gp, NULL) ; LAGRAPH_OK (Reduce_assign32 (&f, &mngp, V32, n, nthreads)) ; LAGr_eWiseAdd (f, NULL, GrB_MIN_UINT32, GrB_MIN_UINT32, mngp, gp, NULL); // calculate grandparent LAGr_Vector_extractTuples (NULL, V32, &n, f) ; #pragma omp parallel for num_threads(nthreads2) schedule(static) for (uint32_t i = 0 ; i < n ; i++) { I [i] = (GrB_Index) V32 [i] ; } LAGr_extract (gp_new, NULL, NULL, f, I, n, NULL) ; // check termination LAGr_eWiseMult (mod, NULL, NULL, GrB_NE_UINT32, gp_new, gp, NULL) ; LAGr_reduce (&diff, NULL, GxB_LOR_BOOL_MONOID, mod, NULL) ; // swap gp and gp_new GrB_Vector t = gp ; gp = gp_new ; gp_new = t ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- *result = f ; f = NULL ; LAGRAPH_FREE_ALL ; return (GrB_SUCCESS) ; }
//------------------------------------------------------------------------------ // LAGraph_cc_fastsv5: connected components //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2020 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact [email protected] for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ /** * Code is based on the algorithm described in the following paper * Zhang, Azad, Hu. FastSV: A Distributed-Memory Connected Component * Algorithm with Fast Convergence (SIAM PP20) * * Modified by Tim Davis, Texas A&M University **/ // The input matrix A must be symmetric. Self-edges (diagonal entries) are // OK, and are ignored. The values and type of A are ignored; just its // pattern is accessed. // The matrix A must have dimension 2^32 or less. If it is larger, use the // 64-bit version of this method instead. TODO combine the two versions into a // single user-callable code. #define LAGRAPH_EXPERIMENTAL_ASK_BEFORE_BENCHMARKING #include "LAGraph.h" //------------------------------------------------------------------------------ // atomic_min_uint32: compute (*p) = min (*p, value), via atomic update //------------------------------------------------------------------------------ static inline void atomic_min_uint32 ( uint32_t *p, // input/output uint32_t value // input ) { uint32_t old, new ; do { // get the old value at (*p) // old = (*p) ; // compute the new minimum new = LAGRAPH_MIN (old, value) ; } while (!__sync_bool_compare_and_swap (p, old, new)) ; } //------------------------------------------------------------------------------ // Reduce_assign32: w (index) += src, using MIN as the "+=" accum operator //------------------------------------------------------------------------------ // mask = NULL, accumulator = GrB_MIN_UINT32, descriptor = NULL. // Duplicates are summed with the accumulator, which differs from how // GrB_assign works. GrB_assign states that the presence of duplicates results // in undefined behavior. SuiteSparse:GraphBLAS follows the MATLAB rule, which // discards all but the first of the duplicates. TODO: add this to GraphBLAS // as a variant of GrB_assign, either as GxB_assign_accum (or another name), // or as a GxB_* descriptor setting.
#define LAGRAPH_FREE_ALL static GrB_Info Reduce_assign32 ( GrB_Vector *w_handle, // vector of size n, all entries present GrB_Vector *s_handle, // vector of size n, all entries present uint32_t *index, // array of size n GrB_Index n, int nthreads ) { GrB_Type w_type, s_type ; GrB_Index w_n, s_n, w_nvals, s_nvals, *w_i, *s_i ; uint32_t *w_x, *s_x ; #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0) LAGr_Vector_export_Full (w_handle, &w_type, &w_n, (void **) &w_x, NULL) ; LAGr_Vector_export_Full (s_handle, &s_type, &s_n, (void **) &s_x, NULL) ; #else LAGr_Vector_export (w_handle, &w_type, &w_n, &w_nvals, &w_i, (void **) &w_x, NULL) ; LAGr_Vector_export (s_handle, &s_type, &s_n, &s_nvals, &s_i, (void **) &s_x, NULL) ; #endif #if 0 if (nthreads >= 4) { for (GrB_Index k = 0 ; k < n ; k++) { uint32_t i = index [k] ; atomic_min_uint32 (&(w_x [i]), s_x [k]) ; } } else #endif { // sequential version, to avoid atomics for (GrB_Index k = 0 ; k < n ; k++) { uint32_t i = index [k] ; w_x [i] = LAGRAPH_MIN (w_x [i], s_x [k]) ; } } #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0) LAGr_Vector_import_Full (w_handle, w_type, w_n, (void **) &w_x, NULL) ; LAGr_Vector_import_Full (s_handle, s_type, s_n, (void **) &s_x, NULL) ; #else LAGr_Vector_import (w_handle, w_type, w_n, w_nvals, &w_i, (void **) &w_x, NULL) ; LAGr_Vector_import (s_handle, s_type, s_n, s_nvals, &s_i, (void **) &s_x, NULL) ; #endif return (GrB_SUCCESS) ; } #undef LAGRAPH_FREE_ALL #define LAGRAPH_FREE_ALL \ { \ LAGRAPH_FREE (I) ; \ LAGRAPH_FREE (V32) ; \ LAGr_free (&f) ; \ LAGr_free (&gp) ; \ LAGr_free (&mngp) ; \ LAGr_free (&gp_new) ; \ LAGr_free (&mod) ; \ if (sanitize) LAGr_free (&S) ; \ } //------------------------------------------------------------------------------ // LAGraph_cc_fastsv5 //------------------------------------------------------------------------------ GrB_Info LAGraph_cc_fastsv5 ( GrB_Vector *result, // output: array of component identifiers GrB_Matrix A, // input matrix bool sanitize // if true, ensure A is symmetric ) { GrB_Info info ; uint32_t *V32 = NULL ; GrB_Index n, *I = NULL ; GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL ; GrB_Matrix S = NULL ; //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- LAGr_Matrix_nrows (&n, A) ; if (n > UINT32_MAX) { LAGRAPH_ERROR ("problem too large; use 64-bit version instead", GrB_INVALID_VALUE) ; } if (sanitize) { // S = A | A' LAGr_Matrix_new (&S, GrB_BOOL, n, n) ; LAGr_eWiseAdd (S, NULL, NULL, GrB_LOR, A, A, LAGraph_desc_otoo) ; } else { // Use the input as-is, and assume it is symmetric S = A ; } //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- // determine # of threads to use for Reduce_assign int nthreads_max = LAGraph_get_nthreads ( ) ; int nthreads = n / (1024*1024) ; nthreads = LAGRAPH_MIN (nthreads, nthreads_max) ; nthreads = LAGRAPH_MAX (nthreads, 1) ; // # of threads to use for typecast int nthreads2 = n / (64*1024) ; nthreads2 = LAGRAPH_MIN (nthreads2, nthreads_max) ; nthreads2 = LAGRAPH_MAX (nthreads2, 1) ; // vectors LAGr_Vector_new (&f, GrB_UINT32, n) ; LAGr_Vector_new (&gp_new, GrB_UINT32, n) ; LAGr_Vector_new (&mod, GrB_BOOL, n) ; // temporary arrays I = LAGraph_malloc (n, sizeof (GrB_Index)) ; V32 = LAGraph_malloc (n, sizeof (uint32_t)) ; // prepare vectors for (GrB_Index i = 0 ; i < n ; i++) { I [i] = i ; V32 
[i] = (uint32_t) i ; } LAGr_Vector_build (f, I, V32, n, GrB_PLUS_UINT32) ; LAGr_Vector_dup (&gp, f) ; LAGr_Vector_dup (&mngp, f) ; //-------------------------------------------------------------------------- // main computation //-------------------------------------------------------------------------- bool diff = true ; while (diff) { // hooking & shortcutting LAGr_mxv (mngp, NULL, GrB_MIN_UINT32, GxB_MIN_SECOND_UINT32, S, gp, NULL) ; LAGRAPH_OK (Reduce_assign32 (&f, &mngp, V32, n, nthreads)) ; LAGr_eWiseAdd (f, NULL, GrB_MIN_UINT32, GrB_MIN_UINT32, mngp, gp, NULL); // calculate grandparent LAGr_Vector_extractTuples (NULL, V32, &n, f) ; for (uint32_t i = 0 ; i < n ; i++) { I [i] = (GrB_Index) V32 [i] ; } LAGr_extract (gp_new, NULL, NULL, f, I, n, NULL) ; // check termination LAGr_eWiseMult (mod, NULL, NULL, GrB_NE_UINT32, gp_new, gp, NULL) ; LAGr_reduce (&diff, NULL, GxB_LOR_BOOL_MONOID, mod, NULL) ; // swap gp and gp_new GrB_Vector t = gp ; gp = gp_new ; gp_new = t ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- *result = f ; f = NULL ; LAGRAPH_FREE_ALL ; return (GrB_SUCCESS) ; }
//------------------------------------------------------------------------------ // LAGraph_cc_fastsv5: connected components //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2020 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact [email protected] for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ /** * Code is based on the algorithm described in the following paper * Zhang, Azad, Hu. FastSV: A Distributed-Memory Connected Component * Algorithm with Fast Convergence (SIAM PP20) * * Modified by Tim Davis, Texas A&M University **/ // The input matrix A must be symmetric. Self-edges (diagonal entries) are // OK, and are ignored. The values and type of A are ignored; just its // pattern is accessed. // The matrix A must have dimension 2^32 or less. If it is larger, use the // 64-bit version of this method instead. TODO combine the two versions into a // single user-callable code. #define LAGRAPH_EXPERIMENTAL_ASK_BEFORE_BENCHMARKING #include "LAGraph.h" //------------------------------------------------------------------------------ // atomic_min_uint32: compute (*p) = min (*p, value), via atomic update //------------------------------------------------------------------------------ static inline void atomic_min_uint32 ( uint32_t *p, // input/output uint32_t value // input ) { uint32_t old, new ; do { // get the old value at (*p) // #pragma omp atomic read old = (*p) ; // compute the new minimum new = LAGRAPH_MIN (old, value) ; } while (!__sync_bool_compare_and_swap (p, old, new)) ; } //------------------------------------------------------------------------------ // Reduce_assign32: w (index) += src, using MIN as the "+=" accum operator //------------------------------------------------------------------------------ // mask = NULL, accumulator = GrB_MIN_UINT32, descriptor = NULL. // Duplicates are summed with the accumulator, which differs from how // GrB_assign works. GrB_assign states that the presence of duplicates results // in undefined behavior. SuiteSparse:GraphBLAS follows the MATLAB rule, which // discards all but the first of the duplicates. TODO: add this to GraphBLAS // as a variant of GrB_assign, either as GxB_assign_accum (or another name), // or as a GxB_* descriptor setting.
#define LAGRAPH_FREE_ALL static GrB_Info Reduce_assign32 ( GrB_Vector *w_handle, // vector of size n, all entries present GrB_Vector *s_handle, // vector of size n, all entries present uint32_t *index, // array of size n GrB_Index n, int nthreads ) { GrB_Type w_type, s_type ; GrB_Index w_n, s_n, w_nvals, s_nvals, *w_i, *s_i ; uint32_t *w_x, *s_x ; #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0) LAGr_Vector_export_Full (w_handle, &w_type, &w_n, (void **) &w_x, NULL) ; LAGr_Vector_export_Full (s_handle, &s_type, &s_n, (void **) &s_x, NULL) ; #else LAGr_Vector_export (w_handle, &w_type, &w_n, &w_nvals, &w_i, (void **) &w_x, NULL) ; LAGr_Vector_export (s_handle, &s_type, &s_n, &s_nvals, &s_i, (void **) &s_x, NULL) ; #endif #if 0 if (nthreads >= 4) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (GrB_Index k = 0 ; k < n ; k++) { uint32_t i = index [k] ; atomic_min_uint32 (&(w_x [i]), s_x [k]) ; } } else #endif { // sequential version, to avoid atomics for (GrB_Index k = 0 ; k < n ; k++) { uint32_t i = index [k] ; w_x [i] = LAGRAPH_MIN (w_x [i], s_x [k]) ; } } #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0) LAGr_Vector_import_Full (w_handle, w_type, w_n, (void **) &w_x, NULL) ; LAGr_Vector_import_Full (s_handle, s_type, s_n, (void **) &s_x, NULL) ; #else LAGr_Vector_import (w_handle, w_type, w_n, w_nvals, &w_i, (void **) &w_x, NULL) ; LAGr_Vector_import (s_handle, s_type, s_n, s_nvals, &s_i, (void **) &s_x, NULL) ; #endif return (GrB_SUCCESS) ; } #undef LAGRAPH_FREE_ALL #define LAGRAPH_FREE_ALL \ { \ LAGRAPH_FREE (I) ; \ LAGRAPH_FREE (V32) ; \ LAGr_free (&f) ; \ LAGr_free (&gp) ; \ LAGr_free (&mngp) ; \ LAGr_free (&gp_new) ; \ LAGr_free (&mod) ; \ if (sanitize) LAGr_free (&S) ; \ } //------------------------------------------------------------------------------ // LAGraph_cc_fastsv5 //------------------------------------------------------------------------------ GrB_Info LAGraph_cc_fastsv5 ( GrB_Vector *result, // output: array of component identifiers GrB_Matrix A, // input matrix bool sanitize // if true, ensure A is symmetric ) { GrB_Info info ; uint32_t *V32 = NULL ; GrB_Index n, *I = NULL ; GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL ; GrB_Matrix S = NULL ; //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- LAGr_Matrix_nrows (&n, A) ; if (n > UINT32_MAX) { LAGRAPH_ERROR ("problem too large; use 64-bit version instead", GrB_INVALID_VALUE) ; } if (sanitize) { // S = A | A' LAGr_Matrix_new (&S, GrB_BOOL, n, n) ; LAGr_eWiseAdd (S, NULL, NULL, GrB_LOR, A, A, LAGraph_desc_otoo) ; } else { // Use the input as-is, and assume it is symmetric S = A ; } //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- // determine # of threads to use for Reduce_assign int nthreads_max = LAGraph_get_nthreads ( ) ; int nthreads = n / (1024*1024) ; nthreads = LAGRAPH_MIN (nthreads, nthreads_max) ; nthreads = LAGRAPH_MAX (nthreads, 1) ; // # of threads to use for typecast int nthreads2 = n / (64*1024) ; nthreads2 = LAGRAPH_MIN (nthreads2, nthreads_max) ; nthreads2 = LAGRAPH_MAX (nthreads2, 1) ; // vectors LAGr_Vector_new (&f, GrB_UINT32, n) ; LAGr_Vector_new (&gp_new, GrB_UINT32, n) ; LAGr_Vector_new (&mod, GrB_BOOL, n) ; // temporary arrays I = LAGraph_malloc (n, sizeof (GrB_Index)) ; V32 = LAGraph_malloc (n, sizeof (uint32_t)) ; // 
prepare vectors #pragma omp parallel for num_threads(nthreads2) schedule(static) for (GrB_Index i = 0 ; i < n ; i++) { I [i] = i ; V32 [i] = (uint32_t) i ; } LAGr_Vector_build (f, I, V32, n, GrB_PLUS_UINT32) ; LAGr_Vector_dup (&gp, f) ; LAGr_Vector_dup (&mngp, f) ; //-------------------------------------------------------------------------- // main computation //-------------------------------------------------------------------------- bool diff = true ; while (diff) { // hooking & shortcutting LAGr_mxv (mngp, NULL, GrB_MIN_UINT32, GxB_MIN_SECOND_UINT32, S, gp, NULL) ; LAGRAPH_OK (Reduce_assign32 (&f, &mngp, V32, n, nthreads)) ; LAGr_eWiseAdd (f, NULL, GrB_MIN_UINT32, GrB_MIN_UINT32, mngp, gp, NULL); // calculate grandparent LAGr_Vector_extractTuples (NULL, V32, &n, f) ; #pragma omp parallel for num_threads(nthreads2) schedule(static) for (uint32_t i = 0 ; i < n ; i++) { I [i] = (GrB_Index) V32 [i] ; } LAGr_extract (gp_new, NULL, NULL, f, I, n, NULL) ; // check termination LAGr_eWiseMult (mod, NULL, NULL, GrB_NE_UINT32, gp_new, gp, NULL) ; LAGr_reduce (&diff, NULL, GxB_LOR_BOOL_MONOID, mod, NULL) ; // swap gp and gp_new GrB_Vector t = gp ; gp = gp_new ; gp_new = t ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- *result = f ; f = NULL ; LAGRAPH_FREE_ALL ; return (GrB_SUCCESS) ; }
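In array terms, one FastSV sweep above is roughly: each vertex takes the minimum gp value among its neighbors (the mxv over the MIN/MIN_SECOND semiring), those minima are folded into f through Reduce_assign32, and then gp_new [i] = f [f [i]], the grandparent shortcut performed by the extract with I = f. A plain-C sketch of that last step, which is what halves tree heights per iteration and gives the fast convergence (the function name is illustrative):

#include <stdint.h>

/* Grandparent shortcut: every vertex jumps to its parent's parent.
 * Mirrors LAGr_extract (gp_new, NULL, NULL, f, I, n, NULL) with I = f. */
static void fastsv_shortcut(uint32_t *gp_new, const uint32_t *f, uint32_t n)
{
    for (uint32_t i = 0 ; i < n ; i++)
        gp_new [i] = f [f [i]] ;
}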
GB_unaryop__abs_int16_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int16_bool // op(A') function: GB_tran__abs_int16_bool // C type: int16_t // A type: bool // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ bool #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int16_bool ( int16_t *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int16_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int16_bool // op(A') function: GB_tran__abs_int16_bool // C type: int16_t // A type: bool // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ bool #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int16_bool ( int16_t *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int16_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int16_bool // op(A') function: GB_tran__abs_int16_bool // C type: int16_t // A type: bool // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ bool #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int16_bool ( int16_t *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int16_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
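Editor's note: the three listings above are the same generated kernel in its three dataset variants (original, without the OpenMP pragma, with the OpenMP pragma). Once the GB_* macros are substituted, GB_unop__abs_int16_bool reduces to a cast-then-abs over a dense array. A hypothetical free-standing equivalent (not the library function itself), assuming GB_IABS is integer absolute value:

#include <cstdint>

static void unop_abs_int16_from_bool(int16_t* Cx, const bool* Ax,
                                     int64_t anz, int nthreads)
{
    // one independent iteration per entry, so a static schedule is a good fit
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++)
    {
        int16_t x = (int16_t)Ax[p];         // GB_CASTING: bool -> int16_t
        Cx[p] = (x >= 0) ? x : (int16_t)-x; // GB_OP: GB_IABS
    }
}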
convolution_winograd_transform_pack4.h
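Editor's note: every kernel in the header below assumes the pack4 layout, in which each spatial pixel holds 4 consecutive floats (one SSE register per packed channel group); that is why all offsets are scaled by 4 and one row step is w * 4. A small hypothetical helper (not ncnn API) showing the addressing the kernels compute inline:

#include <cstddef>

// Base address of pixel (x, y) in a pack4 channel whose row length is w.
static inline const float* pack4_pixel(const float* channel_base, int w, int x, int y)
{
    return channel_base + ((size_t)y * w + x) * 4;
}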
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd63_transform_input_pack4_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 6; const int h_tiles = (h - 2) / 6; const int tiles = w_tiles * h_tiles; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[8][8][4]; __m128 _v5_25 = _mm_set1_ps(5.25f); __m128 _vm4_25 = _mm_set1_ps(-4.25f); __m128 _vm1_25 = _mm_set1_ps(-1.25f); __m128 _v0_25 = _mm_set1_ps(0.25f); __m128 _vm2_5 = _mm_set1_ps(-2.5f); __m128 _v0_5 = _mm_set1_ps(0.5f); __m128 _v2 = _mm_set1_ps(2.f); __m128 _v4 = _mm_set1_ps(4.f); // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 4 * 2); __m128 _r03 = _mm_load_ps(r0 + 4 * 3); __m128 _r04 = _mm_load_ps(r0 + 4 * 4); __m128 _r05 = _mm_load_ps(r0 + 4 * 5); __m128 _r06 = _mm_load_ps(r0 + 4 * 6); __m128 _r07 = _mm_load_ps(r0 + 4 * 7); __m128 _tmp0m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r04, _r02), _mm_sub_ps(_r00, _r06)); __m128 _tmp7m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r03, _r05), _mm_sub_ps(_r07, _r01)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[7][m], _tmp7m); __m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _r04, _mm_add_ps(_r02, _r06)); __m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _r03, _mm_add_ps(_r01, _r05)); __m128 _tmp1m = _mm_add_ps(_tmp12a, _tmp12b); __m128 _tmp2m = 
_mm_sub_ps(_tmp12a, _tmp12b); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); __m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _r04, _mm_comp_fmadd_ps(_v0_25, _r02, _r06)); __m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v0_5))); __m128 _tmp3m = _mm_add_ps(_tmp34a, _tmp34b); __m128 _tmp4m = _mm_sub_ps(_tmp34a, _tmp34b); _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[4][m], _tmp4m); __m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _r04, _r02), _r06); __m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v2))); __m128 _tmp5m = _mm_add_ps(_tmp56a, _tmp56b); __m128 _tmp6m = _mm_sub_ps(_tmp56a, _tmp56b); _mm_store_ps(tmp[5][m], _tmp5m); _mm_store_ps(tmp[6][m], _tmp6m); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3; float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5; float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6; float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7; for (int m = 0; m < 8; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp06 = _mm_load_ps(tmp[m][6]); __m128 _tmp07 = _mm_load_ps(tmp[m][7]); __m128 _r0tm0 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp04, _tmp02), _mm_sub_ps(_tmp00, _tmp06)); __m128 _r0tm7 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp03, _tmp05), _mm_sub_ps(_tmp07, _tmp01)); __m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _tmp04, _mm_add_ps(_tmp02, _tmp06)); __m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _tmp03, _mm_add_ps(_tmp01, _tmp05)); __m128 _r0tm1 = _mm_add_ps(_tmp12a, _tmp12b); __m128 _r0tm2 = _mm_sub_ps(_tmp12a, _tmp12b); __m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _tmp04, _mm_comp_fmadd_ps(_v0_25, _tmp02, _tmp06)); __m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v0_5))); __m128 _r0tm3 = _mm_add_ps(_tmp34a, _tmp34b); __m128 _r0tm4 = _mm_sub_ps(_tmp34a, _tmp34b); __m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _tmp04, _tmp02), _tmp06); __m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v2))); __m128 _r0tm5 = _mm_add_ps(_tmp56a, _tmp56b); __m128 _r0tm6 = _mm_sub_ps(_tmp56a, _tmp56b); _mm_store_ps(r0_tm_0, _r0tm0); _mm_store_ps(r0_tm_1, _r0tm1); _mm_store_ps(r0_tm_2, _r0tm2); _mm_store_ps(r0_tm_3, _r0tm3); _mm_store_ps(r0_tm_4, _r0tm4); _mm_store_ps(r0_tm_5, _r0tm5); _mm_store_ps(r0_tm_6, _r0tm6); _mm_store_ps(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 4 * 8; r0_tm_1 += tiles * 4 * 8; r0_tm_2 += tiles * 4 * 8; r0_tm_3 += tiles * 4 * 8; r0_tm_4 += tiles * 4 * 8; r0_tm_5 += tiles * 4 * 8; r0_tm_6 += tiles * 4 * 8; r0_tm_7 += tiles * 4 * 8; } } } } } static void conv3x3s1_winograd63_transform_output_pack4_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 6; const int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles; const float* biasptr = bias; // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 
8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); __m128 _bias0 = biasptr ? _mm_loadu_ps(biasptr + p * 4) : _mm_setzero_ps(); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[6][8][4]; __m128 _v32 = _mm_set1_ps(32.f); __m128 _v16 = _mm_set1_ps(16.f); __m128 _v8 = _mm_set1_ps(8.f); __m128 _v4 = _mm_set1_ps(4.f); __m128 _v2 = _mm_set1_ps(2.f); // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5; const float* output0_tm_6 = output0_tm_0 + tiles * 4 * 6; const float* output0_tm_7 = output0_tm_0 + tiles * 4 * 7; float* output0 = out0.row(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { __m128 _out0tm0 = _mm_load_ps(output0_tm_0); __m128 _out0tm1 = _mm_load_ps(output0_tm_1); __m128 _out0tm2 = _mm_load_ps(output0_tm_2); __m128 _out0tm3 = _mm_load_ps(output0_tm_3); __m128 _out0tm4 = _mm_load_ps(output0_tm_4); __m128 _out0tm5 = _mm_load_ps(output0_tm_5); __m128 _out0tm6 = _mm_load_ps(output0_tm_6); __m128 _out0tm7 = _mm_load_ps(output0_tm_7); __m128 _tmp024a = _mm_add_ps(_out0tm1, _out0tm2); __m128 _tmp135a = _mm_sub_ps(_out0tm1, _out0tm2); __m128 _tmp024b = _mm_add_ps(_out0tm3, _out0tm4); __m128 _tmp135b = _mm_sub_ps(_out0tm3, _out0tm4); __m128 _tmp024c = _mm_add_ps(_out0tm5, _out0tm6); __m128 _tmp135c = _mm_sub_ps(_out0tm5, _out0tm6); __m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b)); __m128 _tmp2m = _mm_comp_fmadd_ps(_v8, _tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a)); __m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, _tmp024a)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[4][m], _tmp4m); __m128 _tmp1m = _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a)); __m128 _tmp3m = _mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a)); __m128 _tmp5m = _mm_add_ps(_mm_add_ps(_out0tm7, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c)); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[5][m], _tmp5m); output0_tm_0 += tiles * 4 * 8; output0_tm_1 += tiles * 4 * 8; output0_tm_2 += tiles * 4 * 8; output0_tm_3 += tiles * 4 * 8; output0_tm_4 += tiles * 4 * 8; output0_tm_5 += tiles * 4 * 8; output0_tm_6 += tiles * 4 * 8; output0_tm_7 += tiles * 4 * 8; } for (int m = 0; m < 6; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); 
__m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp06 = _mm_load_ps(tmp[m][6]); __m128 _tmp07 = _mm_load_ps(tmp[m][7]); __m128 _tmp024a = _mm_add_ps(_tmp01, _tmp02); __m128 _tmp135a = _mm_sub_ps(_tmp01, _tmp02); __m128 _tmp024b = _mm_add_ps(_tmp03, _tmp04); __m128 _tmp135b = _mm_sub_ps(_tmp03, _tmp04); __m128 _tmp024c = _mm_add_ps(_tmp05, _tmp06); __m128 _tmp135c = _mm_sub_ps(_tmp05, _tmp06); __m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b))); __m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, _tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a))); __m128 _out04 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, _tmp024a))); _mm_store_ps(output0, _out00); _mm_store_ps(output0 + 4 * 2, _out02); _mm_store_ps(output0 + 4 * 4, _out04); __m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a))); __m128 _out03 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a))); __m128 _out05 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp07, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c))); _mm_store_ps(output0 + 4, _out01); _mm_store_ps(output0 + 4 * 3, _out03); _mm_store_ps(output0 + 4 * 5, _out05); output0 += outw * 4; } } } } } static void conv3x3s1_winograd43_transform_input_pack4_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 4; const int h_tiles = (h - 2) / 4; const int tiles = w_tiles * h_tiles; // const float itm[6][6] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[6][6][4]; __m128 _vm5 = _mm_set1_ps(-5.f); __m128 _vm4 = _mm_set1_ps(-4.f); __m128 _v4 = _mm_set1_ps(4.f); __m128 _vm2 = _mm_set1_ps(-2.f); __m128 _v2 = _mm_set1_ps(2.f); // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* r0 = img0.row(i * 4) + (j * 4) * 4; for (int m = 0; m < 6; m++) { __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 4 * 2); __m128 _r03 = _mm_load_ps(r0 + 4 * 3); __m128 _r04 = _mm_load_ps(r0 + 4 * 4); __m128 _r05 = _mm_load_ps(r0 + 4 * 5); __m128 _tmp0m = _mm_comp_fmadd_ps(_vm5, _r02, _mm_comp_fmadd_ps(_v4, _r00, _r04)); __m128 _tmp1m = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_r01, _r02), _mm_add_ps(_r04, _r03)); __m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_r01, _r02), _mm_sub_ps(_r04, _r03)); __m128 _tmp3m = _mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02)); __m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02)); __m128 _tmp5m = _mm_comp_fmadd_ps(_vm5, _r03, _mm_comp_fmadd_ps(_v4, _r01, _r05)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], 
_tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[4][m], _tmp4m); _mm_store_ps(tmp[5][m], _tmp5m); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3; float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5; for (int m = 0; m < 6; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _r0tm0 = _mm_comp_fmadd_ps(_vm5, _tmp02, _mm_comp_fmadd_ps(_v4, _tmp00, _tmp04)); __m128 _r0tm1 = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_tmp01, _tmp02), _mm_add_ps(_tmp04, _tmp03)); __m128 _r0tm2 = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_tmp01, _tmp02), _mm_sub_ps(_tmp04, _tmp03)); __m128 _r0tm3 = _mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02)); __m128 _r0tm4 = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02)); __m128 _r0tm5 = _mm_comp_fmadd_ps(_vm5, _tmp03, _mm_comp_fmadd_ps(_v4, _tmp01, _tmp05)); _mm_store_ps(r0_tm_0, _r0tm0); _mm_store_ps(r0_tm_1, _r0tm1); _mm_store_ps(r0_tm_2, _r0tm2); _mm_store_ps(r0_tm_3, _r0tm3); _mm_store_ps(r0_tm_4, _r0tm4); _mm_store_ps(r0_tm_5, _r0tm5); r0_tm_0 += tiles * 4 * 6; r0_tm_1 += tiles * 4 * 6; r0_tm_2 += tiles * 4 * 6; r0_tm_3 += tiles * 4 * 6; r0_tm_4 += tiles * 4 * 6; r0_tm_5 += tiles * 4 * 6; } } } } } static void conv3x3s1_winograd43_transform_output_pack4_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 4; const int h_tiles = outh / 4; const int tiles = w_tiles * h_tiles; const float* biasptr = bias; // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); __m128 _bias0 = biasptr ? 
_mm_loadu_ps(biasptr + p * 4) : _mm_setzero_ps(); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[4][6][4]; __m128 _v2 = _mm_set1_ps(2.f); __m128 _v4 = _mm_set1_ps(4.f); __m128 _v8 = _mm_set1_ps(8.f); // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5; float* output0 = out0.row(i * 4) + (j * 4) * 4; for (int m = 0; m < 6; m++) { __m128 _out0tm0 = _mm_load_ps(output0_tm_0); __m128 _out0tm1 = _mm_load_ps(output0_tm_1); __m128 _out0tm2 = _mm_load_ps(output0_tm_2); __m128 _out0tm3 = _mm_load_ps(output0_tm_3); __m128 _out0tm4 = _mm_load_ps(output0_tm_4); __m128 _out0tm5 = _mm_load_ps(output0_tm_5); __m128 _tmp02a = _mm_add_ps(_out0tm1, _out0tm2); __m128 _tmp13a = _mm_sub_ps(_out0tm1, _out0tm2); __m128 _tmp02b = _mm_add_ps(_out0tm3, _out0tm4); __m128 _tmp13b = _mm_sub_ps(_out0tm3, _out0tm4); __m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp02a), _tmp02b); __m128 _tmp1m = _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a); __m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a); __m128 _tmp3m = _mm_comp_fmadd_ps(_v8, _tmp13b, _mm_add_ps(_out0tm5, _tmp13a)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 4 * 6; output0_tm_1 += tiles * 4 * 6; output0_tm_2 += tiles * 4 * 6; output0_tm_3 += tiles * 4 * 6; output0_tm_4 += tiles * 4 * 6; output0_tm_5 += tiles * 4 * 6; } for (int m = 0; m < 4; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp02a = _mm_add_ps(_tmp01, _tmp02); __m128 _tmp13a = _mm_sub_ps(_tmp01, _tmp02); __m128 _tmp02b = _mm_add_ps(_tmp03, _tmp04); __m128 _tmp13b = _mm_sub_ps(_tmp03, _tmp04); __m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp02a), _tmp02b)); __m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a)); __m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a)); __m128 _out03 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, _tmp13b, _mm_add_ps(_tmp05, _tmp13a))); _mm_store_ps(output0, _out00); _mm_store_ps(output0 + 4, _out01); _mm_store_ps(output0 + 4 * 2, _out02); _mm_store_ps(output0 + 4 * 3, _out03); output0 += outw * 4; } } } } } static void conv3x3s1_winograd23_transform_input_pack4_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 2; const int h_tiles = (h - 2) / 2; const int tiles = w_tiles * h_tiles; // const float itm[4][4] = { // {1.0f, 0.0f, -1.0f, 0.0f}, // {0.0f, 1.0f, 1.00f, 0.0f}, // {0.0f, -1.0f, 1.00f, 0.0f}, // {0.0f, -1.0f, 0.00f, 1.0f} // }; // 0 = r00 - r02 // 1 = r01 + r02 // 2 = r02 - r01 // 3 = r03 - r01 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); #ifdef _MSC_VER __declspec(align(16)) #else 
__attribute__((aligned(16))) #endif float tmp[4][4][4]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* r0 = img0.row(i * 2) + (j * 2) * 4; for (int m = 0; m < 4; m++) { __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 4 * 2); __m128 _r03 = _mm_load_ps(r0 + 4 * 3); __m128 _tmp0m = _mm_sub_ps(_r00, _r02); __m128 _tmp1m = _mm_add_ps(_r01, _r02); __m128 _tmp2m = _mm_sub_ps(_r02, _r01); __m128 _tmp3m = _mm_sub_ps(_r03, _r01); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[3][m], _tmp3m); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3; for (int m = 0; m < 4; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _r0tm0 = _mm_sub_ps(_tmp00, _tmp02); __m128 _r0tm1 = _mm_add_ps(_tmp01, _tmp02); __m128 _r0tm2 = _mm_sub_ps(_tmp02, _tmp01); __m128 _r0tm3 = _mm_sub_ps(_tmp03, _tmp01); _mm_store_ps(r0_tm_0, _r0tm0); _mm_store_ps(r0_tm_1, _r0tm1); _mm_store_ps(r0_tm_2, _r0tm2); _mm_store_ps(r0_tm_3, _r0tm3); r0_tm_0 += tiles * 4 * 4; r0_tm_1 += tiles * 4 * 4; r0_tm_2 += tiles * 4 * 4; r0_tm_3 += tiles * 4 * 4; } } } } } static void conv3x3s1_winograd23_transform_output_pack4_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 2; const int h_tiles = outh / 2; const int tiles = w_tiles * h_tiles; const float* biasptr = bias; // const float otm[2][4] = { // {1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r00 + r01 + r02 // 1 = r01 - r02 + r03 #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); __m128 _bias0 = biasptr ? 
_mm_loadu_ps(biasptr + p * 4) : _mm_setzero_ps(); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[2][4][4]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; float* output0 = out0.row(i * 2) + (j * 2) * 4; for (int m = 0; m < 4; m++) { __m128 _out0tm0 = _mm_load_ps(output0_tm_0); __m128 _out0tm1 = _mm_load_ps(output0_tm_1); __m128 _out0tm2 = _mm_load_ps(output0_tm_2); __m128 _out0tm3 = _mm_load_ps(output0_tm_3); __m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _out0tm1), _out0tm2); __m128 _tmp1m = _mm_add_ps(_mm_sub_ps(_out0tm1, _out0tm2), _out0tm3); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); output0_tm_0 += tiles * 4 * 4; output0_tm_1 += tiles * 4 * 4; output0_tm_2 += tiles * 4 * 4; output0_tm_3 += tiles * 4 * 4; } for (int m = 0; m < 2; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp01), _tmp02)); __m128 _out01 = _mm_add_ps(_bias0, _mm_add_ps(_mm_sub_ps(_tmp01, _tmp02), _tmp03)); _mm_store_ps(output0, _out00); _mm_store_ps(output0 + 4, _out01); output0 += outw * 4; } } } } }
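Editor's note: the itm/otm comment blocks above are the Winograd transform matrices; each kernel evaluates Bt * d * B on input tiles and At * m * A on output tiles, four lanes at a time. A scalar reference for the smallest case, F(2x2, 3x3) as used by the winograd23 kernels, following exactly the itm/otm rows quoted in the comments (hypothetical helpers, one tile, one channel, not ncnn API):

static void winograd23_input_transform_ref(const float d[4][4], float u[4][4])
{
    float t[4][4];
    for (int k = 0; k < 4; k++) // Bt * d, rows {1,0,-1,0},{0,1,1,0},{0,-1,1,0},{0,-1,0,1}
    {
        t[0][k] = d[0][k] - d[2][k];
        t[1][k] = d[1][k] + d[2][k];
        t[2][k] = d[2][k] - d[1][k];
        t[3][k] = d[3][k] - d[1][k];
    }
    for (int m = 0; m < 4; m++) // (Bt * d) * B, same combination along rows
    {
        u[m][0] = t[m][0] - t[m][2];
        u[m][1] = t[m][1] + t[m][2];
        u[m][2] = t[m][2] - t[m][1];
        u[m][3] = t[m][3] - t[m][1];
    }
}

static void winograd23_output_transform_ref(const float m_[4][4], float y[2][2])
{
    float t[2][4];
    for (int k = 0; k < 4; k++) // At * m, rows {1,1,1,0},{0,1,-1,1}
    {
        t[0][k] = m_[0][k] + m_[1][k] + m_[2][k];
        t[1][k] = m_[1][k] - m_[2][k] + m_[3][k];
    }
    for (int r = 0; r < 2; r++) // (At * m) * A
    {
        y[r][0] = t[r][0] + t[r][1] + t[r][2];
        y[r][1] = t[r][1] - t[r][2] + t[r][3];
    }
}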
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd63_transform_input_pack4_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c;
    const int w_tiles = (w - 2) / 6; const int h_tiles = (h - 2) / 6; const int tiles = w_tiles * h_tiles;
    // const float itm[8][8] = {
    // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
    //
    // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
    // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
    //
    // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
    // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
    //
    // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
    // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
    //
    // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
    // };
    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25
    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);
#ifdef _MSC_VER
        __declspec(align(16))
#else
        __attribute__((aligned(16)))
#endif
        float tmp[8][8][4];
        __m128 _v5_25 = _mm_set1_ps(5.25f); __m128 _vm4_25 = _mm_set1_ps(-4.25f); __m128 _vm1_25 = _mm_set1_ps(-1.25f); __m128 _v0_25 = _mm_set1_ps(0.25f);
        __m128 _vm2_5 = _mm_set1_ps(-2.5f); __m128 _v0_5 = _mm_set1_ps(0.5f); __m128 _v2 = _mm_set1_ps(2.f); __m128 _v4 = _mm_set1_ps(4.f);
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 6) + (j * 6) * 4;
                for (int m = 0; m < 8; m++)
                {
                    __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 4 * 2); __m128 _r03 = _mm_load_ps(r0 + 4 * 3);
                    __m128 _r04 = _mm_load_ps(r0 + 4 * 4); __m128 _r05 = _mm_load_ps(r0 + 4 * 5); __m128 _r06 = _mm_load_ps(r0 + 4 * 6); __m128 _r07 = _mm_load_ps(r0 + 4 * 7);
                    __m128 _tmp0m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r04, _r02), _mm_sub_ps(_r00, _r06));
                    __m128 _tmp7m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r03, _r05), _mm_sub_ps(_r07, _r01));
                    _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[7][m], _tmp7m);
                    __m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _r04, _mm_add_ps(_r02, _r06));
                    __m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _r03, _mm_add_ps(_r01, _r05));
                    __m128 _tmp1m = _mm_add_ps(_tmp12a, _tmp12b); __m128 _tmp2m = _mm_sub_ps(_tmp12a, _tmp12b);
                    _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m);
                    __m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _r04, _mm_comp_fmadd_ps(_v0_25, _r02, _r06));
                    __m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v0_5)));
                    __m128 _tmp3m = _mm_add_ps(_tmp34a, _tmp34b); __m128 _tmp4m = _mm_sub_ps(_tmp34a, _tmp34b);
                    _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[4][m], _tmp4m);
                    __m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _r04, _r02), _r06);
                    __m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v2)));
                    __m128 _tmp5m = _mm_add_ps(_tmp56a, _tmp56b); __m128 _tmp6m = _mm_sub_ps(_tmp56a, _tmp56b);
                    _mm_store_ps(tmp[5][m], _tmp5m); _mm_store_ps(tmp[6][m], _tmp6m);
                    r0 += w * 4;
                }
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5; float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6; float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7;
                for (int m = 0; m < 8; m++)
                {
                    __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]);
                    __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp06 = _mm_load_ps(tmp[m][6]); __m128 _tmp07 = _mm_load_ps(tmp[m][7]);
                    __m128 _r0tm0 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp04, _tmp02), _mm_sub_ps(_tmp00, _tmp06));
                    __m128 _r0tm7 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp03, _tmp05), _mm_sub_ps(_tmp07, _tmp01));
                    __m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _tmp04, _mm_add_ps(_tmp02, _tmp06));
                    __m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _tmp03, _mm_add_ps(_tmp01, _tmp05));
                    __m128 _r0tm1 = _mm_add_ps(_tmp12a, _tmp12b); __m128 _r0tm2 = _mm_sub_ps(_tmp12a, _tmp12b);
                    __m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _tmp04, _mm_comp_fmadd_ps(_v0_25, _tmp02, _tmp06));
                    __m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v0_5)));
                    __m128 _r0tm3 = _mm_add_ps(_tmp34a, _tmp34b); __m128 _r0tm4 = _mm_sub_ps(_tmp34a, _tmp34b);
                    __m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _tmp04, _tmp02), _tmp06);
                    __m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v2)));
                    __m128 _r0tm5 = _mm_add_ps(_tmp56a, _tmp56b); __m128 _r0tm6 = _mm_sub_ps(_tmp56a, _tmp56b);
                    _mm_store_ps(r0_tm_0, _r0tm0); _mm_store_ps(r0_tm_1, _r0tm1); _mm_store_ps(r0_tm_2, _r0tm2); _mm_store_ps(r0_tm_3, _r0tm3);
                    _mm_store_ps(r0_tm_4, _r0tm4); _mm_store_ps(r0_tm_5, _r0tm5); _mm_store_ps(r0_tm_6, _r0tm6); _mm_store_ps(r0_tm_7, _r0tm7);
                    r0_tm_0 += tiles * 4 * 8; r0_tm_1 += tiles * 4 * 8; r0_tm_2 += tiles * 4 * 8; r0_tm_3 += tiles * 4 * 8;
                    r0_tm_4 += tiles * 4 * 8; r0_tm_5 += tiles * 4 * 8; r0_tm_6 += tiles * 4 * 8; r0_tm_7 += tiles * 4 * 8;
                }
            }
        }
    }
}
static void conv3x3s1_winograd63_transform_output_pack4_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c;
    const int w_tiles = outw / 6; const int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles;
    const float* biasptr = bias;
    // const float otm[6][8] = {
    // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
    // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f, -16.0f, 0.0f},
    // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
    // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
    // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
    // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
    // };
    // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
    // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 = (r1 + r2) + (r3 + r4) * 16 + (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32 + (r5 - r6)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);
        __m128 _bias0 = biasptr ? _mm_loadu_ps(biasptr + p * 4) : _mm_setzero_ps();
#ifdef _MSC_VER
        __declspec(align(16))
#else
        __attribute__((aligned(16)))
#endif
        float tmp[6][8][4];
        __m128 _v32 = _mm_set1_ps(32.f); __m128 _v16 = _mm_set1_ps(16.f); __m128 _v8 = _mm_set1_ps(8.f); __m128 _v4 = _mm_set1_ps(4.f); __m128 _v2 = _mm_set1_ps(2.f);
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5; const float* output0_tm_6 = output0_tm_0 + tiles * 4 * 6; const float* output0_tm_7 = output0_tm_0 + tiles * 4 * 7;
                float* output0 = out0.row(i * 6) + (j * 6) * 4;
                for (int m = 0; m < 8; m++)
                {
                    __m128 _out0tm0 = _mm_load_ps(output0_tm_0); __m128 _out0tm1 = _mm_load_ps(output0_tm_1); __m128 _out0tm2 = _mm_load_ps(output0_tm_2); __m128 _out0tm3 = _mm_load_ps(output0_tm_3);
                    __m128 _out0tm4 = _mm_load_ps(output0_tm_4); __m128 _out0tm5 = _mm_load_ps(output0_tm_5); __m128 _out0tm6 = _mm_load_ps(output0_tm_6); __m128 _out0tm7 = _mm_load_ps(output0_tm_7);
                    __m128 _tmp024a = _mm_add_ps(_out0tm1, _out0tm2); __m128 _tmp135a = _mm_sub_ps(_out0tm1, _out0tm2);
                    __m128 _tmp024b = _mm_add_ps(_out0tm3, _out0tm4); __m128 _tmp135b = _mm_sub_ps(_out0tm3, _out0tm4);
                    __m128 _tmp024c = _mm_add_ps(_out0tm5, _out0tm6); __m128 _tmp135c = _mm_sub_ps(_out0tm5, _out0tm6);
                    __m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b));
                    __m128 _tmp2m = _mm_comp_fmadd_ps(_v8, _tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a));
                    __m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, _tmp024a));
                    _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[4][m], _tmp4m);
                    __m128 _tmp1m = _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a));
                    __m128 _tmp3m = _mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a));
                    __m128 _tmp5m = _mm_add_ps(_mm_add_ps(_out0tm7, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c));
                    _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[5][m], _tmp5m);
                    output0_tm_0 += tiles * 4 * 8; output0_tm_1 += tiles * 4 * 8; output0_tm_2 += tiles * 4 * 8; output0_tm_3 += tiles * 4 * 8;
                    output0_tm_4 += tiles * 4 * 8; output0_tm_5 += tiles * 4 * 8; output0_tm_6 += tiles * 4 * 8; output0_tm_7 += tiles * 4 * 8;
                }
                for (int m = 0; m < 6; m++)
                {
                    __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]);
                    __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp06 = _mm_load_ps(tmp[m][6]); __m128 _tmp07 = _mm_load_ps(tmp[m][7]);
                    __m128 _tmp024a = _mm_add_ps(_tmp01, _tmp02); __m128 _tmp135a = _mm_sub_ps(_tmp01, _tmp02);
                    __m128 _tmp024b = _mm_add_ps(_tmp03, _tmp04); __m128 _tmp135b = _mm_sub_ps(_tmp03, _tmp04);
                    __m128 _tmp024c = _mm_add_ps(_tmp05, _tmp06); __m128 _tmp135c = _mm_sub_ps(_tmp05, _tmp06);
                    __m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b)));
                    __m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, _tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a)));
                    __m128 _out04 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, _tmp024a)));
                    _mm_store_ps(output0, _out00); _mm_store_ps(output0 + 4 * 2, _out02); _mm_store_ps(output0 + 4 * 4, _out04);
                    __m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a)));
                    __m128 _out03 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a)));
                    __m128 _out05 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp07, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c)));
                    _mm_store_ps(output0 + 4, _out01); _mm_store_ps(output0 + 4 * 3, _out03); _mm_store_ps(output0 + 4 * 5, _out05);
                    output0 += outw * 4;
                }
            }
        }
    }
}
static void conv3x3s1_winograd43_transform_input_pack4_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c;
    const int w_tiles = (w - 2) / 4; const int h_tiles = (h - 2) / 4; const int tiles = w_tiles * h_tiles;
    // const float itm[6][6] = {
    // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    // {0.0f, -4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    // {0.0f, 4.0f, -4.0f, -1.0f, 1.0f, 0.0f},
    // {0.0f, -2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    // {0.0f, 2.0f, -1.0f, -2.0f, 1.0f, 0.0f},
    // {0.0f, 4.0f, 0.0f, -5.0f, 0.0f, 1.0f}
    // };
    // 0 = 4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 = 4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 = 2 * (r01 - r03) + r04 - r02
    // 5 = 4 * r01 - 5 * r03 + r05
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);
#ifdef _MSC_VER
        __declspec(align(16))
#else
        __attribute__((aligned(16)))
#endif
        float tmp[6][6][4];
        __m128 _vm5 = _mm_set1_ps(-5.f); __m128 _vm4 = _mm_set1_ps(-4.f); __m128 _v4 = _mm_set1_ps(4.f); __m128 _vm2 = _mm_set1_ps(-2.f); __m128 _v2 = _mm_set1_ps(2.f);
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 4) + (j * 4) * 4;
                for (int m = 0; m < 6; m++)
                {
                    __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 4 * 2);
                    __m128 _r03 = _mm_load_ps(r0 + 4 * 3); __m128 _r04 = _mm_load_ps(r0 + 4 * 4); __m128 _r05 = _mm_load_ps(r0 + 4 * 5);
                    __m128 _tmp0m = _mm_comp_fmadd_ps(_vm5, _r02, _mm_comp_fmadd_ps(_v4, _r00, _r04));
                    __m128 _tmp1m = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_r01, _r02), _mm_add_ps(_r04, _r03));
                    __m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_r01, _r02), _mm_sub_ps(_r04, _r03));
                    __m128 _tmp3m = _mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02));
                    __m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02));
                    __m128 _tmp5m = _mm_comp_fmadd_ps(_vm5, _r03, _mm_comp_fmadd_ps(_v4, _r01, _r05));
                    _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m);
                    _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[4][m], _tmp4m); _mm_store_ps(tmp[5][m], _tmp5m);
                    r0 += w * 4;
                }
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;
                for (int m = 0; m < 6; m++)
                {
                    __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]);
                    __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]);
                    __m128 _r0tm0 = _mm_comp_fmadd_ps(_vm5, _tmp02, _mm_comp_fmadd_ps(_v4, _tmp00, _tmp04));
                    __m128 _r0tm1 = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_tmp01, _tmp02), _mm_add_ps(_tmp04, _tmp03));
                    __m128 _r0tm2 = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_tmp01, _tmp02), _mm_sub_ps(_tmp04, _tmp03));
                    __m128 _r0tm3 = _mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02));
                    __m128 _r0tm4 = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02));
                    __m128 _r0tm5 = _mm_comp_fmadd_ps(_vm5, _tmp03, _mm_comp_fmadd_ps(_v4, _tmp01, _tmp05));
                    _mm_store_ps(r0_tm_0, _r0tm0); _mm_store_ps(r0_tm_1, _r0tm1); _mm_store_ps(r0_tm_2, _r0tm2);
                    _mm_store_ps(r0_tm_3, _r0tm3); _mm_store_ps(r0_tm_4, _r0tm4); _mm_store_ps(r0_tm_5, _r0tm5);
                    r0_tm_0 += tiles * 4 * 6; r0_tm_1 += tiles * 4 * 6; r0_tm_2 += tiles * 4 * 6;
                    r0_tm_3 += tiles * 4 * 6; r0_tm_4 += tiles * 4 * 6; r0_tm_5 += tiles * 4 * 6;
                }
            }
        }
    }
}
static void conv3x3s1_winograd43_transform_output_pack4_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c;
    const int w_tiles = outw / 4; const int h_tiles = outh / 4; const int tiles = w_tiles * h_tiles;
    const float* biasptr = bias;
    // const float otm[4][6] = {
    // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
    // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
    // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };
    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 = (r01 - r02) + (r03 - r04) * 2
    // 2 = (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);
        __m128 _bias0 = biasptr ? _mm_loadu_ps(biasptr + p * 4) : _mm_setzero_ps();
#ifdef _MSC_VER
        __declspec(align(16))
#else
        __attribute__((aligned(16)))
#endif
        float tmp[4][6][4];
        __m128 _v2 = _mm_set1_ps(2.f); __m128 _v4 = _mm_set1_ps(4.f); __m128 _v8 = _mm_set1_ps(8.f);
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5;
                float* output0 = out0.row(i * 4) + (j * 4) * 4;
                for (int m = 0; m < 6; m++)
                {
                    __m128 _out0tm0 = _mm_load_ps(output0_tm_0); __m128 _out0tm1 = _mm_load_ps(output0_tm_1); __m128 _out0tm2 = _mm_load_ps(output0_tm_2);
                    __m128 _out0tm3 = _mm_load_ps(output0_tm_3); __m128 _out0tm4 = _mm_load_ps(output0_tm_4); __m128 _out0tm5 = _mm_load_ps(output0_tm_5);
                    __m128 _tmp02a = _mm_add_ps(_out0tm1, _out0tm2); __m128 _tmp13a = _mm_sub_ps(_out0tm1, _out0tm2);
                    __m128 _tmp02b = _mm_add_ps(_out0tm3, _out0tm4); __m128 _tmp13b = _mm_sub_ps(_out0tm3, _out0tm4);
                    __m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp02a), _tmp02b);
                    __m128 _tmp1m = _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a);
                    __m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a);
                    __m128 _tmp3m = _mm_comp_fmadd_ps(_v8, _tmp13b, _mm_add_ps(_out0tm5, _tmp13a));
                    _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[3][m], _tmp3m);
                    output0_tm_0 += tiles * 4 * 6; output0_tm_1 += tiles * 4 * 6; output0_tm_2 += tiles * 4 * 6;
                    output0_tm_3 += tiles * 4 * 6; output0_tm_4 += tiles * 4 * 6; output0_tm_5 += tiles * 4 * 6;
                }
                for (int m = 0; m < 4; m++)
                {
                    __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]);
                    __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]);
                    __m128 _tmp02a = _mm_add_ps(_tmp01, _tmp02); __m128 _tmp13a = _mm_sub_ps(_tmp01, _tmp02);
                    __m128 _tmp02b = _mm_add_ps(_tmp03, _tmp04); __m128 _tmp13b = _mm_sub_ps(_tmp03, _tmp04);
                    __m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp02a), _tmp02b));
                    __m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a));
                    __m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a));
                    __m128 _out03 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, _tmp13b, _mm_add_ps(_tmp05, _tmp13a)));
                    _mm_store_ps(output0, _out00); _mm_store_ps(output0 + 4, _out01); _mm_store_ps(output0 + 4 * 2, _out02); _mm_store_ps(output0 + 4 * 3, _out03);
                    output0 += outw * 4;
                }
            }
        }
    }
}
static void conv3x3s1_winograd23_transform_input_pack4_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c;
    const int w_tiles = (w - 2) / 2; const int h_tiles = (h - 2) / 2; const int tiles = w_tiles * h_tiles;
    // const float itm[4][4] = {
    // {1.0f, 0.0f, -1.0f, 0.0f},
    // {0.0f, 1.0f, 1.00f, 0.0f},
    // {0.0f, -1.0f, 1.00f, 0.0f},
    // {0.0f, -1.0f, 0.00f, 1.0f}
    // };
    // 0 = r00 - r02
    // 1 = r01 + r02
    // 2 = r02 - r01
    // 3 = r03 - r01
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);
#ifdef _MSC_VER
        __declspec(align(16))
#else
        __attribute__((aligned(16)))
#endif
        float tmp[4][4][4];
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 2) + (j * 2) * 4;
                for (int m = 0; m < 4; m++)
                {
                    __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 4 * 2); __m128 _r03 = _mm_load_ps(r0 + 4 * 3);
                    __m128 _tmp0m = _mm_sub_ps(_r00, _r02); __m128 _tmp1m = _mm_add_ps(_r01, _r02); __m128 _tmp2m = _mm_sub_ps(_r02, _r01); __m128 _tmp3m = _mm_sub_ps(_r03, _r01);
                    _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[3][m], _tmp3m);
                    r0 += w * 4;
                }
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
                for (int m = 0; m < 4; m++)
                {
                    __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]);
                    __m128 _r0tm0 = _mm_sub_ps(_tmp00, _tmp02); __m128 _r0tm1 = _mm_add_ps(_tmp01, _tmp02); __m128 _r0tm2 = _mm_sub_ps(_tmp02, _tmp01); __m128 _r0tm3 = _mm_sub_ps(_tmp03, _tmp01);
                    _mm_store_ps(r0_tm_0, _r0tm0); _mm_store_ps(r0_tm_1, _r0tm1); _mm_store_ps(r0_tm_2, _r0tm2); _mm_store_ps(r0_tm_3, _r0tm3);
                    r0_tm_0 += tiles * 4 * 4; r0_tm_1 += tiles * 4 * 4; r0_tm_2 += tiles * 4 * 4; r0_tm_3 += tiles * 4 * 4;
                }
            }
        }
    }
}
static void conv3x3s1_winograd23_transform_output_pack4_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c;
    const int w_tiles = outw / 2; const int h_tiles = outh / 2; const int tiles = w_tiles * h_tiles;
    const float* biasptr = bias;
    // const float otm[2][4] = {
    // {1.0f, 1.0f, 1.0f, 0.0f},
    // {0.0f, 1.0f, -1.0f, 1.0f}
    // };
    // 0 = r00 + r01 + r02
    // 1 = r01 - r02 + r03
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);
        __m128 _bias0 = biasptr ? _mm_loadu_ps(biasptr + p * 4) : _mm_setzero_ps();
#ifdef _MSC_VER
        __declspec(align(16))
#else
        __attribute__((aligned(16)))
#endif
        float tmp[2][4][4];
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
                float* output0 = out0.row(i * 2) + (j * 2) * 4;
                for (int m = 0; m < 4; m++)
                {
                    __m128 _out0tm0 = _mm_load_ps(output0_tm_0); __m128 _out0tm1 = _mm_load_ps(output0_tm_1); __m128 _out0tm2 = _mm_load_ps(output0_tm_2); __m128 _out0tm3 = _mm_load_ps(output0_tm_3);
                    __m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _out0tm1), _out0tm2);
                    __m128 _tmp1m = _mm_add_ps(_mm_sub_ps(_out0tm1, _out0tm2), _out0tm3);
                    _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m);
                    output0_tm_0 += tiles * 4 * 4; output0_tm_1 += tiles * 4 * 4; output0_tm_2 += tiles * 4 * 4; output0_tm_3 += tiles * 4 * 4;
                }
                for (int m = 0; m < 2; m++)
                {
                    __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]);
                    __m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp01), _tmp02));
                    __m128 _out01 = _mm_add_ps(_bias0, _mm_add_ps(_mm_sub_ps(_tmp01, _tmp02), _tmp03));
                    _mm_store_ps(output0, _out00); _mm_store_ps(output0 + 4, _out01);
                    output0 += outw * 4;
                }
            }
        }
    }
}
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd63_transform_input_pack4_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c;
    const int w_tiles = (w - 2) / 6; const int h_tiles = (h - 2) / 6; const int tiles = w_tiles * h_tiles;
    // const float itm[8][8] = {
    // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
    //
    // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
    // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
    //
    // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
    // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
    //
    // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
    // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
    //
    // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
    // };
    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25
    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);
#ifdef _MSC_VER
        __declspec(align(16))
#else
        __attribute__((aligned(16)))
#endif
        float tmp[8][8][4];
        __m128 _v5_25 = _mm_set1_ps(5.25f); __m128 _vm4_25 = _mm_set1_ps(-4.25f); __m128 _vm1_25 = _mm_set1_ps(-1.25f); __m128 _v0_25 = _mm_set1_ps(0.25f);
        __m128 _vm2_5 = _mm_set1_ps(-2.5f); __m128 _v0_5 = _mm_set1_ps(0.5f); __m128 _v2 = _mm_set1_ps(2.f); __m128 _v4 = _mm_set1_ps(4.f);
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 6) + (j * 6) * 4;
                for (int m = 0; m < 8; m++)
                {
                    __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 4 * 2); __m128 _r03 = _mm_load_ps(r0 + 4 * 3);
                    __m128 _r04 = _mm_load_ps(r0 + 4 * 4); __m128 _r05 = _mm_load_ps(r0 + 4 * 5); __m128 _r06 = _mm_load_ps(r0 + 4 * 6); __m128 _r07 = _mm_load_ps(r0 + 4 * 7);
                    __m128 _tmp0m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r04, _r02), _mm_sub_ps(_r00, _r06));
                    __m128 _tmp7m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r03, _r05), _mm_sub_ps(_r07, _r01));
                    _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[7][m], _tmp7m);
                    __m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _r04, _mm_add_ps(_r02, _r06));
                    __m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _r03, _mm_add_ps(_r01, _r05));
                    __m128 _tmp1m = _mm_add_ps(_tmp12a, _tmp12b); __m128 _tmp2m = _mm_sub_ps(_tmp12a, _tmp12b);
                    _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m);
                    __m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _r04, _mm_comp_fmadd_ps(_v0_25, _r02, _r06));
                    __m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v0_5)));
                    __m128 _tmp3m = _mm_add_ps(_tmp34a, _tmp34b); __m128 _tmp4m = _mm_sub_ps(_tmp34a, _tmp34b);
                    _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[4][m], _tmp4m);
                    __m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _r04, _r02), _r06);
                    __m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v2)));
                    __m128 _tmp5m = _mm_add_ps(_tmp56a, _tmp56b); __m128 _tmp6m = _mm_sub_ps(_tmp56a, _tmp56b);
                    _mm_store_ps(tmp[5][m], _tmp5m); _mm_store_ps(tmp[6][m], _tmp6m);
                    r0 += w * 4;
                }
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5; float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6; float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7;
                for (int m = 0; m < 8; m++)
                {
                    __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]);
                    __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp06 = _mm_load_ps(tmp[m][6]); __m128 _tmp07 = _mm_load_ps(tmp[m][7]);
                    __m128 _r0tm0 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp04, _tmp02), _mm_sub_ps(_tmp00, _tmp06));
                    __m128 _r0tm7 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp03, _tmp05), _mm_sub_ps(_tmp07, _tmp01));
                    __m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _tmp04, _mm_add_ps(_tmp02, _tmp06));
                    __m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _tmp03, _mm_add_ps(_tmp01, _tmp05));
                    __m128 _r0tm1 = _mm_add_ps(_tmp12a, _tmp12b); __m128 _r0tm2 = _mm_sub_ps(_tmp12a, _tmp12b);
                    __m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _tmp04, _mm_comp_fmadd_ps(_v0_25, _tmp02, _tmp06));
                    __m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v0_5)));
                    __m128 _r0tm3 = _mm_add_ps(_tmp34a, _tmp34b); __m128 _r0tm4 = _mm_sub_ps(_tmp34a, _tmp34b);
                    __m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _tmp04, _tmp02), _tmp06);
                    __m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v2)));
                    __m128 _r0tm5 = _mm_add_ps(_tmp56a, _tmp56b); __m128 _r0tm6 = _mm_sub_ps(_tmp56a, _tmp56b);
                    _mm_store_ps(r0_tm_0, _r0tm0); _mm_store_ps(r0_tm_1, _r0tm1); _mm_store_ps(r0_tm_2, _r0tm2); _mm_store_ps(r0_tm_3, _r0tm3);
                    _mm_store_ps(r0_tm_4, _r0tm4); _mm_store_ps(r0_tm_5, _r0tm5); _mm_store_ps(r0_tm_6, _r0tm6); _mm_store_ps(r0_tm_7, _r0tm7);
                    r0_tm_0 += tiles * 4 * 8; r0_tm_1 += tiles * 4 * 8; r0_tm_2 += tiles * 4 * 8; r0_tm_3 += tiles * 4 * 8;
                    r0_tm_4 += tiles * 4 * 8; r0_tm_5 += tiles * 4 * 8; r0_tm_6 += tiles * 4 * 8; r0_tm_7 += tiles * 4 * 8;
                }
            }
        }
    }
}
static void conv3x3s1_winograd63_transform_output_pack4_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c;
    const int w_tiles = outw / 6; const int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles;
    const float* biasptr = bias;
    // const float otm[6][8] = {
    // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
    // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f, -16.0f, 0.0f},
    // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
    // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
    // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
    // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
    // };
    // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
    // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 = (r1 + r2) + (r3 + r4) * 16 + (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32 + (r5 - r6)
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);
        __m128 _bias0 = biasptr ? _mm_loadu_ps(biasptr + p * 4) : _mm_setzero_ps();
#ifdef _MSC_VER
        __declspec(align(16))
#else
        __attribute__((aligned(16)))
#endif
        float tmp[6][8][4];
        __m128 _v32 = _mm_set1_ps(32.f); __m128 _v16 = _mm_set1_ps(16.f); __m128 _v8 = _mm_set1_ps(8.f); __m128 _v4 = _mm_set1_ps(4.f); __m128 _v2 = _mm_set1_ps(2.f);
f); //tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float *output0_tm_0 = (const float *)out0_tm + (i * w_tiles + j) * 4; const float *output0_tm_1 = output0_tm_0 + tiles * 4; const float *output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float *output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float *output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float *output0_tm_5 = output0_tm_0 + tiles * 4 * 5; const float *output0_tm_6 = output0_tm_0 + tiles * 4 * 6; const float *output0_tm_7 = output0_tm_0 + tiles * 4 * 7; float *output0 = out0.row(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { __m128 _out0tm0 = _mm_load_ps(output0_tm_0); __m128 _out0tm1 = _mm_load_ps(output0_tm_1); __m128 _out0tm2 = _mm_load_ps(output0_tm_2); __m128 _out0tm3 = _mm_load_ps(output0_tm_3); __m128 _out0tm4 = _mm_load_ps(output0_tm_4); __m128 _out0tm5 = _mm_load_ps(output0_tm_5); __m128 _out0tm6 = _mm_load_ps(output0_tm_6); __m128 _out0tm7 = _mm_load_ps(output0_tm_7); __m128 _tmp024a = _mm_add_ps(_out0tm1, _out0tm2); __m128 _tmp135a = _mm_sub_ps(_out0tm1, _out0tm2); __m128 _tmp024b = _mm_add_ps(_out0tm3, _out0tm4); __m128 _tmp135b = _mm_sub_ps(_out0tm3, _out0tm4); __m128 _tmp024c = _mm_add_ps(_out0tm5, _out0tm6); __m128 _tmp135c = _mm_sub_ps(_out0tm5, _out0tm6); __m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b)); __m128 _tmp2m = _mm_comp_fmadd_ps(_v8, _tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a)); __m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, _tmp024a)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[4][m], _tmp4m); __m128 _tmp1m = _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a)); __m128 _tmp3m = _mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a)); __m128 _tmp5m = _mm_add_ps(_mm_add_ps(_out0tm7, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c)); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[5][m], _tmp5m); output0_tm_0 += tiles * 4 * 8; output0_tm_1 += tiles * 4 * 8; output0_tm_2 += tiles * 4 * 8; output0_tm_3 += tiles * 4 * 8; output0_tm_4 += tiles * 4 * 8; output0_tm_5 += tiles * 4 * 8; output0_tm_6 += tiles * 4 * 8; output0_tm_7 += tiles * 4 * 8; } for (int m = 0; m < 6; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp06 = _mm_load_ps(tmp[m][6]); __m128 _tmp07 = _mm_load_ps(tmp[m][7]); __m128 _tmp024a = _mm_add_ps(_tmp01, _tmp02); __m128 _tmp135a = _mm_sub_ps(_tmp01, _tmp02); __m128 _tmp024b = _mm_add_ps(_tmp03, _tmp04); __m128 _tmp135b = _mm_sub_ps(_tmp03, _tmp04); __m128 _tmp024c = _mm_add_ps(_tmp05, _tmp06); __m128 _tmp135c = _mm_sub_ps(_tmp05, _tmp06); __m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b))); __m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, _tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a))); __m128 _out04 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, _tmp024a))); _mm_store_ps(output0, _out00); _mm_store_ps(output0 + 4 * 2, _out02); _mm_store_ps(output0 + 4 * 4, _out04); __m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a))); __m128 _out03 = 
_mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a))); __m128 _out05 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp07, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c))); _mm_store_ps(output0 + 4, _out01); _mm_store_ps(output0 + 4 * 3, _out03); _mm_store_ps(output0 + 4 * 5, _out05); output0 += outw * 4; } } } } } static void conv3x3s1_winograd43_transform_input_pack4_sse(const Mat & bottom_blob, Mat & bottom_blob_tm, const Option & opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 4; const int h_tiles = (h - 2) / 4; const int tiles = w_tiles * h_tiles; //const float itm[6][6] = { //{4.0 f, 0.0 f, -5.0 f, 0.0 f, 1.0 f, 0.0 f}, //{0.0 f, -4.0 f, -4.0 f, 1.0 f, 1.0 f, 0.0 f}, //{0.0 f, 4.0 f, -4.0 f, -1.0 f, 1.0 f, 0.0 f}, //{0.0 f, -2.0 f, -1.0 f, 2.0 f, 1.0 f, 0.0 f}, //{0.0 f, 2.0 f, -1.0 f, -2.0 f, 1.0 f, 0.0 f}, //{0.0 f, 4.0 f, 0.0 f, -5.0 f, 0.0 f, 1.0 f} //}; //0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[6][6][4]; __m128 _vm5 = _mm_set1_ps(-5. f); __m128 _vm4 = _mm_set1_ps(-4. f); __m128 _v4 = _mm_set1_ps(4. f); __m128 _vm2 = _mm_set1_ps(-2. f); __m128 _v2 = _mm_set1_ps(2. f); //tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float *r0 = img0.row(i * 4) + (j * 4) * 4; for (int m = 0; m < 6; m++) { __m128 _r00 = _mm_load_ps(r0); __m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 4 * 2); __m128 _r03 = _mm_load_ps(r0 + 4 * 3); __m128 _r04 = _mm_load_ps(r0 + 4 * 4); __m128 _r05 = _mm_load_ps(r0 + 4 * 5); __m128 _tmp0m = _mm_comp_fmadd_ps(_vm5, _r02, _mm_comp_fmadd_ps(_v4, _r00, _r04)); __m128 _tmp1m = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_r01, _r02), _mm_add_ps(_r04, _r03)); __m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_r01, _r02), _mm_sub_ps(_r04, _r03)); __m128 _tmp3m = _mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02)); __m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02)); __m128 _tmp5m = _mm_comp_fmadd_ps(_vm5, _r03, _mm_comp_fmadd_ps(_v4, _r01, _r05)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[3][m], _tmp3m); _mm_store_ps(tmp[4][m], _tmp4m); _mm_store_ps(tmp[5][m], _tmp5m); r0 += w * 4; } float *r0_tm_0 = (float *)img0_tm + (i * w_tiles + j) * 4; float *r0_tm_1 = r0_tm_0 + tiles * 4; float *r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float *r0_tm_3 = r0_tm_0 + tiles * 4 * 3; float *r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float *r0_tm_5 = r0_tm_0 + tiles * 4 * 5; for (int m = 0; m < 6; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _r0tm0 = _mm_comp_fmadd_ps(_vm5, _tmp02, _mm_comp_fmadd_ps(_v4, _tmp00, _tmp04)); __m128 _r0tm1 = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_tmp01, _tmp02), _mm_add_ps(_tmp04, _tmp03)); __m128 _r0tm2 = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_tmp01, _tmp02), 
_mm_sub_ps(_tmp04, _tmp03)); __m128 _r0tm3 = _mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02)); __m128 _r0tm4 = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02)); __m128 _r0tm5 = _mm_comp_fmadd_ps(_vm5, _tmp03, _mm_comp_fmadd_ps(_v4, _tmp01, _tmp05)); _mm_store_ps(r0_tm_0, _r0tm0); _mm_store_ps(r0_tm_1, _r0tm1); _mm_store_ps(r0_tm_2, _r0tm2); _mm_store_ps(r0_tm_3, _r0tm3); _mm_store_ps(r0_tm_4, _r0tm4); _mm_store_ps(r0_tm_5, _r0tm5); r0_tm_0 += tiles * 4 * 6; r0_tm_1 += tiles * 4 * 6; r0_tm_2 += tiles * 4 * 6; r0_tm_3 += tiles * 4 * 6; r0_tm_4 += tiles * 4 * 6; r0_tm_5 += tiles * 4 * 6; } } } } } static void conv3x3s1_winograd43_transform_output_pack4_sse(const Mat & top_blob_tm, Mat & top_blob, const Mat & bias, const Option & opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 4; const int h_tiles = outh / 4; const int tiles = w_tiles * h_tiles; const float *biasptr = bias; //const float otm[4][6] = { //{1.0 f, 1.0 f, 1.0 f, 1.0 f, 1.0 f, 0.0 f}, //{0.0 f, 1.0 f, -1.0 f, 2.0 f, -2.0 f, 0.0 f}, //{0.0 f, 1.0 f, 1.0 f, 4.0 f, 4.0 f, 0.0 f}, //{0.0 f, 1.0 f, -1.0 f, 8.0 f, -8.0 f, 1.0 f} //}; //0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); __m128 _bias0 = biasptr ? _mm_loadu_ps(biasptr + p * 4) : _mm_setzero_ps(); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[4][6][4]; __m128 _v2 = _mm_set1_ps(2. f); __m128 _v4 = _mm_set1_ps(4. f); __m128 _v8 = _mm_set1_ps(8. 
f); //tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float *output0_tm_0 = (const float *)out0_tm + (i * w_tiles + j) * 4; const float *output0_tm_1 = output0_tm_0 + tiles * 4; const float *output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float *output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float *output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float *output0_tm_5 = output0_tm_0 + tiles * 4 * 5; float *output0 = out0.row(i * 4) + (j * 4) * 4; for (int m = 0; m < 6; m++) { __m128 _out0tm0 = _mm_load_ps(output0_tm_0); __m128 _out0tm1 = _mm_load_ps(output0_tm_1); __m128 _out0tm2 = _mm_load_ps(output0_tm_2); __m128 _out0tm3 = _mm_load_ps(output0_tm_3); __m128 _out0tm4 = _mm_load_ps(output0_tm_4); __m128 _out0tm5 = _mm_load_ps(output0_tm_5); __m128 _tmp02a = _mm_add_ps(_out0tm1, _out0tm2); __m128 _tmp13a = _mm_sub_ps(_out0tm1, _out0tm2); __m128 _tmp02b = _mm_add_ps(_out0tm3, _out0tm4); __m128 _tmp13b = _mm_sub_ps(_out0tm3, _out0tm4); __m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp02a), _tmp02b); __m128 _tmp1m = _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a); __m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a); __m128 _tmp3m = _mm_comp_fmadd_ps(_v8, _tmp13b, _mm_add_ps(_out0tm5, _tmp13a)); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 4 * 6; output0_tm_1 += tiles * 4 * 6; output0_tm_2 += tiles * 4 * 6; output0_tm_3 += tiles * 4 * 6; output0_tm_4 += tiles * 4 * 6; output0_tm_5 += tiles * 4 * 6; } for (int m = 0; m < 4; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _tmp04 = _mm_load_ps(tmp[m][4]); __m128 _tmp05 = _mm_load_ps(tmp[m][5]); __m128 _tmp02a = _mm_add_ps(_tmp01, _tmp02); __m128 _tmp13a = _mm_sub_ps(_tmp01, _tmp02); __m128 _tmp02b = _mm_add_ps(_tmp03, _tmp04); __m128 _tmp13b = _mm_sub_ps(_tmp03, _tmp04); __m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp02a), _tmp02b)); __m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a)); __m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a)); __m128 _out03 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, _tmp13b, _mm_add_ps(_tmp05, _tmp13a))); _mm_store_ps(output0, _out00); _mm_store_ps(output0 + 4, _out01); _mm_store_ps(output0 + 4 * 2, _out02); _mm_store_ps(output0 + 4 * 3, _out03); output0 += outw * 4; } } } } } static void conv3x3s1_winograd23_transform_input_pack4_sse(const Mat & bottom_blob, Mat & bottom_blob_tm, const Option & opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 2; const int h_tiles = (h - 2) / 2; const int tiles = w_tiles * h_tiles; //const float itm[4][4] = { //{1.0 f, 0.0 f, -1.0 f, 0.0 f}, //{0.0 f, 1.0 f, 1.00 f, 0.0 f}, //{0.0 f, -1.0 f, 1.00 f, 0.0 f}, //{0.0 f, -1.0 f, 0.00 f, 1.0 f} //}; //0 = r00 - r02 // 1 = r01 + r02 // 2 = r02 - r01 // 3 = r03 - r01 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[4][4][4]; //tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float *r0 = img0.row(i * 2) + (j * 2) * 4; for (int m = 0; m < 4; m++) { __m128 _r00 = _mm_load_ps(r0); 
__m128 _r01 = _mm_load_ps(r0 + 4); __m128 _r02 = _mm_load_ps(r0 + 4 * 2); __m128 _r03 = _mm_load_ps(r0 + 4 * 3); __m128 _tmp0m = _mm_sub_ps(_r00, _r02); __m128 _tmp1m = _mm_add_ps(_r01, _r02); __m128 _tmp2m = _mm_sub_ps(_r02, _r01); __m128 _tmp3m = _mm_sub_ps(_r03, _r01); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); _mm_store_ps(tmp[2][m], _tmp2m); _mm_store_ps(tmp[3][m], _tmp3m); r0 += w * 4; } float *r0_tm_0 = (float *)img0_tm + (i * w_tiles + j) * 4; float *r0_tm_1 = r0_tm_0 + tiles * 4; float *r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float *r0_tm_3 = r0_tm_0 + tiles * 4 * 3; for (int m = 0; m < 4; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _r0tm0 = _mm_sub_ps(_tmp00, _tmp02); __m128 _r0tm1 = _mm_add_ps(_tmp01, _tmp02); __m128 _r0tm2 = _mm_sub_ps(_tmp02, _tmp01); __m128 _r0tm3 = _mm_sub_ps(_tmp03, _tmp01); _mm_store_ps(r0_tm_0, _r0tm0); _mm_store_ps(r0_tm_1, _r0tm1); _mm_store_ps(r0_tm_2, _r0tm2); _mm_store_ps(r0_tm_3, _r0tm3); r0_tm_0 += tiles * 4 * 4; r0_tm_1 += tiles * 4 * 4; r0_tm_2 += tiles * 4 * 4; r0_tm_3 += tiles * 4 * 4; } } } } } static void conv3x3s1_winograd23_transform_output_pack4_sse(const Mat & top_blob_tm, Mat & top_blob, const Mat & bias, const Option & opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 2; const int h_tiles = outh / 2; const int tiles = w_tiles * h_tiles; const float *biasptr = bias; //const float otm[2][4] = { //{1.0 f, 1.0 f, 1.0 f, 0.0 f}, //{0.0 f, 1.0 f, -1.0 f, 1.0 f} //}; //0 = r00 + r01 + r02 // 1 = r01 - r02 + r03 #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); __m128 _bias0 = biasptr ? _mm_loadu_ps(biasptr + p * 4) : _mm_setzero_ps(); #ifdef _MSC_VER __declspec(align(16)) #else __attribute__((aligned(16))) #endif float tmp[2][4][4]; //tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float *output0_tm_0 = (const float *)out0_tm + (i * w_tiles + j) * 4; const float *output0_tm_1 = output0_tm_0 + tiles * 4; const float *output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float *output0_tm_3 = output0_tm_0 + tiles * 4 * 3; float *output0 = out0.row(i * 2) + (j * 2) * 4; for (int m = 0; m < 4; m++) { __m128 _out0tm0 = _mm_load_ps(output0_tm_0); __m128 _out0tm1 = _mm_load_ps(output0_tm_1); __m128 _out0tm2 = _mm_load_ps(output0_tm_2); __m128 _out0tm3 = _mm_load_ps(output0_tm_3); __m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _out0tm1), _out0tm2); __m128 _tmp1m = _mm_add_ps(_mm_sub_ps(_out0tm1, _out0tm2), _out0tm3); _mm_store_ps(tmp[0][m], _tmp0m); _mm_store_ps(tmp[1][m], _tmp1m); output0_tm_0 += tiles * 4 * 4; output0_tm_1 += tiles * 4 * 4; output0_tm_2 += tiles * 4 * 4; output0_tm_3 += tiles * 4 * 4; } for (int m = 0; m < 2; m++) { __m128 _tmp00 = _mm_load_ps(tmp[m][0]); __m128 _tmp01 = _mm_load_ps(tmp[m][1]); __m128 _tmp02 = _mm_load_ps(tmp[m][2]); __m128 _tmp03 = _mm_load_ps(tmp[m][3]); __m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp01), _tmp02)); __m128 _out01 = _mm_add_ps(_bias0, _mm_add_ps(_mm_sub_ps(_tmp01, _tmp02), _tmp03)); _mm_store_ps(output0, _out00); _mm_store_ps(output0 + 4, _out01); output0 += outw * 4; } } } } }
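As a cross-check on the pack4 SSE kernels above, the scalar sketch below (illustrative only; helper names are hypothetical, and the kernel transform G is the standard Lavin-Gray F(2,3) matrix, which this file does not contain) spells out the same 1-D F(2,3) transforms documented in the itm/otm comments and verifies them against direct convolution:

#include <stdio.h>

/* Scalar 1-D F(2,3) Winograd: 2 outputs of a 3-tap filter from 4 inputs
 * with 4 multiplies instead of 6. */
static void winograd23_1d(const float d[4], const float g[3], float y[2])
{
    /* B^T d : input transform, matching the itm rows above */
    float td[4] = { d[0] - d[2], d[1] + d[2], d[2] - d[1], d[3] - d[1] };
    /* G g : kernel transform (standard F(2,3) matrix, assumed) */
    float tg[4] = { g[0], 0.5f * (g[0] + g[1] + g[2]),
                    0.5f * (g[0] - g[1] + g[2]), g[2] };
    /* elementwise product, then A^T m : output transform (otm rows above) */
    float m[4];
    for (int i = 0; i < 4; i++) m[i] = td[i] * tg[i];
    y[0] = m[0] + m[1] + m[2];
    y[1] = m[1] - m[2] + m[3];
}

int main(void)
{
    const float d[4] = { 1.f, 2.f, 3.f, 4.f };
    const float g[3] = { 0.5f, 0.25f, -1.f };
    float y[2];
    winograd23_1d(d, g, y);
    printf("winograd: %g %g\n", y[0], y[1]);   /* expect -2 -2.25 */
    printf("direct:   %g %g\n",
           d[0] * g[0] + d[1] * g[1] + d[2] * g[2],
           d[1] * g[0] + d[2] * g[1] + d[3] * g[2]);
    return 0;
}

The 2-D pack4 kernels apply exactly these row transforms twice, once across rows into tmp and once across columns, processing 4 channels at a time per __m128 lane.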
section1.c
#include <omp.h> #include <stdio.h> int main() { #pragma omp parallel { #pragma omp single printf ("This is from the single directive\n"); #pragma omp sections nowait { #pragma omp section { printf("hello from section 1\n"); } #pragma omp section { printf("hello from section 2\n"); } #pragma omp section { printf("hello from section 3\n"); } } /* end of sections */ }/* end of parallel */ return 0; }
#include <omp.h> #include <stdio.h> int main() { printf("This is from the single directive\n"); { printf("hello from section 1\n"); printf("hello from section 2\n"); printf("hello from section 3\n"); } /* end of sections */ /* end of parallel */ return 0; }
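Read side by side, the pair above shows what stripping the pragmas costs: the first listing hands each printf to whichever thread claims its section, the serial listing runs them in order on one thread. A self-contained sketch (illustrative, not part of this dataset) that makes the thread assignment visible:

#include <omp.h>
#include <stdio.h>

int main()
{
    #pragma omp parallel
    {
        #pragma omp sections nowait /* nowait: skip the barrier at the end */
        {
            #pragma omp section
            { printf("section A on thread %d\n", omp_get_thread_num()); }
            #pragma omp section
            { printf("section B on thread %d\n", omp_get_thread_num()); }
        }
    } /* end of parallel */
    return 0;
}

Compiled with e.g. gcc -fopenmp, each section still executes exactly once; only the printing thread, and hence the interleaving, varies from run to run.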
/****************************************************************************** * * Member functions for hypre_SStructPMatrix class. * *****************************************************************************/ #include "_hypre_sstruct_mv.h" #include "_hypre_struct_mv.hpp" /* * ========================================================================== * SStructPMatrix routines * ========================================================================== */ /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixRef(hypre_SStructPMatrix * matrix, hypre_SStructPMatrix ** matrix_ref) { hypre_SStructPMatrixRefCount(matrix)++; *matrix_ref = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixCreate(MPI_Comm comm, hypre_SStructPGrid * pgrid, hypre_SStructStencil ** stencils, hypre_SStructPMatrix ** pmatrix_ptr) { hypre_SStructPMatrix *pmatrix; HYPRE_Int nvars; HYPRE_Int **smaps; hypre_StructStencil ***sstencils; hypre_StructMatrix ***smatrices; HYPRE_Int **symmetric; hypre_StructStencil *sstencil; HYPRE_Int *vars; hypre_Index *sstencil_shape; HYPRE_Int sstencil_size; HYPRE_Int new_dim; HYPRE_Int *new_sizes; hypre_Index **new_shapes; HYPRE_Int size; hypre_StructGrid *sgrid; HYPRE_Int vi, vj; HYPRE_Int i, j, k; pmatrix = hypre_TAlloc(hypre_SStructPMatrix, 1, HYPRE_MEMORY_HOST); hypre_SStructPMatrixComm(pmatrix) = comm; hypre_SStructPMatrixPGrid(pmatrix) = pgrid; hypre_SStructPMatrixStencils(pmatrix) = stencils; nvars = hypre_SStructPGridNVars(pgrid); hypre_SStructPMatrixNVars(pmatrix) = nvars; /* create sstencils */ smaps = hypre_TAlloc(HYPRE_Int *, nvars, HYPRE_MEMORY_HOST); sstencils = hypre_TAlloc(hypre_StructStencil **, nvars, HYPRE_MEMORY_HOST); new_sizes = hypre_TAlloc(HYPRE_Int, nvars, HYPRE_MEMORY_HOST); new_shapes = hypre_TAlloc(hypre_Index *, nvars, HYPRE_MEMORY_HOST); size = 0; for (vi = 0; vi < nvars; vi++) { sstencils[vi] = hypre_TAlloc(hypre_StructStencil *, nvars, HYPRE_MEMORY_HOST); for (vj = 0; vj < nvars; vj++) { sstencils[vi][vj] = NULL; new_sizes[vj] = 0; } sstencil = hypre_SStructStencilSStencil(stencils[vi]); vars = hypre_SStructStencilVars(stencils[vi]); sstencil_shape = hypre_StructStencilShape(sstencil); sstencil_size = hypre_StructStencilSize(sstencil); smaps[vi] = hypre_TAlloc(HYPRE_Int, sstencil_size, HYPRE_MEMORY_HOST); for (i = 0; i < sstencil_size; i++) { j = vars[i]; new_sizes[j]++; } for (vj = 0; vj < nvars; vj++) { if (new_sizes[vj]) { new_shapes[vj] = hypre_TAlloc(hypre_Index, new_sizes[vj], HYPRE_MEMORY_HOST); new_sizes[vj] = 0; } } for (i = 0; i < sstencil_size; i++) { j = vars[i]; k = new_sizes[j]; hypre_CopyIndex(sstencil_shape[i], new_shapes[j][k]); smaps[vi][i] = k; new_sizes[j]++; } new_dim = hypre_StructStencilNDim(sstencil); for (vj = 0; vj < nvars; vj++) { if (new_sizes[vj]) { sstencils[vi][vj] = hypre_StructStencilCreate(new_dim, new_sizes[vj], new_shapes[vj]); } size = hypre_max(size, new_sizes[vj]); } } hypre_SStructPMatrixSMaps(pmatrix) = smaps; hypre_SStructPMatrixSStencils(pmatrix) = sstencils; hypre_TFree(new_sizes, HYPRE_MEMORY_HOST); hypre_TFree(new_shapes, HYPRE_MEMORY_HOST); /* create smatrices */ smatrices = hypre_TAlloc(hypre_StructMatrix **, nvars, HYPRE_MEMORY_HOST); for (vi = 0; vi < nvars; vi++) { smatrices[vi] = 
hypre_TAlloc(hypre_StructMatrix *, nvars, HYPRE_MEMORY_HOST); for (vj = 0; vj < nvars; vj++) { smatrices[vi][vj] = NULL; if (sstencils[vi][vj] != NULL) { sgrid = hypre_SStructPGridSGrid(pgrid, vi); smatrices[vi][vj] = hypre_StructMatrixCreate(comm, sgrid, sstencils[vi][vj]); } } } hypre_SStructPMatrixSMatrices(pmatrix) = smatrices; /* create symmetric */ symmetric = hypre_TAlloc(HYPRE_Int *, nvars, HYPRE_MEMORY_HOST); for (vi = 0; vi < nvars; vi++) { symmetric[vi] = hypre_TAlloc(HYPRE_Int, nvars, HYPRE_MEMORY_HOST); for (vj = 0; vj < nvars; vj++) { symmetric[vi][vj] = 0; } } hypre_SStructPMatrixSymmetric(pmatrix) = symmetric; hypre_SStructPMatrixSEntriesSize(pmatrix) = size; hypre_SStructPMatrixSEntries(pmatrix) = hypre_TAlloc(HYPRE_Int, size, HYPRE_MEMORY_HOST); hypre_SStructPMatrixRefCount(pmatrix) = 1; *pmatrix_ptr = pmatrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixDestroy(hypre_SStructPMatrix * pmatrix) { hypre_SStructStencil **stencils; HYPRE_Int nvars; HYPRE_Int **smaps; hypre_StructStencil ***sstencils; hypre_StructMatrix ***smatrices; HYPRE_Int **symmetric; HYPRE_Int vi, vj; if (pmatrix) { hypre_SStructPMatrixRefCount(pmatrix)--; if (hypre_SStructPMatrixRefCount(pmatrix) == 0) { stencils = hypre_SStructPMatrixStencils(pmatrix); nvars = hypre_SStructPMatrixNVars(pmatrix); smaps = hypre_SStructPMatrixSMaps(pmatrix); sstencils = hypre_SStructPMatrixSStencils(pmatrix); smatrices = hypre_SStructPMatrixSMatrices(pmatrix); symmetric = hypre_SStructPMatrixSymmetric(pmatrix); for (vi = 0; vi < nvars; vi++) { HYPRE_SStructStencilDestroy(stencils[vi]); hypre_TFree(smaps[vi], HYPRE_MEMORY_HOST); for (vj = 0; vj < nvars; vj++) { hypre_StructStencilDestroy(sstencils[vi][vj]); hypre_StructMatrixDestroy(smatrices[vi][vj]); } hypre_TFree(sstencils[vi], HYPRE_MEMORY_HOST); hypre_TFree(smatrices[vi], HYPRE_MEMORY_HOST); hypre_TFree(symmetric[vi], HYPRE_MEMORY_HOST); } hypre_TFree(stencils, HYPRE_MEMORY_HOST); hypre_TFree(smaps, HYPRE_MEMORY_HOST); hypre_TFree(sstencils, HYPRE_MEMORY_HOST); hypre_TFree(smatrices, HYPRE_MEMORY_HOST); hypre_TFree(symmetric, HYPRE_MEMORY_HOST); hypre_TFree(hypre_SStructPMatrixSEntries(pmatrix), HYPRE_MEMORY_HOST); hypre_TFree(pmatrix, HYPRE_MEMORY_HOST); } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixInitialize(hypre_SStructPMatrix * pmatrix) { HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix); HYPRE_Int **symmetric = hypre_SStructPMatrixSymmetric(pmatrix); hypre_StructMatrix *smatrix; HYPRE_Int vi, vj; /* HYPRE_Int num_ghost[2*HYPRE_MAXDIM]; */ /* HYPRE_Int vi, vj, d, ndim; */ #if 0 ndim = hypre_SStructPMatrixNDim(pmatrix); /* * RDF: Why are the ghosts being reset to one? Maybe it needs to be at * least one to set shared coefficients correctly, but not exactly one? 
*/ for (d = 0; d < ndim; d++) { num_ghost[2 * d] = num_ghost[2 * d + 1] = 1; } #endGetInfo(toentries[toi], (void **)&toinfo); if (hypre_SStructBoxManInfoType(frinfo) == hypre_SStructBoxManInfoType(toinfo)) { continue; } } hypre_BoxManEntryGetExtents( frentries[fri], hypre_BoxIMin(frbox), hypre_BoxIMax(frbox)); hypre_IntersectBoxes(ibox0, frbox, ibox1); if (hypre_BoxVolume(ibox1)) { HYPRE_Int tvalues_new_size = hypre_BoxVolume(ibox1); tvalues = hypre_TReAlloc_v2(tvalues, HYPRE_Complex, tvalues_size, HYPRE_Complex, tvalues_new_size, HYPRE_MEMORY_DEVICE); tvalues_size = tvalues_new_size; if (action >= 0) { /* set or add */ /* copy values into tvalues */ start = hypre_BoxIMin(ibox1); hypre_BoxGetSize(ibox1, loop_size); #undef DEVICE_VAR #define DEVICE_VAR is_device_ptr(tvalues,values) hypre_BoxLoop2Begin(ndim, loop_size, ibox1, start, stride, mi, value_box, start, stride, vi); { tvalues[mi] = values[ei + vi * nentries]; } hypre_BoxLoop2End(mi, vi); #undef DEVICE_VAR #define DEVICE_VAR /* put values into UMatrix */ hypre_SStructUMatrixSetBoxValues( matrix, part, ibox1, var, 1, &entry, ibox1, tvalues, action); /* zero out values in PMatrix (possibly in ghost) */ hypre_StructMatrixClearBoxValues( smatrix, ibox1, 1, &sentry, -1, 1); } else { /* get */ /* get values from UMatrix */ hypre_SStructUMatrixSetBoxValues( matrix, part, ibox1, var, 1, &entry, ibox1, tvalues, action); /* copy tvalues into values */ start = hypre_BoxIMin(ibox1); hypre_BoxGetSize(ibox1, loop_size); #undef DEVICE_VAR #define DEVICE_VAR is_device_ptr(tvalues,values) hypre_BoxLoop2Begin(ndim, loop_size, ibox1, start, stride, mi, value_box, start, stride, vi); { values[ei + vi * nentries] = tvalues[mi]; } hypre_BoxLoop2End(mi, vi); #undef DEVICE_VAR #define DEVICE_VAR } /* end if action */ } /* end if nonzero ibox1 */ } /* end of "from" boxman entries loop */ hypre_TFree(frentries, HYPRE_MEMORY_HOST); } /* end if nonzero ibox0 */ } /* end of "to" boxman entries loop */ hypre_TFree(toentries, HYPRE_MEMORY_HOST); } /* end of entries loop */ hypre_BoxDestroy(box); hypre_BoxDestroy(ibox0); hypre_BoxDestroy(ibox1); hypre_BoxDestroy(tobox); hypre_BoxDestroy(frbox); hypre_TFree(tvalues, HYPRE_MEMORY_DEVICE); return hypre_error_flag; }
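hypre_SStructPMatrixRef and hypre_SStructPMatrixDestroy above implement shared ownership through a reference count: Ref bumps the counter and hands back an alias, Destroy decrements and tears the matrix down only when the count reaches zero. A minimal generic sketch of that pattern (plain C with a hypothetical Obj type; not the hypre API):

#include <stdlib.h>

typedef struct { int refcount; /* ... payload ... */ } Obj;

static void obj_ref(Obj *obj, Obj **obj_ref_out)
{
    obj->refcount++;     /* like hypre_SStructPMatrixRef: bump... */
    *obj_ref_out = obj;  /* ...and return an alias */
}

static void obj_destroy(Obj *obj)
{
    if (obj && --obj->refcount == 0)
        free(obj);       /* last owner releases the payload */
}

int main(void)
{
    Obj *a = malloc(sizeof *a), *b;
    a->refcount = 1;
    obj_ref(a, &b);    /* two owners */
    obj_destroy(a);    /* payload survives */
    obj_destroy(b);    /* payload freed here */
    return 0;
}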
GrB_Vector_wait.c
//------------------------------------------------------------------------------ // GrB_Vector_wait: wait for a vector to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Finishes all work on a vector, followed by an OpenMP flush. #include "GB.h" #define GB_FREE_ALL ; GrB_Info GrB_Vector_wait // finish all work on a vector ( GrB_Vector *v ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- #pragma omp flush GB_WHERE ((*v), "GrB_Vector_wait (&v)") ; GB_RETURN_IF_NULL (v) ; GB_RETURN_IF_NULL_OR_FAULTY (*v) ; //-------------------------------------------------------------------------- // finish all pending work on the vector //-------------------------------------------------------------------------- if (GB_ANY_PENDING_WORK (*v)) { GrB_Info info ; GB_BURBLE_START ("GrB_Vector_wait") ; GB_OK (GB_wait ((GrB_Matrix) (*v), "vector", Context)) ; GB_BURBLE_END ; } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
// ------------------------------------------------------------------------------ // GrB_Vector_wait: wait for a vector to complete // ------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // ------------------------------------------------------------------------------ // Finishes all work on a vector, followed by an OpenMP flush. #include "GB.h" #define GB_FREE_ALL ; GrB_Info GrB_Vector_wait // finish all work on a vector ( GrB_Vector *v ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE((*v), "GrB_Vector_wait (&v)"); GB_RETURN_IF_NULL(v); GB_RETURN_IF_NULL_OR_FAULTY(*v); //-------------------------------------------------------------------------- // finish all pending work on the vector //-------------------------------------------------------------------------- if (GB_ANY_PENDING_WORK(*v)) { GrB_Info info; GB_BURBLE_START("GrB_Vector_wait"); GB_OK(GB_wait((GrB_Matrix) (*v), "vector", Context)); GB_BURBLE_END; } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- return (GrB_SUCCESS); }
// ------------------------------------------------------------------------------ // GrB_Vector_wait: wait for a vector to complete // ------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // ------------------------------------------------------------------------------ // Finishes all work on a vector, followed by an OpenMP flush. #include "GB.h" #define GB_FREE_ALL ; GrB_Info GrB_Vector_wait // finish all work on a vector ( GrB_Vector *v ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- #pragma omp flush GB_WHERE((*v), "GrB_Vector_wait (&v)"); GB_RETURN_IF_NULL(v); GB_RETURN_IF_NULL_OR_FAULTY(*v); //-------------------------------------------------------------------------- // finish all pending work on the vector //-------------------------------------------------------------------------- if (GB_ANY_PENDING_WORK(*v)) { GrB_Info info; GB_BURBLE_START("GrB_Vector_wait"); GB_OK(GB_wait((GrB_Matrix) (*v), "vector", Context)); GB_BURBLE_END; } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS); }
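The three columns above differ only in whether the two #pragma omp flush lines survive formatting; the flush is what makes one thread's completed work on the vector visible to others at that point. A minimal sketch of the flush memory-model idea (an illustrative producer/consumer spin, not GraphBLAS code; a real program would prefer atomics to a busy wait):

#include <omp.h>
#include <stdio.h>

int main(void)
{
    int flag = 0;
    #pragma omp parallel num_threads(2)
    {
        if (omp_get_thread_num() == 0) {
            flag = 1;
            #pragma omp flush(flag)      /* publish the write */
        } else {
            int seen = 0;
            while (!seen) {
                #pragma omp flush(flag)  /* force a re-read of shared state */
                seen = flag;
            }
            printf("flag observed\n");
        }
    }
    return 0;
}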
XT_ICD_update.c
/* ============================================================================ * Copyright (c) 2013 K. Aditya Mohan (Purdue University) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * Neither the name of K. Aditya Mohan, Purdue * University, nor the names of its contributors may be used * to endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include "XT_Constants.h" #include <stdio.h> #include <math.h> #include <stdlib.h> #include "allocate.h" #include "randlib.h" #include <time.h> #include "XT_AMatrix.h" #include "XT_Profile.h" #include "XT_Structures.h" #include "XT_IOMisc.h" #include "XT_NHICD.h" #include "omp.h" #include "XT_MPI.h" #include <mpi.h> #include "XT_VoxUpdate.h" #include "XT_ForwardProject.h" #include "XT_MPIIO.h" #include "XT_Debug.h" #include "XT_OffsetError.h" /*computes the location of (i,j,k) th element in a 1D array*/ int32_t array_loc_1D (int32_t i, int32_t j, int32_t k, int32_t N_j, int32_t N_k) { return (i*N_j*N_k + j*N_k + k); } /*finds the maximum in a array 'array_in' with number of elements being 'num'*/ int32_t find_max(int32_t* array_in, int32_t num) { int32_t i, maxnum; maxnum = array_in[0]; for (i=1; i<num; i++) if (array_in[i] > maxnum) maxnum = array_in[i]; return(maxnum); } /*converts the value 'val' to hounsfield units and returns it*/ Real_t convert2Hounsfield (Real_t val) { Real_t slope, c; slope=(HOUNSFIELD_WATER_MAP-HOUNSFIELD_AIR_MAP)/(WATER_MASS_ATT_COEFF*WATER_DENSITY-AIR_MASS_ATT_COEFF*AIR_DENSITY)/HFIELD_UNIT_CONV_CONST; c=-slope*(AIR_MASS_ATT_COEFF*AIR_DENSITY*HFIELD_UNIT_CONV_CONST); return (slope*val + c); } /*Computes the qGGMRF spatial prior cost value at delta = x_i - x_j. i & j being the voxel and its neighbor*/ Real_t CE_QGGMRF_Spatial_Value(Real_t delta, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr) { return ((pow(fabs(delta),MRF_Q)/TomoInputsPtr->Sigma_S_Q)/(ScannedObjectPtr->C_S + pow(fabs(delta),MRF_Q - MRF_P)/TomoInputsPtr->Sigma_S_Q_P)); } /*Computes the qGGMRF temporal prior cost value at delta = x_i - x_j. 
i & j being the voxel and its neighbor*/ Real_t CE_QGGMRF_Temporal_Value(Real_t delta, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr) { return ((pow(fabs(delta),MRF_Q)/TomoInputsPtr->Sigma_T_Q)/(ScannedObjectPtr->C_T + pow(fabs(delta),MRF_Q - MRF_P)/TomoInputsPtr->Sigma_T_Q_P)); } /*Computes the qGGMRF spatial prior derivative at delta = x_i - x_j. i & j being the voxel and its neighbor*/ Real_t CE_QGGMRF_Spatial_Derivative(Real_t delta, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr) { Real_t temp1,temp2,temp3; temp1=pow(fabs(delta),MRF_Q - MRF_P)/(TomoInputsPtr->Sigma_S_Q_P); temp2=pow(fabs(delta),MRF_Q - 1); temp3 = ScannedObjectPtr->C_S + temp1; if(delta < 0) return ((-1*temp2/(temp3*TomoInputsPtr->Sigma_S_Q))*(MRF_Q - ((MRF_Q-MRF_P)*temp1)/(temp3))); else { return ((temp2/(temp3*TomoInputsPtr->Sigma_S_Q))*(MRF_Q - ((MRF_Q-MRF_P)*temp1)/(temp3))); } } /*Computes the qGGMRF temporal prior derivative at delta = x_i - x_j. i & j being the voxel and its neighbor*/ Real_t CE_QGGMRF_Temporal_Derivative(Real_t delta, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr) { Real_t temp1,temp2,temp3; temp1 = pow(fabs(delta),MRF_Q - MRF_P)/(TomoInputsPtr->Sigma_T_Q_P); temp2 = pow(fabs(delta),MRF_Q - 1); temp3 = ScannedObjectPtr->C_T + temp1; if(delta < 0) return ((-1*temp2/(temp3*TomoInputsPtr->Sigma_T_Q))*(MRF_Q - ((MRF_Q-MRF_P)*temp1)/(temp3))); else { return ((temp2/(temp3*TomoInputsPtr->Sigma_T_Q))*(MRF_Q - ((MRF_Q-MRF_P)*temp1)/(temp3))); } } /*Computes the qGGMRF spatial prior second derivative at delta = 0*/ Real_t CE_QGGMRF_Spatial_SecondDerivative(ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr) { return MRF_Q/(TomoInputsPtr->Sigma_S_Q*ScannedObjectPtr->C_S); } /*Computes the qGGMRF spatial prior second derivative at delta = 0*/ Real_t CE_QGGMRF_Temporal_SecondDerivative(ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr) { return MRF_Q/(TomoInputsPtr->Sigma_T_Q*ScannedObjectPtr->C_T); } /*Computes the voxel update and returns it. V is the present value of voxel. THETA1 and THETA2 are the values used in voxel update. Spatial_Nhood and Time_Nhood gives the values of voxels in the neighborhood of V. 
Time_BDFlag and Spatial_BDFlag are masks which determine whether a neighbor should be included in the neighorhood or not.*/ Real_t CE_FunctionalSubstitution(Real_t V, Real_t THETA1, Real_t THETA2, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_t Spatial_Nhood[NHOOD_Y_MAXDIM][NHOOD_X_MAXDIM][NHOOD_Z_MAXDIM], Real_t Time_Nhood[NHOOD_TIME_MAXDIM-1], bool Spatial_BDFlag[NHOOD_Y_MAXDIM][NHOOD_X_MAXDIM][NHOOD_Z_MAXDIM], bool Time_BDFlag[NHOOD_TIME_MAXDIM-1]) { Real_t u,temp1=0,temp2=0,temp_const,RefValue=0,Delta0; Real_t QGGMRF_Params; int32_t i,j,k; RefValue = V; /*Need to Loop this for multiple iterations of substitute function*/ for (i=0; i < NHOOD_Y_MAXDIM; i++) for (j=0; j < NHOOD_X_MAXDIM; j++) for (k=0; k < NHOOD_Z_MAXDIM; k++) { if(Spatial_BDFlag[i][j][k] == true && (i != (NHOOD_Y_MAXDIM-1)/2 || j != (NHOOD_X_MAXDIM-1)/2 || k != (NHOOD_Z_MAXDIM-1)/2)) { Delta0 = (RefValue - Spatial_Nhood[i][j][k]); if(Delta0 != 0) QGGMRF_Params = CE_QGGMRF_Spatial_Derivative(Delta0,ScannedObjectPtr,TomoInputsPtr)/(Delta0); else { QGGMRF_Params = CE_QGGMRF_Spatial_SecondDerivative(ScannedObjectPtr,TomoInputsPtr); } temp_const = TomoInputsPtr->Spatial_Filter[i][j][k]*QGGMRF_Params; temp1 += temp_const*Spatial_Nhood[i][j][k]; temp2 += temp_const; } } for (i=0; i < NHOOD_TIME_MAXDIM - 1; i++) { if(Time_BDFlag[i] == true) { Delta0 = (RefValue - Time_Nhood[i]); if(Delta0 != 0) QGGMRF_Params = CE_QGGMRF_Temporal_Derivative(Delta0,ScannedObjectPtr,TomoInputsPtr)/(Delta0); else { QGGMRF_Params = CE_QGGMRF_Temporal_SecondDerivative(ScannedObjectPtr,TomoInputsPtr); } temp_const = TomoInputsPtr->Time_Filter[0]*QGGMRF_Params; temp1 += temp_const*Time_Nhood[i]; temp2 += temp_const; } } u=(temp1+ (THETA2*V) - THETA1)/(temp2 + THETA2); RefValue = RefValue + TomoInputsPtr->alpha*(u-RefValue); #ifdef POSITIVITY_CONSTRAINT if (RefValue <= 0) RefValue = 0; #endif return RefValue; } /*computes the value of cost function. 'ErrorSino' is the error sinogram*/ Real_t computeCost(Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino) { Real_t cost=0,temp=0, forward=0, prior=0; Real_t delta; int32_t i,j,k,p,N_z; bool j_minus, k_minus, i_plus, j_plus, k_plus, p_plus; #pragma omp parallel for private(j, k, temp) reduction(+:cost) for (i = 0; i < SinogramPtr->N_p; i++) for (j = 0; j < SinogramPtr->N_r; j++) for (k = 0; k < SinogramPtr->N_t; k++) { temp = ErrorSino[i][j][k] * sqrt(TomoInputsPtr->Weight[i][j][k]); if (SinogramPtr->ProjSelect[i][j][k] == true) temp = temp*temp; else temp = 2.0*TomoInputsPtr->ErrorSinoDelta*TomoInputsPtr->ErrorSinoThresh*fabs(temp) + TomoInputsPtr->ErrorSinoThresh*TomoInputsPtr->ErrorSinoThresh*(1.0-2.0*TomoInputsPtr->ErrorSinoDelta); cost += temp; } cost /= 2.0; /*When computing the cost of the prior term it is important to make sure that you don't include the cost of any pair of neighbors more than once. In this code, a certain sense of causality is used to compute the cost. We also assume that the weghting kernel given by 'Filter' is symmetric. Let i, j and k correspond to the three dimensions. If we go forward to i+1, then all neighbors at j-1, j, j+1, k+1, k, k-1 are to be considered. However, if for the same i, if we go forward to j+1, then all k-1, k, and k+1 should be considered. 
For same i and j, only the neighbor at k+1 is considred.*/ temp = 0; N_z = ScannedObjectPtr->N_z + 2; if (TomoInputsPtr->node_rank == TomoInputsPtr->node_num-1) N_z = ScannedObjectPtr->N_z + 1; #pragma omp parallel for private(delta, p, j, k, j_minus, k_minus, p_plus, i_plus, j_plus, k_plus) reduction(+:temp) for (i = 0; i < ScannedObjectPtr->N_time; i++) for (p = 1; p < ScannedObjectPtr->N_z + 1; p++) for (j = 0; j < ScannedObjectPtr->N_y; j++) { for (k = 0; k < ScannedObjectPtr->N_x; k++) { j_minus = (j - 1 >= 0)? true : false; k_minus = (k - 1 >= 0)? true : false; p_plus = (p + 1 < N_z)? true : false; i_plus = (i + 1 < ScannedObjectPtr->N_time)? true : false; j_plus = (j + 1 < ScannedObjectPtr->N_y)? true : false; k_plus = (k + 1 < ScannedObjectPtr->N_x)? true : false; if(k_plus == true) { delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j][k + 1]); temp += TomoInputsPtr->Spatial_Filter[1][1][2] * CE_QGGMRF_Spatial_Value(delta,ScannedObjectPtr,TomoInputsPtr); } if(j_plus == true) { if(k_minus == true) { delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k - 1]); temp += TomoInputsPtr->Spatial_Filter[1][2][0] * CE_QGGMRF_Spatial_Value(delta,ScannedObjectPtr,TomoInputsPtr); } delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k]); temp += TomoInputsPtr->Spatial_Filter[1][2][1] * CE_QGGMRF_Spatial_Value(delta,ScannedObjectPtr,TomoInputsPtr); if(k_plus == true) { delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k + 1]); temp += TomoInputsPtr->Spatial_Filter[1][2][2] * CE_QGGMRF_Spatial_Value(delta,ScannedObjectPtr,TomoInputsPtr); } } if (p_plus == true) { if(j_minus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k]; temp += TomoInputsPtr->Spatial_Filter[2][0][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p+1][j][k]; temp += TomoInputsPtr->Spatial_Filter[2][1][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); if(j_plus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p+1][j + 1][k]; temp += TomoInputsPtr->Spatial_Filter[2][2][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if(j_minus == true) { if(k_minus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k - 1]; temp += TomoInputsPtr->Spatial_Filter[2][0][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if(k_plus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k + 1]; temp += TomoInputsPtr->Spatial_Filter[2][0][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } } if(k_minus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j][k - 1]; temp += TomoInputsPtr->Spatial_Filter[2][1][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if(j_plus == true) { if(k_minus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j + 1][k - 1]; temp += TomoInputsPtr->Spatial_Filter[2][2][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if(k_plus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j + 1][k + 1]; temp += TomoInputsPtr->Spatial_Filter[2][2][2] * 
CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } } if(k_plus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j][k + 1]; temp += TomoInputsPtr->Spatial_Filter[2][1][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } } if(i_plus == true) { delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i+1][p][j][k]); temp += TomoInputsPtr->Time_Filter[0] * CE_QGGMRF_Temporal_Value(delta,ScannedObjectPtr,TomoInputsPtr); } } } /*Use MPI reduction operation to add the forward and prior costs from all nodes*/ MPI_Reduce(&cost, &forward, 1, MPI_REAL_DATATYPE, MPI_SUM, 0, MPI_COMM_WORLD); MPI_Reduce(&temp, &prior, 1, MPI_REAL_DATATYPE, MPI_SUM, 0, MPI_COMM_WORLD); if (TomoInputsPtr->node_rank == 0) { check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Scaled error sino cost = %f\n",forward); check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Decrease in scaled error sino cost = %f\n",TomoInputsPtr->ErrorSino_Cost-forward); TomoInputsPtr->ErrorSino_Cost = forward; forward += (Real_t)TomoInputsPtr->node_num*(Real_t)SinogramPtr->N_p*(Real_t)SinogramPtr->N_r*(Real_t)SinogramPtr->N_t*log(TomoInputsPtr->var_est)/2; check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Forward cost = %f\n",forward); check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Prior cost = %f\n",prior); TomoInputsPtr->Forward_Cost = forward; TomoInputsPtr->Prior_Cost = prior; cost = forward + prior; } /*Broadcase the value of cost to all nodes*/ MPI_Bcast(&cost, 1, MPI_REAL_DATATYPE, 0, MPI_COMM_WORLD); return cost; } /*Upsamples the (N_time x N_z x N_y x N_x) size 'Init' by a factor of 2 along the x-y plane and stores it in 'Object'*/ void upsample_bilinear_2D (Real_arr_t**** Object, Real_arr_t**** Init, int32_t N_time, int32_t N_z, int32_t N_y, int32_t N_x) { int32_t i, j, k, m; Real_arr_t **buffer; #pragma omp parallel for private(buffer, m, j, k) for (i=0; i < N_time; i++) for (m=0; m < N_z; m++) { buffer = (Real_arr_t**)multialloc(sizeof(Real_arr_t), 2, N_y, 2*N_x); for (j=0; j < N_y; j++){ buffer[j][0] = Init[i][m][j][0]; buffer[j][1] = (3.0*Init[i][m][j][0] + Init[i][m][j][1])/4.0; buffer[j][2*N_x - 1] = Init[i][m][j][N_x - 1]; buffer[j][2*N_x - 2] = (Init[i][m][j][N_x - 2] + 3.0*Init[i][m][j][N_x - 1])/4.0; for (k=1; k < N_x - 1; k++){ buffer[j][2*k] = (Init[i][m][j][k-1] + 3.0*Init[i][m][j][k])/4.0; buffer[j][2*k + 1] = (3.0*Init[i][m][j][k] + Init[i][m][j][k+1])/4.0; } } for (k=0; k < 2*N_x; k++){ Object[i][m][0][k] = buffer[0][k]; Object[i][m][1][k] = (3.0*buffer[0][k] + buffer[1][k])/4.0; Object[i][m][2*N_y-1][k] = buffer[N_y-1][k]; Object[i][m][2*N_y-2][k] = (buffer[N_y-2][k] + 3.0*buffer[N_y-1][k])/4.0; } for (j=1; j<N_y-1; j++){ for (k=0; k<2*N_x; k++){ Object[i][m][2*j][k] = (buffer[j-1][k] + 3.0*buffer[j][k])/4.0; Object[i][m][2*j + 1][k] = (3*buffer[j][k] + buffer[j+1][k])/4.0; } } multifree(buffer,2); } } /*Upsamples the (N_z x N_y x N_x) size 'Init' by a factor of 2 along the x-y plane and stores it in 'Object'*/ void upsample_object_bilinear_2D (Real_arr_t*** Object, Real_arr_t*** Init, int32_t N_z, int32_t N_y, int32_t N_x) { int32_t j, k, slice; Real_arr_t **buffer; buffer = (Real_arr_t**)multialloc(sizeof(Real_arr_t), 2, N_y, 2*N_x); for (slice=0; slice < N_z; slice++){ for (j=0; j < N_y; j++){ buffer[j][0] = Init[slice][j][0]; buffer[j][1] = (3.0*Init[slice][j][0] + Init[slice][j][1])/4.0; buffer[j][2*N_x - 1] = Init[slice][j][N_x - 
1]; buffer[j][2*N_x - 2] = (Init[slice][j][N_x - 2] + 3.0*Init[slice][j][N_x - 1])/4.0; for (k=1; k < N_x - 1; k++){ buffer[j][2*k] = (Init[slice][j][k-1] + 3.0*Init[slice][j][k])/4.0; buffer[j][2*k + 1] = (3.0*Init[slice][j][k] + Init[slice][j][k+1])/4.0; } } for (k=0; k < 2*N_x; k++){ Object[slice+1][0][k] = buffer[0][k]; Object[slice+1][1][k] = (3.0*buffer[0][k] + buffer[1][k])/4.0; Object[slice+1][2*N_y-1][k] = buffer[N_y-1][k]; Object[slice+1][2*N_y-2][k] = (buffer[N_y-2][k] + 3.0*buffer[N_y-1][k])/4.0; } for (j=1; j<N_y-1; j++){ for (k=0; k<2*N_x; k++){ Object[slice+1][2*j][k] = (buffer[j-1][k] + 3.0*buffer[j][k])/4.0; Object[slice+1][2*j + 1][k] = (3*buffer[j][k] + buffer[j+1][k])/4.0; } } } multifree(buffer,2); } void upsample_bilinear_3D (Real_arr_t**** Object, Real_arr_t**** Init, int32_t N_time, int32_t N_z, int32_t N_y, int32_t N_x) { int32_t i, j, k, slice; Real_t ***buffer2D, ***buffer3D; #pragma omp parallel for private(buffer2D, buffer3D, slice, j, k) for (i=0; i < N_time; i++) { buffer2D = (Real_t***)multialloc(sizeof(Real_t), 3, N_z, N_y, 2*N_x); buffer3D = (Real_t***)multialloc(sizeof(Real_t), 3, N_z, 2*N_y, 2*N_x); for (slice=0; slice < N_z; slice++){ for (j=0; j < N_y; j++){ buffer2D[slice][j][0] = Init[i][slice][j][0]; buffer2D[slice][j][1] = (3.0*Init[i][slice][j][0] + Init[i][slice][j][1])/4.0; buffer2D[slice][j][2*N_x - 1] = Init[i][slice][j][N_x - 1]; buffer2D[slice][j][2*N_x - 2] = (Init[i][slice][j][N_x - 2] + 3.0*Init[i][slice][j][N_x - 1])/4.0; for (k=1; k < N_x - 1; k++){ buffer2D[slice][j][2*k] = (Init[i][slice][j][k-1] + 3.0*Init[i][slice][j][k])/4.0; buffer2D[slice][j][2*k + 1] = (3.0*Init[i][slice][j][k] + Init[i][slice][j][k+1])/4.0; } } for (k=0; k < 2*N_x; k++){ buffer3D[slice][0][k] = buffer2D[slice][0][k]; buffer3D[slice][1][k] = (3.0*buffer2D[slice][0][k] + buffer2D[slice][1][k])/4.0; buffer3D[slice][2*N_y-1][k] = buffer2D[slice][N_y-1][k]; buffer3D[slice][2*N_y-2][k] = (buffer2D[slice][N_y-2][k] + 3.0*buffer2D[slice][N_y-1][k])/4.0; } for (j=1; j<N_y-1; j++) for (k=0; k<2*N_x; k++){ buffer3D[slice][2*j][k] = (buffer2D[slice][j-1][k] + 3.0*buffer2D[slice][j][k])/4.0; buffer3D[slice][2*j + 1][k] = (3*buffer2D[slice][j][k] + buffer2D[slice][j+1][k])/4.0; } } for (j=0; j<2*N_y; j++) for (k=0; k<2*N_x; k++){ Object[i][0][j][k] = buffer3D[0][j][k]; Object[i][1][j][k] = (3.0*buffer3D[0][j][k] + buffer3D[1][j][k])/4.0; Object[i][2*N_z-1][j][k] = buffer3D[N_z-1][j][k]; Object[i][2*N_z-2][j][k] = (3.0*buffer3D[N_z-1][j][k] + buffer3D[N_z-2][j][k])/4.0; } for (slice=1; slice < N_z-1; slice++) for (j=0; j<2*N_y; j++) for (k=0; k<2*N_x; k++){ Object[i][2*slice][j][k] = (buffer3D[slice-1][j][k] + 3.0*buffer3D[slice][j][k])/4.0; Object[i][2*slice+1][j][k] = (3.0*buffer3D[slice][j][k] + buffer3D[slice+1][j][k])/4.0; } multifree(buffer2D,3); multifree(buffer3D,3); } } /*'InitObject' intializes the Object to be reconstructed to either 0 or an interpolated version of the previous reconstruction. 
It is used in multi resolution reconstruction in which after every coarse resolution reconstruction the object should be intialized with an interpolated version of the reconstruction following which the object will be reconstructed at a finer resolution.*/ /*Upsamples the (N_time x N_z x N_y x N_x) size 'Init' by a factor of 2 along the in 3D x-y-z coordinates and stores it in 'Object'*/ void upsample_object_bilinear_3D (Real_arr_t*** Object, Real_arr_t*** Init, int32_t N_z, int32_t N_y, int32_t N_x) { int32_t j, k, slice; Real_t ***buffer2D, ***buffer3D; buffer2D = (Real_t***)multialloc(sizeof(Real_t), 3, N_z, N_y, 2*N_x); buffer3D = (Real_t***)multialloc(sizeof(Real_t), 3, N_z, 2*N_y, 2*N_x); for (slice=0; slice < N_z; slice++){ for (j=0; j < N_y; j++){ buffer2D[slice][j][0] = Init[slice][j][0]; buffer2D[slice][j][1] = (3.0*Init[slice][j][0] + Init[slice][j][1])/4.0; buffer2D[slice][j][2*N_x - 1] = Init[slice][j][N_x - 1]; buffer2D[slice][j][2*N_x - 2] = (Init[slice][j][N_x - 2] + 3.0*Init[slice][j][N_x - 1])/4.0; for (k=1; k < N_x - 1; k++){ buffer2D[slice][j][2*k] = (Init[slice][j][k-1] + 3.0*Init[slice][j][k])/4.0; buffer2D[slice][j][2*k + 1] = (3.0*Init[slice][j][k] + Init[slice][j][k+1])/4.0; } } for (k=0; k < 2*N_x; k++){ buffer3D[slice][0][k] = buffer2D[slice][0][k]; buffer3D[slice][1][k] = (3.0*buffer2D[slice][0][k] + buffer2D[slice][1][k])/4.0; buffer3D[slice][2*N_y-1][k] = buffer2D[slice][N_y-1][k]; buffer3D[slice][2*N_y-2][k] = (buffer2D[slice][N_y-2][k] + 3.0*buffer2D[slice][N_y-1][k])/4.0; } for (j=1; j<N_y-1; j++) for (k=0; k<2*N_x; k++){ buffer3D[slice][2*j][k] = (buffer2D[slice][j-1][k] + 3.0*buffer2D[slice][j][k])/4.0; buffer3D[slice][2*j + 1][k] = (3*buffer2D[slice][j][k] + buffer2D[slice][j+1][k])/4.0; } } for (j=0; j<2*N_y; j++) for (k=0; k<2*N_x; k++){ Object[1][j][k] = buffer3D[0][j][k]; Object[2][j][k] = (3.0*buffer3D[0][j][k] + buffer3D[1][j][k])/4.0; Object[2*N_z][j][k] = buffer3D[N_z-1][j][k]; Object[2*N_z-1][j][k] = (3.0*buffer3D[N_z-1][j][k] + buffer3D[N_z-2][j][k])/4.0; } for (slice=1; slice < N_z-1; slice++) for (j=0; j<2*N_y; j++) for (k=0; k<2*N_x; k++){ Object[2*slice+1][j][k] = (buffer3D[slice-1][j][k] + 3.0*buffer3D[slice][j][k])/4.0; Object[2*slice+2][j][k] = (3.0*buffer3D[slice][j][k] + buffer3D[slice+1][j][k])/4.0; } multifree(buffer2D,3); multifree(buffer3D,3); } /*randomly select the voxels lines which need to be updated along the x-y plane for each z-block and time slice*/ void randomly_select_x_y (ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, uint8_t*** Mask) { int32_t i, j, num,n, Index, col, row, *Counter, ArraySize, block; ArraySize = ScannedObjectPtr->N_y*ScannedObjectPtr->N_x; Counter = (int32_t*)get_spc(ArraySize, sizeof(int32_t)); for (i=0; i<ScannedObjectPtr->N_time; i++) for (block=0; block<TomoInputsPtr->num_z_blocks; block++) { ArraySize = ScannedObjectPtr->N_y*ScannedObjectPtr->N_x; for (Index = 0; Index < ArraySize; Index++) Counter[Index] = Index; TomoInputsPtr->UpdateSelectNum[i][block] = 0; for (j=0; j<ScannedObjectPtr->N_x*ScannedObjectPtr->N_y; j++){ Index = floor(random2() * ArraySize); Index = (Index == ArraySize)?ArraySize-1:Index; col = Counter[Index] % ScannedObjectPtr->N_x; row = Counter[Index] / ScannedObjectPtr->N_x; for (n = block*(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks); n < (block+1)*(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks); n++) if (Mask[i][row][col] == 1) { num = TomoInputsPtr->UpdateSelectNum[i][block]; TomoInputsPtr->x_rand_select[i][block][num] = col; 
TomoInputsPtr->y_rand_select[i][block][num] = row; (TomoInputsPtr->UpdateSelectNum[i][block])++; break; } Counter[Index] = Counter[ArraySize - 1]; ArraySize--; } } free(Counter); } /*'InitObject' intializes the Object to be reconstructed to either 0 or an interpolated version of the previous reconstruction. It is used in multi resolution reconstruction in which after every coarse resolution reconstruction the object should be intialized with an interpolated version of the reconstruction following which the object will be reconstructed at a finer resolution. --initICD-- If 1, initializes the object to 0 If 2, the code uses bilinear interpolation to initialize the object if the previous reconstruction was at a lower resolution The function also initializes the magnitude update map 'MagUpdateMap' from the previous coarser resolution reconstruction. */ int32_t initObject (Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_arr_t**** MagUpdateMap) { char object_file[100]; int dimTiff[4]; int32_t i, j, k, l, size, flag = 0; Real_arr_t ***Init, ****UpMapInit; for (i = 0; i < ScannedObjectPtr->N_time; i++) for (j = 0; j < ScannedObjectPtr->N_z; j++) for (k = 0; k < ScannedObjectPtr->N_y; k++) for (l = 0; l < ScannedObjectPtr->N_x; l++) ScannedObjectPtr->Object[i][j+1][k][l] = OBJECT_INIT_VAL; if (TomoInputsPtr->initICD > 3 || TomoInputsPtr->initICD < 0){ sentinel(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "ERROR: initICD value not recognized.\n"); } else if (TomoInputsPtr->initICD == 1) { size = ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x; for (i = 0; i < ScannedObjectPtr->N_time; i++) { sprintf(object_file, "%s_time_%d", OBJECT_FILENAME,i); if (read_SharedBinFile_At (object_file, &(ScannedObjectPtr->Object[i][1][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; } if (TomoInputsPtr->initMagUpMap == 1) { size = ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x; if (read_SharedBinFile_At (MAG_UPDATE_FILENAME, &(MagUpdateMap[0][0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; } } else if (TomoInputsPtr->initICD == 2 || TomoInputsPtr->initICD == 3) { if (TomoInputsPtr->initICD == 3) { Init = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, ScannedObjectPtr->N_z/2, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2); check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Interpolating object using 3D bilinear interpolation.\n"); for (i = 0; i < ScannedObjectPtr->N_time; i++) { sprintf(object_file, "%s_time_%d", OBJECT_FILENAME, i); size = ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x/8; if (read_SharedBinFile_At (object_file, &(Init[0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; upsample_object_bilinear_3D (ScannedObjectPtr->Object[i], Init, ScannedObjectPtr->N_z/2, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2); } multifree(Init,3); check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Done with interpolating object using 3D bilinear interpolation.\n"); } else { Init = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2); check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Interpolating object using 2D bilinear interpolation.\n"); for (i = 0; i < ScannedObjectPtr->N_time; i++) { sprintf(object_file, "%s_time_%d", 
OBJECT_FILENAME,i); size = ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x/4; if (read_SharedBinFile_At (object_file, &(Init[0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; upsample_object_bilinear_2D (ScannedObjectPtr->Object[i], Init, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2); } multifree(Init,3); check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Done with interpolating object using 2D bilinear interpolation.\n"); } if (TomoInputsPtr->initMagUpMap == 1) { if (TomoInputsPtr->prevnum_z_blocks == TomoInputsPtr->num_z_blocks) { UpMapInit = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2); size = ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x/4; if (read_SharedBinFile_At (MAG_UPDATE_FILENAME, &(UpMapInit[0][0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Interpolating magnitude update map using 2D bilinear interpolation.\n"); upsample_bilinear_2D (MagUpdateMap, UpMapInit, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2); multifree(UpMapInit,4); } else if (TomoInputsPtr->prevnum_z_blocks == TomoInputsPtr->num_z_blocks/2) { UpMapInit = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks/2, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2); size = ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x/8; if (read_SharedBinFile_At (MAG_UPDATE_FILENAME, &(UpMapInit[0][0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Interpolating magnitude update map using 3D bilinear interpolation.\n"); upsample_bilinear_3D (MagUpdateMap, UpMapInit, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks/2, ScannedObjectPtr->N_y/2, ScannedObjectPtr->N_x/2); multifree(UpMapInit,4); } else { check_warn(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Number of axial blocks is incompatible with previous stage of multi-resolution.\n"); check_warn(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Initializing the multi-resolution map to zeros.\n"); } } } dimTiff[0] = ScannedObjectPtr->N_time; dimTiff[1] = TomoInputsPtr->num_z_blocks; dimTiff[2] = ScannedObjectPtr->N_y; dimTiff[3] = ScannedObjectPtr->N_x; sprintf(object_file, "%s_n%d", MAG_UPDATE_FILENAME, TomoInputsPtr->node_rank); if (TomoInputsPtr->Write2Tiff == 1) if (WriteMultiDimArray2Tiff (object_file, dimTiff, 0, 1, 2, 3, &(MagUpdateMap[0][0][0][0]), 0, TomoInputsPtr->debug_file_ptr)) flag = -1; for (i = 0; i < ScannedObjectPtr->N_time; i++) { sprintf (object_file, "%s_n%d", INIT_OBJECT_FILENAME, TomoInputsPtr->node_rank); sprintf (object_file, "%s_time_%d", object_file, i); dimTiff[0] = 1; dimTiff[1] = ScannedObjectPtr->N_z; dimTiff[2] = ScannedObjectPtr->N_y; dimTiff[3] = ScannedObjectPtr->N_x; if (TomoInputsPtr->Write2Tiff == 1) if (WriteMultiDimArray2Tiff (object_file, dimTiff, 0, 1, 2, 3, &(ScannedObjectPtr->Object[i][1][0][0]), 0, TomoInputsPtr->debug_file_ptr)) flag = -1; } return (flag); error: return (-1); } /*'initErrorSinogram' is used to initialize the error sinogram before start of ICD. 
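Here y denotes the measured projections ('Projection'), A the forward projection operator, x the current object estimate, and d the per-detector projection offset ('ProjOffset').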
It computes e = y - Ax - d. Ax is computed by forward projecting the obkject x.*/ int32_t initErrorSinogam (Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_arr_t** DetectorResponse, Real_arr_t*** ErrorSino/*, AMatrixCol* VoxelLineResponse*/) { Real_t pixel, avg=0; int32_t dimTiff[4], i, j, k, p, sino_idx, slice, flag = 0; AMatrixCol* AMatrixPtr = (AMatrixCol*)get_spc(ScannedObjectPtr->N_time, sizeof(AMatrixCol)); uint8_t AvgNumXElements = (uint8_t)ceil(3*ScannedObjectPtr->delta_xy/SinogramPtr->delta_r); char error_file[100] = "error_sinogram"; sprintf(error_file, "%s_n%d", error_file, TomoInputsPtr->node_rank); for (i = 0; i < ScannedObjectPtr->N_time; i++) { AMatrixPtr[i].values = (Real_t*)get_spc(AvgNumXElements, sizeof(Real_t)); AMatrixPtr[i].index = (int32_t*)get_spc(AvgNumXElements, sizeof(int32_t)); } memset(&(ErrorSino[0][0][0]), 0, SinogramPtr->N_p*SinogramPtr->N_t*SinogramPtr->N_r*sizeof(Real_arr_t)); #pragma omp parallel for private(j, k, p, sino_idx, slice, pixel) for (i=0; i<ScannedObjectPtr->N_time; i++) { for (j=0; j<ScannedObjectPtr->N_y; j++) { for (k=0; k<ScannedObjectPtr->N_x; k++){ for (p=0; p<ScannedObjectPtr->ProjNum[i]; p++){ sino_idx = ScannedObjectPtr->ProjIdxPtr[i][p]; calcAMatrixColumnforAngle(SinogramPtr, ScannedObjectPtr, DetectorResponse, &(AMatrixPtr[i]), j, k, sino_idx); for (slice=0; slice<ScannedObjectPtr->N_z; slice++){ /* printf("count = %d, idx = %d, val = %f\n", VoxelLineResponse[slice].count, VoxelLineResponse[slice].index[0], VoxelLineResponse[slice].values[0]);*/ pixel = ScannedObjectPtr->Object[i][slice+1][j][k]; /*slice+1 to account for extra z slices required for MPI*/ forward_project_voxel (SinogramPtr, pixel, ErrorSino, &(AMatrixPtr[i])/*, &(VoxelLineResponse[slice])*/, sino_idx, slice); } } } } } #pragma omp parallel for private(j, k) reduction(+:avg) for(i=0; i < SinogramPtr->N_p; i++) for (j = 0; j < SinogramPtr->N_r; j++) for (k = 0; k < SinogramPtr->N_t; k++) { ErrorSino[i][j][k] = SinogramPtr->Projection[i][j][k] - ErrorSino[i][j][k] - SinogramPtr->ProjOffset[j][k]; if (fabs(ErrorSino[i][j][k]*sqrt(TomoInputsPtr->Weight[i][j][k])) < TomoInputsPtr->ErrorSinoThresh) SinogramPtr->ProjSelect[i][j][k] = true; else SinogramPtr->ProjSelect[i][j][k] = false; /* if (ErrorSino[i][j][k]*sqrt(TomoInputsPtr->Weight[i][j][k]) < -30) TomoInputsPtr->Weight[i][j][k] = 0;*/ avg+=ErrorSino[i][j][k]; } avg = avg/(SinogramPtr->N_r*SinogramPtr->N_t*SinogramPtr->N_p); check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Average of error sinogram in node %d is %f\n", TomoInputsPtr->node_rank, avg); dimTiff[0] = 1; dimTiff[1] = SinogramPtr->N_p; dimTiff[2] = SinogramPtr->N_r; dimTiff[3] = SinogramPtr->N_t; if (TomoInputsPtr->Write2Tiff == 1) flag = WriteMultiDimArray2Tiff (error_file, dimTiff, 0, 3, 1, 2, &(ErrorSino[0][0][0]), 0, TomoInputsPtr->debug_file_ptr); for (i = 0; i < ScannedObjectPtr->N_time; i++) { free(AMatrixPtr[i].values); free(AMatrixPtr[i].index); } free (AMatrixPtr); multifree(SinogramPtr->Projection,3); return (flag); } /*Updates the variance parameter \sigma*/ void update_variance_parameter (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino) { int32_t k, i, j; Real_t temp_acc = 0, temp = 0; #pragma omp parallel for private(i, j, temp) reduction(+:temp_acc) for (k = 0; k < SinogramPtr->N_p; k++) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { TomoInputsPtr->Weight[k][i][j] = TomoInputsPtr->Weight[k][i][j]*TomoInputsPtr->var_est; if 
(SinogramPtr->ProjSelect[k][i][j] == true) temp = ErrorSino[k][i][j]*ErrorSino[k][i][j]*TomoInputsPtr->Weight[k][i][j]; else temp = fabs(ErrorSino[k][i][j])*TomoInputsPtr->ErrorSinoDelta*TomoInputsPtr->ErrorSinoThresh*sqrt(TomoInputsPtr->Weight[k][i][j]*TomoInputsPtr->var_est); temp_acc += temp; } MPI_Allreduce(&temp_acc, &temp, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); TomoInputsPtr->var_est = temp/((Real_t)TomoInputsPtr->node_num*(Real_t)SinogramPtr->N_p*(Real_t)SinogramPtr->N_r*(Real_t)SinogramPtr->N_t); #pragma omp parallel for private(i, j) for (k = 0; k < SinogramPtr->N_p; k++) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { TomoInputsPtr->Weight[k][i][j] /= TomoInputsPtr->var_est; if (fabs(ErrorSino[k][i][j]*sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh) SinogramPtr->ProjSelect[k][i][j] = true; else SinogramPtr->ProjSelect[k][i][j] = false; } } void update_d_offset_rect_patch_constraint (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino) { Real_t sign, **b, **Lambda, temp; Real_arr_t **x; int32_t i, j, k; b = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); Lambda = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); x = (Real_arr_t**)multialloc(sizeof(Real_arr_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); memset(&(b[0][0]), 0, SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_t)); memset(&(Lambda[0][0]), 0, SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_t)); memset(&(x[0][0]), 0, SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_arr_t)); #pragma omp parallel for collapse(2) private(i, j, k, temp, sign) for (i = 0; i < SinogramPtr->N_r; i++) { for (j = 0; j < SinogramPtr->N_t; j++) { b[i][j] = 0; Lambda[i][j] = 0; for (k = 0; k < SinogramPtr->N_p; k++) { temp = TomoInputsPtr->ErrorSinoThresh*TomoInputsPtr->ErrorSinoDelta*sqrt(TomoInputsPtr->Weight[k][i][j]); if (SinogramPtr->ProjSelect[k][i][j] == true) { Lambda[i][j] += TomoInputsPtr->Weight[k][i][j]; b[i][j] += (ErrorSino[k][i][j] + SinogramPtr->ProjOffset[i][j])*TomoInputsPtr->Weight[k][i][j]; } else { sign = (ErrorSino[k][i][j] > 0) - (ErrorSino[k][i][j] < 0); Lambda[i][j] += temp/fabs(ErrorSino[k][i][j]); b[i][j] += (ErrorSino[k][i][j] + SinogramPtr->ProjOffset[i][j])*temp/fabs(ErrorSino[k][i][j]); } } } } constrained_quad_opt (Lambda, b, SinogramPtr->off_constraint, x, SinogramPtr->N_r, SinogramPtr->N_t, SinogramPtr->off_constraint_num, TomoInputsPtr); #pragma omp parallel for collapse(3) private(i, j, k) for (k = 0; k < SinogramPtr->N_p; k++) { for (i = 0; i < SinogramPtr->N_r; i++) { for (j = 0; j < SinogramPtr->N_t; j++) { ErrorSino[k][i][j] += SinogramPtr->ProjOffset[i][j] - x[i][j]; if (fabs(ErrorSino[k][i][j]*sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh) SinogramPtr->ProjSelect[k][i][j] = true; else SinogramPtr->ProjSelect[k][i][j] = false; } } } memcpy(&(SinogramPtr->ProjOffset[0][0]),&(x[0][0]),SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_arr_t)); multifree(b,2); multifree(Lambda,2); multifree(x,2); } /*Updates the projection offset error parameter d_i*/ void update_d_offset_zero_mean_constraint (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino) { Real_t sign, **numerator, num_sum = 0, temp, **denominator, den_sum = 0, gamma = 0; int32_t i, j, k; numerator = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); denominator = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); #pragma omp 
parallel for private(j, k, temp, sign) reduction(+:num_sum, den_sum) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { numerator[i][j] = 0; denominator[i][j] = 0; for (k = 0; k < SinogramPtr->N_p; k++) { temp = TomoInputsPtr->ErrorSinoThresh*TomoInputsPtr->ErrorSinoDelta*sqrt(TomoInputsPtr->Weight[k][i][j]); if (SinogramPtr->ProjSelect[k][i][j] == true) { numerator[i][j] += ErrorSino[k][i][j]*TomoInputsPtr->Weight[k][i][j]; denominator[i][j] += TomoInputsPtr->Weight[k][i][j]; } else { sign = (ErrorSino[k][i][j] > 0) - (ErrorSino[k][i][j] < 0); numerator[i][j] += temp*sign; denominator[i][j] += temp/fabs(ErrorSino[k][i][j]); } } num_sum += SinogramPtr->ProjOffset[i][j] + (numerator[i][j]/denominator[i][j]); den_sum += 1.0/denominator[i][j]; } gamma = num_sum/den_sum; #pragma omp parallel for private(j, k) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { SinogramPtr->ProjOffset[i][j] = SinogramPtr->ProjOffset[i][j] + (numerator[i][j]-gamma)/denominator[i][j]; for (k = 0; k < SinogramPtr->N_p; k++) { ErrorSino[k][i][j] -= (numerator[i][j]-gamma)/denominator[i][j]; if (fabs(ErrorSino[k][i][j]*sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh) SinogramPtr->ProjSelect[k][i][j] = true; else SinogramPtr->ProjSelect[k][i][j] = false; } } multifree(numerator,2); multifree(denominator,2); } /*Updates the projection offset error parameter d_i*/ void update_d_offset_unconstrained (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino) { Real_t sign, **numerator, temp, **denominator; int32_t i, j, k; numerator = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); denominator = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); #pragma omp parallel for private(j, k, temp, sign) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { numerator[i][j] = 0; denominator[i][j] = 0; for (k = 0; k < SinogramPtr->N_p; k++) { temp = TomoInputsPtr->ErrorSinoThresh*TomoInputsPtr->ErrorSinoDelta*sqrt(TomoInputsPtr->Weight[k][i][j]); if (SinogramPtr->ProjSelect[k][i][j] == true) { numerator[i][j] += ErrorSino[k][i][j]*TomoInputsPtr->Weight[k][i][j]; denominator[i][j] += TomoInputsPtr->Weight[k][i][j]; } else { sign = (ErrorSino[k][i][j] > 0) - (ErrorSino[k][i][j] < 0); numerator[i][j] += temp*sign; denominator[i][j] += temp/fabs(ErrorSino[k][i][j]); } } } #pragma omp parallel for private(j, k) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { SinogramPtr->ProjOffset[i][j] = SinogramPtr->ProjOffset[i][j] + (numerator[i][j])/denominator[i][j]; for (k = 0; k < SinogramPtr->N_p; k++) { ErrorSino[k][i][j] -= (numerator[i][j])/denominator[i][j]; if (fabs(ErrorSino[k][i][j]*sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh) SinogramPtr->ProjSelect[k][i][j] = true; else SinogramPtr->ProjSelect[k][i][j] = false; } } multifree(numerator,2); multifree(denominator,2); } void update_Sinogram_Offset (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr, Real_arr_t*** ErrorSino) { if (TomoInputsPtr->OffsetConstraintType == 1) update_d_offset_unconstrained (SinogramPtr, TomoInputsPtr, ErrorSino); else if (TomoInputsPtr->OffsetConstraintType == 2) update_d_offset_zero_mean_constraint (SinogramPtr, TomoInputsPtr, ErrorSino); else if (TomoInputsPtr->OffsetConstraintType == 3) update_d_offset_rect_patch_constraint (SinogramPtr, TomoInputsPtr, ErrorSino); } /*Implements mutithreaded shared memory parallelization using 
OpenMP and splits work among threads. Each thread gets a certain time slice and z block to update. Multithreading is done within the z-blocks assigned to each node. ErrorSino - Error sinogram Iter - Present iteration number MagUpdateMap - Magnitude update map containing the magnitude of update of each voxel Mask - If a certain element is true then the corresponding voxel is updated*/ int updateVoxelsTimeSlices(Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr, Real_arr_t** DetectorResponse, /*AMatrixCol* VoxelLineResponse,*/ Real_arr_t*** ErrorSino, int32_t Iter, Real_arr_t**** MagUpdateMap, uint8_t*** Mask) { Real_t AverageUpdate = 0, tempUpdate, avg_update_percentage, total_vox_mag = 0.0, vox_mag = 0.0; int32_t xy_start, xy_end, i, j, K, block, idx, **z_start, **z_stop; Real_t tempTotPix = 0, total_pix = 0; long int **zero_count, total_zero_count = 0; int32_t** thread_num = (int32_t**)multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks); MPI_Request *send_reqs, *recv_reqs; send_reqs = (MPI_Request*)get_spc(ScannedObjectPtr->N_time, sizeof(MPI_Request)); recv_reqs = (MPI_Request*)get_spc(ScannedObjectPtr->N_time, sizeof(MPI_Request)); z_start = (int32_t**)multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks); z_stop = (int32_t**)multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks); randomly_select_x_y (ScannedObjectPtr, TomoInputsPtr, Mask); zero_count = (long int**)multialloc(sizeof(long int), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks); /* offset_numerator = (Real_t**)multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); memset(&(offset_denominator[0][0]), 0, SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_t)); for (k = 0; k < SinogramPtr->N_p; k++) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) offset_denominator[i][j] += TomoInputsPtr->Weight[k][i][j]; */ memset(&(zero_count[0][0]), 0, ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*sizeof(long int)); /* K = ScannedObjectPtr->N_time*ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x; K = (K - total_zero_count)/(ScannedObjectPtr->gamma*K);*/ K = ScannedObjectPtr->NHICD_Iterations; check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Number of NHICD iterations is %d.\n", K); for (j = 0; j < K; j++) { total_vox_mag = 0.0; #pragma omp parallel for collapse(2) private(i, block, idx, xy_start, xy_end) reduction(+:total_vox_mag) for (i = 0; i < ScannedObjectPtr->N_time; i++) for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2) { idx = (i % 2 == 0) ? block: block + 1; z_start[i][idx] = idx*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks); z_stop[i][idx] = (idx + 1)*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks) - 1; z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1: z_stop[i][idx]; xy_start = j*floor(TomoInputsPtr->UpdateSelectNum[i][idx]/K); xy_end = (j + 1)*floor(TomoInputsPtr->UpdateSelectNum[i][idx]/K) - 1; xy_end = (j == K - 1) ? 
TomoInputsPtr->UpdateSelectNum[i][idx] - 1: xy_end; /* printf ("Loop 1 Start - j = %d, i = %d, idx = %d, z_start = %d, z_stop = %d, xy_start = %d, xy_end = %d\n", j, i, idx, z_start[i][idx], z_stop[i][idx], xy_start, xy_end);*/ total_vox_mag += updateVoxels (i, i, z_start[i][idx], z_stop[i][idx], xy_start, xy_end, TomoInputsPtr->x_rand_select[i][idx], TomoInputsPtr->y_rand_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /*VoxelLineResponse,*/ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]); thread_num[i][idx] = omp_get_thread_num(); } /*check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Send MPI info\n");*/ MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0); /* check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "update_Sinogram_Offset: Will compute projection offset error\n");*/ if (TomoInputsPtr->updateProjOffset > 1) update_Sinogram_Offset (SinogramPtr, TomoInputsPtr, ErrorSino); /* check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "update_Sinogram_Offset: Done computing projection offset error\n");*/ MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0); #pragma omp parallel for collapse(2) private(i, block, idx, xy_start, xy_end) reduction(+:total_vox_mag) for (i = 0; i < ScannedObjectPtr->N_time; i++) for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2) { idx = (i % 2 == 0) ? block + 1: block; z_start[i][idx] = idx*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks); z_stop[i][idx] = (idx + 1)*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks) - 1; z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1: z_stop[i][idx]; xy_start = j*floor(TomoInputsPtr->UpdateSelectNum[i][idx]/K); xy_end = (j + 1)*floor(TomoInputsPtr->UpdateSelectNum[i][idx]/K) - 1; xy_end = (j == K - 1) ? TomoInputsPtr->UpdateSelectNum[i][idx] - 1: xy_end; total_vox_mag += updateVoxels (i, i, z_start[i][idx], z_stop[i][idx], xy_start, xy_end, TomoInputsPtr->x_rand_select[i][idx], TomoInputsPtr->y_rand_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /*VoxelLineResponse,*/ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]); thread_num[i][idx] = omp_get_thread_num(); /* printf ("Loop 2 - i = %d, idx = %d, z_start = %d, z_stop = %d, xy_start = %d, xy_end = %d\n", i, idx, z_start[i][idx], z_stop[i][idx], xy_start, xy_end);*/ } MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1); if (TomoInputsPtr->updateProjOffset > 1) update_Sinogram_Offset (SinogramPtr, TomoInputsPtr, ErrorSino); MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1); VSC_based_Voxel_Line_Select(ScannedObjectPtr, TomoInputsPtr, MagUpdateMap); /* check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Number of NHICD voxel lines to be updated in iteration %d is %d\n", j, num_voxel_lines);*/ if (Iter > 1 && TomoInputsPtr->no_NHICD == 0) { #pragma omp parallel for collapse(2) private(i, block, idx) for (i = 0; i < ScannedObjectPtr->N_time; i++) for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2) { idx = (i % 2 == 0) ? block: block + 1; z_start[i][idx] = idx*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks); z_stop[i][idx] = (idx + 1)*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks) - 1; z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? 
ScannedObjectPtr->N_z - 1: z_stop[i][idx]; updateVoxels (i, i, z_start[i][idx], z_stop[i][idx], 0, TomoInputsPtr->NHICDSelectNum[i][idx]-1, TomoInputsPtr->x_NHICD_select[i][idx], TomoInputsPtr->y_NHICD_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /*VoxelLineResponse,*/ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]); thread_num[i][idx] = omp_get_thread_num(); /* printf ("Loop 1 NHICD - i = %d, idx = %d, z_start = %d, z_stop = %d\n", i, idx, z_start[i][idx], z_stop[i][idx]);*/ } MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0); if (TomoInputsPtr->updateProjOffset > 1) update_Sinogram_Offset (SinogramPtr, TomoInputsPtr, ErrorSino); MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0); #pragma omp parallel for collapse(2) private(i, block, idx) for (i = 0; i < ScannedObjectPtr->N_time; i++) for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2) { idx = (i % 2 == 0) ? block + 1: block; z_start[i][idx] = idx*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks); z_stop[i][idx] = (idx + 1)*floor(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks) - 1; z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1: z_stop[i][idx]; updateVoxels (i, i, z_start[i][idx], z_stop[i][idx], 0, TomoInputsPtr->NHICDSelectNum[i][idx]-1, TomoInputsPtr->x_NHICD_select[i][idx], TomoInputsPtr->y_NHICD_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /*VoxelLineResponse,*/ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]); thread_num[i][idx] = omp_get_thread_num(); /* printf ("Loop 2 NHICD - i = %d, idx = %d, z_start = %d, z_stop = %d\n", i, idx, z_start[i][idx], z_stop[i][idx]);*/ } MPI_Send_Recv_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1); if (TomoInputsPtr->updateProjOffset > 1) update_Sinogram_Offset (SinogramPtr, TomoInputsPtr, ErrorSino); MPI_Wait_Z_Slices (ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1); } } if (TomoInputsPtr->updateVar == 1) update_variance_parameter (SinogramPtr, TomoInputsPtr, ErrorSino); check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Time Slice, Z Start, Z End - Thread : "); total_pix = 0; for (i=0; i<ScannedObjectPtr->N_time; i++){ for (block=0; block<TomoInputsPtr->num_z_blocks; block++){ total_pix += TomoInputsPtr->UpdateSelectNum[i][block]*(ScannedObjectPtr->N_z/TomoInputsPtr->num_z_blocks); for (j=0; j<TomoInputsPtr->UpdateSelectNum[i][block]; j++){ AverageUpdate += MagUpdateMap[i][block][TomoInputsPtr->y_rand_select[i][block][j]][TomoInputsPtr->x_rand_select[i][block][j]]; } total_zero_count += zero_count[i][block]; check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "%d,%d,%d-%d; ", i, z_start[i][block], z_stop[i][block], thread_num[i][block]); } } check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "\n"); MPI_Allreduce(&AverageUpdate, &tempUpdate, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&total_pix, &tempTotPix, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&total_vox_mag, &vox_mag, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); AverageUpdate = tempUpdate/(tempTotPix); AverageUpdate = convert2Hounsfield(AverageUpdate); check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Average voxel update over all voxels is %f, total voxels is %f.\n", AverageUpdate, tempTotPix); check_debug(TomoInputsPtr->node_rank==0, 
TomoInputsPtr->debug_file_ptr, "Zero count is %ld.\n", total_zero_count); check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Variance parameter divisor is %f.\n", (Real_t)TomoInputsPtr->node_num*(Real_t)SinogramPtr->N_p*(Real_t)SinogramPtr->N_r*(Real_t)SinogramPtr->N_t); multifree(zero_count,2); multifree(thread_num,2); multifree(z_start,2); multifree(z_stop,2); free(send_reqs); free(recv_reqs); /* multifree(offset_numerator,2); multifree(offset_denominator,2);*/ avg_update_percentage = 100*tempUpdate/vox_mag; check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Percentage average magnitude of voxel updates is %f.\n", avg_update_percentage); if (avg_update_percentage < TomoInputsPtr->StopThreshold) { check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Percentage average magnitude of voxel updates is less than convergence threshold.\n"); return (1); } return(0); } /*ICD_BackProject calls the ICD optimization function repeatedly till the stopping criteria is met.*/ int ICD_BackProject(Sinogram* SinogramPtr, ScannedObject* ScannedObjectPtr, TomoInputs* TomoInputsPtr) { #ifndef NO_COST_CALCULATE Real_t cost, cost_0_iter, cost_last_iter, percentage_change_in_cost = 0; char costfile[100]=COST_FILENAME; #endif Real_arr_t ***ErrorSino, **H_r, *H_t; Real_t x, y; int32_t j, flag = 0, Iter, i, k; int dimTiff[4]; char VarEstFile[100] = VAR_PARAM_FILENAME; char scaled_error_file[100] = SCALED_ERROR_SINO_FILENAME; time_t start; char detect_file[100] = DETECTOR_RESPONSE_FILENAME; char projselect_file[100] = PROJ_SELECT_FILENAME; char MagUpdateMapFile[100] = MAG_UPDATE_FILENAME; uint8_t ***Mask; /*AMatrixCol *VoxelLineResponse;*/ #ifdef POSITIVITY_CONSTRAINT check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Enforcing positivity constraint\n"); #endif Real_arr_t**** MagUpdateMap = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x); H_r = (Real_arr_t **)multialloc(sizeof(Real_arr_t), 2, SinogramPtr->N_p, DETECTOR_RESPONSE_BINS + 1); H_t = (Real_arr_t *)get_spc(DETECTOR_RESPONSE_BINS + 1, sizeof(Real_arr_t)); ErrorSino = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinogramPtr->N_p, SinogramPtr->N_r, SinogramPtr->N_t); Mask = (uint8_t***)multialloc(sizeof(uint8_t), 3, ScannedObjectPtr->N_time, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x); memset(&(MagUpdateMap[0][0][0][0]), 0, ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x*sizeof(Real_arr_t)); /* omp_set_num_threads(TomoInputsPtr->num_threads);*/ check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Number of CPU cores is %d\n", (int)omp_get_num_procs()); /* check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "ICD_BackProject: Number of threads is %d\n", TomoInputsPtr->num_threads) ;*/ for (i = 0; i < ScannedObjectPtr->N_time; i++) for (j = 0; j < ScannedObjectPtr->N_y; j++) for (k = 0; k < ScannedObjectPtr->N_x; k++){ x = ScannedObjectPtr->x0 + ((Real_t)k + 0.5)*ScannedObjectPtr->delta_xy; y = ScannedObjectPtr->y0 + ((Real_t)j + 0.5)*ScannedObjectPtr->delta_xy; if (x*x + y*y < TomoInputsPtr->radius_obj*TomoInputsPtr->radius_obj) Mask[i][j][k] = 1; else Mask[i][j][k] = 0; } DetectorResponseProfile (H_r, H_t, SinogramPtr, ScannedObjectPtr, TomoInputsPtr); dimTiff[0] = 1; dimTiff[1] = 1; dimTiff[2] = SinogramPtr->N_p; dimTiff[3] = DETECTOR_RESPONSE_BINS+1; sprintf(detect_file, "%s_n%d", 
detect_file, TomoInputsPtr->node_rank); if (TomoInputsPtr->Write2Tiff == 1) if (WriteMultiDimArray2Tiff (detect_file, dimTiff, 0, 1, 2, 3, &(H_r[0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error; start = time(NULL); if (initObject(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, MagUpdateMap)) goto error; check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Time taken to read object = %fmins\n", difftime(time(NULL),start)/60.0); if (initErrorSinogam(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, H_r, ErrorSino/*, VoxelLineResponse*/)) goto error; check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Time taken to initialize object and compute error sinogram = %fmins\n", difftime(time(NULL),start)/60.0); #ifndef NO_COST_CALCULATE cost = computeCost(SinogramPtr,ScannedObjectPtr,TomoInputsPtr,ErrorSino); cost_0_iter = cost; cost_last_iter = cost; check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "------------- Iteration 0, Cost = %f------------\n",cost); if (TomoInputsPtr->node_rank == 0) Write2Bin (costfile, 1, 1, 1, 1, sizeof(Real_t), &cost, TomoInputsPtr->debug_file_ptr); #endif /*Cost calculation endif*/ start=time(NULL); for (Iter = 1; Iter <= TomoInputsPtr->NumIter; Iter++) { flag = updateVoxelsTimeSlices (SinogramPtr, ScannedObjectPtr, TomoInputsPtr, H_r, /*VoxelLineResponse,*/ ErrorSino, Iter, MagUpdateMap, Mask); if (TomoInputsPtr->WritePerIter == 1) if (write_ObjectProjOff2TiffBinPerIter (SinogramPtr, ScannedObjectPtr, TomoInputsPtr)) goto error; #ifndef NO_COST_CALCULATE cost = computeCost(SinogramPtr,ScannedObjectPtr,TomoInputsPtr,ErrorSino); percentage_change_in_cost = ((cost - cost_last_iter)/(cost - cost_0_iter))*100.0; check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Percentage change in cost is %f.\n", percentage_change_in_cost); check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Variance parameter estimate = %f.\n", TomoInputsPtr->var_est); check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "------------- Iteration = %d, Cost = %f, Time since start of ICD = %fmins ------------\n",Iter,cost,difftime(time(NULL),start)/60.0); if (TomoInputsPtr->node_rank == 0) Append2Bin (costfile, 1, 1, 1, 1, sizeof(Real_t), &cost, TomoInputsPtr->debug_file_ptr); check_error(cost > cost_last_iter, TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Cost value increased.\n"); cost_last_iter = cost; /*if (percentage_change_in_cost < TomoInputsPtr->cost_thresh && flag != 0 && Iter > 1){*/ if (flag != 0 && Iter > 1){ check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Convergence criteria is met.\n"); break; } #else check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Variance parameter estimate = %f\n",TomoInputsPtr->var_est); check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "-------------ICD_BackProject: ICD Iter = %d, time since start of ICD = %fmins------------.\n",Iter,difftime(time(NULL),start)/60.0); if (flag != 0 && Iter > 1){ check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Convergence criteria is met.\n"); break; } #endif flag = fflush(TomoInputsPtr->debug_file_ptr); if (flag != 0) check_warn(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Cannot flush buffer.\n"); } for (i = 0; i < SinogramPtr->N_p; i++) for (j = 0; j < SinogramPtr->N_r; j++) for (k = 0; k < SinogramPtr->N_t; k++) ErrorSino[i][j][k] *= sqrt(TomoInputsPtr->Weight[i][j][k]); if 
(TomoInputsPtr->node_rank == 0) Write2Bin (VarEstFile, 1, 1, 1, 1, sizeof(Real_t), &(TomoInputsPtr->var_est), TomoInputsPtr->debug_file_ptr); int32_t size = ScannedObjectPtr->N_time*TomoInputsPtr->num_z_blocks*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x; if (write_SharedBinFile_At (MagUpdateMapFile, &(MagUpdateMap[0][0][0][0]), TomoInputsPtr->node_rank*size, size, TomoInputsPtr->debug_file_ptr)) goto error; sprintf(scaled_error_file, "%s_n%d", scaled_error_file, TomoInputsPtr->node_rank); sprintf(projselect_file, "%s_n%d", projselect_file, TomoInputsPtr->node_rank); dimTiff[0] = 1; dimTiff[1] = SinogramPtr->N_p; dimTiff[2] = SinogramPtr->N_r; dimTiff[3] = SinogramPtr->N_t; if (TomoInputsPtr->Write2Tiff == 1) { if (WriteMultiDimArray2Tiff (scaled_error_file, dimTiff, 0, 3, 1, 2, &(ErrorSino[0][0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error; if (WriteBoolArray2Tiff (projselect_file, dimTiff, 0, 3, 1, 2, &(SinogramPtr->ProjSelect[0][0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error; } multifree(ErrorSino,3); multifree(H_r,2); free(H_t); multifree(Mask,3); multifree(MagUpdateMap, 4); check_debug(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Finished running ICD_BackProject.\n"); flag = fflush(TomoInputsPtr->debug_file_ptr); if (flag != 0 ) check_warn(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "Cannot flush buffer.\n"); check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, "The estimated value of variance parameter is %f.\n", TomoInputsPtr->var_est); return(0); error: multifree(ErrorSino,3); multifree(H_r,2); free(H_t); multifree(Mask,3); multifree(MagUpdateMap, 4); return(-1); }
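/*
 * Illustrative sketch (an assumed example, not part of the original source):
 * per axis, the factor-of-2 upsampling used by the interpolation functions
 * above reduces to a 1D stencil in which each input sample spawns two outputs
 * that mix it with its nearer neighbor using weights 3/4 and 1/4, with
 * blended/replicated samples at the two boundaries. The helper name
 * upsample_1d and the test data below are hypothetical, chosen only for this
 * demonstration.
 */
#include <stdio.h>

/* Upsample in[0..n-1] by 2 into out[0..2n-1] with the 3/4-1/4 stencil (n >= 2). */
static void upsample_1d(const double *in, double *out, int n)
{
	int k;
	out[0] = in[0];                                 /* replicate left edge */
	out[1] = (3.0*in[0] + in[1])/4.0;
	out[2*n - 2] = (in[n - 2] + 3.0*in[n - 1])/4.0;
	out[2*n - 1] = in[n - 1];                       /* replicate right edge */
	for (k = 1; k < n - 1; k++){
		out[2*k] = (in[k-1] + 3.0*in[k])/4.0;       /* weighted toward in[k] */
		out[2*k + 1] = (3.0*in[k] + in[k+1])/4.0;   /* weighted toward in[k] */
	}
}

int main(void)
{
	double in[4] = {0.0, 1.0, 2.0, 3.0}, out[8];
	int k;
	upsample_1d(in, out, 4);
	for (k = 0; k < 8; k++)
		printf("%.3f ", out[k]);  /* prints: 0.000 0.250 0.750 1.250 1.750 2.250 2.750 3.000 */
	printf("\n");
	return 0;
}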
#include "XT_Constants.h" #include <stdio.h> #include <math.h> #include <stdlib.h> #include "allocate.h" #include "randlib.h" #include <time.h> #include "XT_AMatrix.h" #include "XT_Profile.h" #include "XT_Structures.h" #include "XT_IOMisc.h" #include "XT_NHICD.h" #include "omp.h" #include "XT_MPI.h" #include <mpi.h> #include "XT_VoxUpdate.h" #include "XT_ForwardProject.h" #include "XT_MPIIO.h" #include "XT_Debug.h" #include "XT_OffsetError.h" /* computes the location of (i,j,k) th element in a 1D array */ int32_t array_loc_1D(int32_t i, int32_t j, int32_t k, int32_t N_j, int32_t N_k) { return (i * N_j * N_k + j * N_k + k); } /* * finds the maximum in a array 'array_in' with number of elements being * 'num' */ int32_t find_max(int32_t * array_in, int32_t num) { int32_t i, maxnum; maxnum = array_in[0]; for (i = 1; i < num; i++) if (array_in[i] > maxnum) maxnum = array_in[i]; return (maxnum); } /* converts the value 'val' to hounsfield units and returns it */ Real_t convert2Hounsfield(Real_t val) { Real_t slope, c; slope = (HOUNSFIELD_WATER_MAP - HOUNSFIELD_AIR_MAP) / (WATER_MASS_ATT_COEFF * WATER_DENSITY - AIR_MASS_ATT_COEFF * AIR_DENSITY) / HFIELD_UNIT_CONV_CONST; c = -slope * (AIR_MASS_ATT_COEFF * AIR_DENSITY * HFIELD_UNIT_CONV_CONST); return (slope * val + c); } /* * Computes the qGGMRF spatial prior cost value at delta = x_i - x_j. i & j * being the voxel and its neighbor */ Real_t CE_QGGMRF_Spatial_Value(Real_t delta, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr) { return ((pow(fabs(delta), MRF_Q) / TomoInputsPtr->Sigma_S_Q) / (ScannedObjectPtr->C_S + pow(fabs(delta), MRF_Q - MRF_P) / TomoInputsPtr->Sigma_S_Q_P)); } /* * Computes the qGGMRF temporal prior cost value at delta = x_i - x_j. i & j * being the voxel and its neighbor */ Real_t CE_QGGMRF_Temporal_Value(Real_t delta, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr) { return ((pow(fabs(delta), MRF_Q) / TomoInputsPtr->Sigma_T_Q) / (ScannedObjectPtr->C_T + pow(fabs(delta), MRF_Q - MRF_P) / TomoInputsPtr->Sigma_T_Q_P)); } /* * Computes the qGGMRF spatial prior derivative at delta = x_i - x_j. i & j * being the voxel and its neighbor */ Real_t CE_QGGMRF_Spatial_Derivative(Real_t delta, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr) { Real_t temp1, temp2, temp3; temp1 = pow(fabs(delta), MRF_Q - MRF_P) / (TomoInputsPtr->Sigma_S_Q_P); temp2 = pow(fabs(delta), MRF_Q - 1); temp3 = ScannedObjectPtr->C_S + temp1; if (delta < 0) return ((-1 * temp2 / (temp3 * TomoInputsPtr->Sigma_S_Q)) * (MRF_Q - ((MRF_Q - MRF_P) * temp1) / (temp3))); else { return ((temp2 / (temp3 * TomoInputsPtr->Sigma_S_Q)) * (MRF_Q - ((MRF_Q - MRF_P) * temp1) / (temp3))); } } /* * Computes the qGGMRF temporal prior derivative at delta = x_i - x_j. 
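 * In the code's notation, with r = |delta|^(MRF_Q - MRF_P) / Sigma_T_Q_P, the
 * value returned below is sign(delta) * |delta|^(MRF_Q - 1) * (MRF_Q - (MRF_Q -
 * MRF_P) * r / (C_T + r)) / (Sigma_T_Q * (C_T + r)); the spatial derivative
 * above is the same expression with Sigma_S_Q, Sigma_S_Q_P and C_S in place of
 * the temporal parameters.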
 * Here i and j index the voxel and its neighbor.
 */
Real_t CE_QGGMRF_Temporal_Derivative(Real_t delta, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr)
{
	Real_t temp1, temp2, temp3;
	temp1 = pow(fabs(delta), MRF_Q - MRF_P) / (TomoInputsPtr->Sigma_T_Q_P);
	temp2 = pow(fabs(delta), MRF_Q - 1);
	temp3 = ScannedObjectPtr->C_T + temp1;
	if (delta < 0)
		return ((-1 * temp2 / (temp3 * TomoInputsPtr->Sigma_T_Q)) * (MRF_Q - ((MRF_Q - MRF_P) * temp1) / (temp3)));
	else {
		return ((temp2 / (temp3 * TomoInputsPtr->Sigma_T_Q)) * (MRF_Q - ((MRF_Q - MRF_P) * temp1) / (temp3)));
	}
}

/* Computes the qGGMRF spatial prior second derivative at delta = 0 */
Real_t CE_QGGMRF_Spatial_SecondDerivative(ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr)
{
	return MRF_Q / (TomoInputsPtr->Sigma_S_Q * ScannedObjectPtr->C_S);
}

/* Computes the qGGMRF temporal prior second derivative at delta = 0 */
Real_t CE_QGGMRF_Temporal_SecondDerivative(ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr)
{
	return MRF_Q / (TomoInputsPtr->Sigma_T_Q * ScannedObjectPtr->C_T);
}

/*
 * Computes the voxel update and returns it. V is the present value of the
 * voxel. THETA1 and THETA2 are the values used in the voxel update.
 * Spatial_Nhood and Time_Nhood give the values of voxels in the neighborhood
 * of V. Time_BDFlag and Spatial_BDFlag are masks which determine whether a
 * neighbor should be included in the neighborhood or not.
 */
Real_t CE_FunctionalSubstitution(Real_t V, Real_t THETA1, Real_t THETA2, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr, Real_t Spatial_Nhood[NHOOD_Y_MAXDIM][NHOOD_X_MAXDIM][NHOOD_Z_MAXDIM], Real_t Time_Nhood[NHOOD_TIME_MAXDIM - 1], bool Spatial_BDFlag[NHOOD_Y_MAXDIM][NHOOD_X_MAXDIM][NHOOD_Z_MAXDIM], bool Time_BDFlag[NHOOD_TIME_MAXDIM - 1])
{
	Real_t u, temp1 = 0, temp2 = 0, temp_const, RefValue = 0, Delta0;
	Real_t QGGMRF_Params;
	int32_t i, j, k;
	RefValue = V;
	/* Need to loop this for multiple iterations of the substitute function */
	for (i = 0; i < NHOOD_Y_MAXDIM; i++)
		for (j = 0; j < NHOOD_X_MAXDIM; j++)
			for (k = 0; k < NHOOD_Z_MAXDIM; k++) {
				if (Spatial_BDFlag[i][j][k] == true && (i != (NHOOD_Y_MAXDIM - 1) / 2 || j != (NHOOD_X_MAXDIM - 1) / 2 || k != (NHOOD_Z_MAXDIM - 1) / 2)) {
					Delta0 = (RefValue - Spatial_Nhood[i][j][k]);
					if (Delta0 != 0)
						QGGMRF_Params = CE_QGGMRF_Spatial_Derivative(Delta0, ScannedObjectPtr, TomoInputsPtr) / (Delta0);
					else {
						QGGMRF_Params = CE_QGGMRF_Spatial_SecondDerivative(ScannedObjectPtr, TomoInputsPtr);
					}
					temp_const = TomoInputsPtr->Spatial_Filter[i][j][k] * QGGMRF_Params;
					temp1 += temp_const * Spatial_Nhood[i][j][k];
					temp2 += temp_const;
				}
			}
	for (i = 0; i < NHOOD_TIME_MAXDIM - 1; i++) {
		if (Time_BDFlag[i] == true) {
			Delta0 = (RefValue - Time_Nhood[i]);
			if (Delta0 != 0)
				QGGMRF_Params = CE_QGGMRF_Temporal_Derivative(Delta0, ScannedObjectPtr, TomoInputsPtr) / (Delta0);
			else {
				QGGMRF_Params = CE_QGGMRF_Temporal_SecondDerivative(ScannedObjectPtr, TomoInputsPtr);
			}
			temp_const = TomoInputsPtr->Time_Filter[0] * QGGMRF_Params;
			temp1 += temp_const * Time_Nhood[i];
			temp2 += temp_const;
		}
	}
	u = (temp1 + (THETA2 * V) - THETA1) / (temp2 + THETA2);
	RefValue = RefValue + TomoInputsPtr->alpha * (u - RefValue);
#ifdef POSITIVITY_CONSTRAINT
	if (RefValue <= 0)
		RefValue = 0;
#endif
	return RefValue;
}

/* computes the value of the cost function.
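 * The data term is a generalized-Huber penalty on the whitened residual
 * ErrorSino * sqrt(Weight): entries flagged in ProjSelect contribute
 * quadratically and the remaining entries linearly, while the qGGMRF spatial
 * and temporal cliques supply the prior term.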
'ErrorSino' is the error sinogram */ Real_t computeCost(Sinogram * SinogramPtr, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr, Real_arr_t *** ErrorSino) { Real_t cost = 0, temp = 0, forward = 0, prior = 0; Real_t delta; int32_t i, j, k, p, N_z; bool j_minus, k_minus, i_plus, j_plus, k_plus, p_plus; for (i = 0; i < SinogramPtr->N_p; i++) for (j = 0; j < SinogramPtr->N_r; j++) for (k = 0; k < SinogramPtr->N_t; k++) { temp = ErrorSino[i][j][k] * sqrt(TomoInputsPtr->Weight[i][j][k]); if (SinogramPtr->ProjSelect[i][j][k] == true) temp = temp * temp; else temp = 2.0 * TomoInputsPtr->ErrorSinoDelta * TomoInputsPtr->ErrorSinoThresh * fabs(temp) + TomoInputsPtr->ErrorSinoThresh * TomoInputsPtr->ErrorSinoThresh * (1.0 - 2.0 * TomoInputsPtr->ErrorSinoDelta); cost += temp; } cost /= 2.0; /* * When computing the cost of the prior term it is important to make sure * that you don't include the cost of any pair of neighbors more than * once. In this code, a certain sense of causality is used to compute * the cost. We also assume that the weghting kernel given by 'Filter' is * symmetric. Let i, j and k correspond to the three dimensions. If we go * forward to i+1, then all neighbors at j-1, j, j+1, k+1, k, k-1 are to * be considered. However, if for the same i, if we go forward to j+1, * then all k-1, k, and k+1 should be considered. For same i and j, only * the neighbor at k+1 is considred. */ temp = 0; N_z = ScannedObjectPtr->N_z + 2; if (TomoInputsPtr->node_rank == TomoInputsPtr->node_num - 1) N_z = ScannedObjectPtr->N_z + 1; for (i = 0; i < ScannedObjectPtr->N_time; i++) for (p = 1; p < ScannedObjectPtr->N_z + 1; p++) for (j = 0; j < ScannedObjectPtr->N_y; j++) { for (k = 0; k < ScannedObjectPtr->N_x; k++) { j_minus = (j - 1 >= 0) ? true : false; k_minus = (k - 1 >= 0) ? true : false; p_plus = (p + 1 < N_z) ? true : false; i_plus = (i + 1 < ScannedObjectPtr->N_time) ? true : false; j_plus = (j + 1 < ScannedObjectPtr->N_y) ? true : false; k_plus = (k + 1 < ScannedObjectPtr->N_x) ? 
true : false; if (k_plus == true) { delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j][k + 1]); temp += TomoInputsPtr->Spatial_Filter[1][1][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if (j_plus == true) { if (k_minus == true) { delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k - 1]); temp += TomoInputsPtr->Spatial_Filter[1][2][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k]); temp += TomoInputsPtr->Spatial_Filter[1][2][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); if (k_plus == true) { delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k + 1]); temp += TomoInputsPtr->Spatial_Filter[1][2][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } } if (p_plus == true) { if (j_minus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k]; temp += TomoInputsPtr->Spatial_Filter[2][0][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j][k]; temp += TomoInputsPtr->Spatial_Filter[2][1][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); if (j_plus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j + 1][k]; temp += TomoInputsPtr->Spatial_Filter[2][2][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if (j_minus == true) { if (k_minus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k - 1]; temp += TomoInputsPtr->Spatial_Filter[2][0][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if (k_plus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k + 1]; temp += TomoInputsPtr->Spatial_Filter[2][0][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } } if (k_minus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j][k - 1]; temp += TomoInputsPtr->Spatial_Filter[2][1][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if (j_plus == true) { if (k_minus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j + 1][k - 1]; temp += TomoInputsPtr->Spatial_Filter[2][2][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if (k_plus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j + 1][k + 1]; temp += TomoInputsPtr->Spatial_Filter[2][2][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } } if (k_plus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j][k + 1]; temp += TomoInputsPtr->Spatial_Filter[2][1][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } } if (i_plus == true) { delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i + 1][p][j][k]); temp += TomoInputsPtr->Time_Filter[0] * CE_QGGMRF_Temporal_Value(delta, ScannedObjectPtr, TomoInputsPtr); } } } /* * Use MPI reduction operation to add the forward and prior costs from * all nodes */ MPI_Reduce(&cost, &forward, 1, MPI_REAL_DATATYPE, MPI_SUM, 0, MPI_COMM_WORLD); MPI_Reduce(&temp, &prior, 1, MPI_REAL_DATATYPE, MPI_SUM, 0, MPI_COMM_WORLD); if 
(TomoInputsPtr->node_rank == 0) { check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Scaled error sino cost = %f\n", forward); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Decrease in scaled error sino cost = %f\n", TomoInputsPtr->ErrorSino_Cost - forward); TomoInputsPtr->ErrorSino_Cost = forward; forward += (Real_t) TomoInputsPtr->node_num * (Real_t) SinogramPtr->N_p * (Real_t) SinogramPtr->N_r * (Real_t) SinogramPtr->N_t * log(TomoInputsPtr->var_est) / 2; check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Forward cost = %f\n", forward); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Prior cost = %f\n", prior); TomoInputsPtr->Forward_Cost = forward; TomoInputsPtr->Prior_Cost = prior; cost = forward + prior; } /* Broadcase the value of cost to all nodes */ MPI_Bcast(&cost, 1, MPI_REAL_DATATYPE, 0, MPI_COMM_WORLD); return cost; } /* * Upsamples the (N_time x N_z x N_y x N_x) size 'Init' by a factor of 2 * along the x-y plane and stores it in 'Object' */ void upsample_bilinear_2D(Real_arr_t **** Object, Real_arr_t **** Init, int32_t N_time, int32_t N_z, int32_t N_y, int32_t N_x) { int32_t i, j, k, m; Real_arr_t **buffer; for (i = 0; i < N_time; i++) for (m = 0; m < N_z; m++) { buffer = (Real_arr_t **) multialloc(sizeof(Real_arr_t), 2, N_y, 2 * N_x); for (j = 0; j < N_y; j++) { buffer[j][0] = Init[i][m][j][0]; buffer[j][1] = (3.0 * Init[i][m][j][0] + Init[i][m][j][1]) / 4.0; buffer[j][2 * N_x - 1] = Init[i][m][j][N_x - 1]; buffer[j][2 * N_x - 2] = (Init[i][m][j][N_x - 2] + 3.0 * Init[i][m][j][N_x - 1]) / 4.0; for (k = 1; k < N_x - 1; k++) { buffer[j][2 * k] = (Init[i][m][j][k - 1] + 3.0 * Init[i][m][j][k]) / 4.0; buffer[j][2 * k + 1] = (3.0 * Init[i][m][j][k] + Init[i][m][j][k + 1]) / 4.0; } } for (k = 0; k < 2 * N_x; k++) { Object[i][m][0][k] = buffer[0][k]; Object[i][m][1][k] = (3.0 * buffer[0][k] + buffer[1][k]) / 4.0; Object[i][m][2 * N_y - 1][k] = buffer[N_y - 1][k]; Object[i][m][2 * N_y - 2][k] = (buffer[N_y - 2][k] + 3.0 * buffer[N_y - 1][k]) / 4.0; } for (j = 1; j < N_y - 1; j++) { for (k = 0; k < 2 * N_x; k++) { Object[i][m][2 * j][k] = (buffer[j - 1][k] + 3.0 * buffer[j][k]) / 4.0; Object[i][m][2 * j + 1][k] = (3 * buffer[j][k] + buffer[j + 1][k]) / 4.0; } } multifree(buffer, 2); } } /* * Upsamples the (N_z x N_y x N_x) size 'Init' by a factor of 2 along the x-y * plane and stores it in 'Object' */ void upsample_object_bilinear_2D(Real_arr_t *** Object, Real_arr_t *** Init, int32_t N_z, int32_t N_y, int32_t N_x) { int32_t j, k, slice; Real_arr_t **buffer; buffer = (Real_arr_t **) multialloc(sizeof(Real_arr_t), 2, N_y, 2 * N_x); for (slice = 0; slice < N_z; slice++) { for (j = 0; j < N_y; j++) { buffer[j][0] = Init[slice][j][0]; buffer[j][1] = (3.0 * Init[slice][j][0] + Init[slice][j][1]) / 4.0; buffer[j][2 * N_x - 1] = Init[slice][j][N_x - 1]; buffer[j][2 * N_x - 2] = (Init[slice][j][N_x - 2] + 3.0 * Init[slice][j][N_x - 1]) / 4.0; for (k = 1; k < N_x - 1; k++) { buffer[j][2 * k] = (Init[slice][j][k - 1] + 3.0 * Init[slice][j][k]) / 4.0; buffer[j][2 * k + 1] = (3.0 * Init[slice][j][k] + Init[slice][j][k + 1]) / 4.0; } } for (k = 0; k < 2 * N_x; k++) { Object[slice + 1][0][k] = buffer[0][k]; Object[slice + 1][1][k] = (3.0 * buffer[0][k] + buffer[1][k]) / 4.0; Object[slice + 1][2 * N_y - 1][k] = buffer[N_y - 1][k]; Object[slice + 1][2 * N_y - 2][k] = (buffer[N_y - 2][k] + 3.0 * buffer[N_y - 1][k]) / 4.0; } for (j = 1; j < N_y - 1; j++) { for (k = 0; k < 2 * N_x; k++) { 
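				/* Interior output rows: row 2j blends input row j (weight 3/4) with row
				   j - 1 (weight 1/4), and row 2j+1 blends row j (3/4) with row j + 1 (1/4). */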
Object[slice + 1][2 * j][k] = (buffer[j - 1][k] + 3.0 * buffer[j][k]) / 4.0; Object[slice + 1][2 * j + 1][k] = (3 * buffer[j][k] + buffer[j + 1][k]) / 4.0; } } } multifree(buffer, 2); } void upsample_bilinear_3D(Real_arr_t **** Object, Real_arr_t **** Init, int32_t N_time, int32_t N_z, int32_t N_y, int32_t N_x) { int32_t i, j, k, slice; Real_t ***buffer2D, ***buffer3D; for (i = 0; i < N_time; i++) { buffer2D = (Real_t ***) multialloc(sizeof(Real_t), 3, N_z, N_y, 2 * N_x); buffer3D = (Real_t ***) multialloc(sizeof(Real_t), 3, N_z, 2 * N_y, 2 * N_x); for (slice = 0; slice < N_z; slice++) { for (j = 0; j < N_y; j++) { buffer2D[slice][j][0] = Init[i][slice][j][0]; buffer2D[slice][j][1] = (3.0 * Init[i][slice][j][0] + Init[i][slice][j][1]) / 4.0; buffer2D[slice][j][2 * N_x - 1] = Init[i][slice][j][N_x - 1]; buffer2D[slice][j][2 * N_x - 2] = (Init[i][slice][j][N_x - 2] + 3.0 * Init[i][slice][j][N_x - 1]) / 4.0; for (k = 1; k < N_x - 1; k++) { buffer2D[slice][j][2 * k] = (Init[i][slice][j][k - 1] + 3.0 * Init[i][slice][j][k]) / 4.0; buffer2D[slice][j][2 * k + 1] = (3.0 * Init[i][slice][j][k] + Init[i][slice][j][k + 1]) / 4.0; } } for (k = 0; k < 2 * N_x; k++) { buffer3D[slice][0][k] = buffer2D[slice][0][k]; buffer3D[slice][1][k] = (3.0 * buffer2D[slice][0][k] + buffer2D[slice][1][k]) / 4.0; buffer3D[slice][2 * N_y - 1][k] = buffer2D[slice][N_y - 1][k]; buffer3D[slice][2 * N_y - 2][k] = (buffer2D[slice][N_y - 2][k] + 3.0 * buffer2D[slice][N_y - 1][k]) / 4.0; } for (j = 1; j < N_y - 1; j++) for (k = 0; k < 2 * N_x; k++) { buffer3D[slice][2 * j][k] = (buffer2D[slice][j - 1][k] + 3.0 * buffer2D[slice][j][k]) / 4.0; buffer3D[slice][2 * j + 1][k] = (3 * buffer2D[slice][j][k] + buffer2D[slice][j + 1][k]) / 4.0; } } for (j = 0; j < 2 * N_y; j++) for (k = 0; k < 2 * N_x; k++) { Object[i][0][j][k] = buffer3D[0][j][k]; Object[i][1][j][k] = (3.0 * buffer3D[0][j][k] + buffer3D[1][j][k]) / 4.0; Object[i][2 * N_z - 1][j][k] = buffer3D[N_z - 1][j][k]; Object[i][2 * N_z - 2][j][k] = (3.0 * buffer3D[N_z - 1][j][k] + buffer3D[N_z - 2][j][k]) / 4.0; } for (slice = 1; slice < N_z - 1; slice++) for (j = 0; j < 2 * N_y; j++) for (k = 0; k < 2 * N_x; k++) { Object[i][2 * slice][j][k] = (buffer3D[slice - 1][j][k] + 3.0 * buffer3D[slice][j][k]) / 4.0; Object[i][2 * slice + 1][j][k] = (3.0 * buffer3D[slice][j][k] + buffer3D[slice + 1][j][k]) / 4.0; } multifree(buffer2D, 3); multifree(buffer3D, 3); } } /* * 'InitObject' intializes the Object to be reconstructed to either 0 or an * interpolated version of the previous reconstruction. It is used in multi * resolution reconstruction in which after every coarse resolution * reconstruction the object should be intialized with an interpolated * version of the reconstruction following which the object will be * reconstructed at a finer resolution. 
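 * (The initICD input documented at initObject further below selects between
 * the zero and interpolated initializations described here.)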
 */
/*
 * Upsamples the (N_z x N_y x N_x) size 'Init' by a factor of 2 along the
 * x, y and z axes and stores it in 'Object'
 */
void upsample_object_bilinear_3D(Real_arr_t *** Object, Real_arr_t *** Init, int32_t N_z, int32_t N_y, int32_t N_x)
{
	int32_t j, k, slice;
	Real_t ***buffer2D, ***buffer3D;
	buffer2D = (Real_t ***) multialloc(sizeof(Real_t), 3, N_z, N_y, 2 * N_x);
	buffer3D = (Real_t ***) multialloc(sizeof(Real_t), 3, N_z, 2 * N_y, 2 * N_x);
	for (slice = 0; slice < N_z; slice++) {
		for (j = 0; j < N_y; j++) {
			buffer2D[slice][j][0] = Init[slice][j][0];
			buffer2D[slice][j][1] = (3.0 * Init[slice][j][0] + Init[slice][j][1]) / 4.0;
			buffer2D[slice][j][2 * N_x - 1] = Init[slice][j][N_x - 1];
			buffer2D[slice][j][2 * N_x - 2] = (Init[slice][j][N_x - 2] + 3.0 * Init[slice][j][N_x - 1]) / 4.0;
			for (k = 1; k < N_x - 1; k++) {
				buffer2D[slice][j][2 * k] = (Init[slice][j][k - 1] + 3.0 * Init[slice][j][k]) / 4.0;
				buffer2D[slice][j][2 * k + 1] = (3.0 * Init[slice][j][k] + Init[slice][j][k + 1]) / 4.0;
			}
		}
		for (k = 0; k < 2 * N_x; k++) {
			buffer3D[slice][0][k] = buffer2D[slice][0][k];
			buffer3D[slice][1][k] = (3.0 * buffer2D[slice][0][k] + buffer2D[slice][1][k]) / 4.0;
			buffer3D[slice][2 * N_y - 1][k] = buffer2D[slice][N_y - 1][k];
			buffer3D[slice][2 * N_y - 2][k] = (buffer2D[slice][N_y - 2][k] + 3.0 * buffer2D[slice][N_y - 1][k]) / 4.0;
		}
		for (j = 1; j < N_y - 1; j++)
			for (k = 0; k < 2 * N_x; k++) {
				buffer3D[slice][2 * j][k] = (buffer2D[slice][j - 1][k] + 3.0 * buffer2D[slice][j][k]) / 4.0;
				buffer3D[slice][2 * j + 1][k] = (3.0 * buffer2D[slice][j][k] + buffer2D[slice][j + 1][k]) / 4.0;
			}
	}
	for (j = 0; j < 2 * N_y; j++)
		for (k = 0; k < 2 * N_x; k++) {
			Object[1][j][k] = buffer3D[0][j][k];
			Object[2][j][k] = (3.0 * buffer3D[0][j][k] + buffer3D[1][j][k]) / 4.0;
			Object[2 * N_z][j][k] = buffer3D[N_z - 1][j][k];
			Object[2 * N_z - 1][j][k] = (3.0 * buffer3D[N_z - 1][j][k] + buffer3D[N_z - 2][j][k]) / 4.0;
		}
	for (slice = 1; slice < N_z - 1; slice++)
		for (j = 0; j < 2 * N_y; j++)
			for (k = 0; k < 2 * N_x; k++) {
				Object[2 * slice + 1][j][k] = (buffer3D[slice - 1][j][k] + 3.0 * buffer3D[slice][j][k]) / 4.0;
				Object[2 * slice + 2][j][k] = (3.0 * buffer3D[slice][j][k] + buffer3D[slice + 1][j][k]) / 4.0;
			}
	multifree(buffer2D, 3);
	multifree(buffer3D, 3);
}

/*
 * randomly select the voxel lines which need to be updated along the x-y
 * plane for each z-block and time slice
 */
void randomly_select_x_y(ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr, uint8_t *** Mask)
{
	int32_t i, j, num, n, Index, col, row, *Counter, ArraySize, block;
	ArraySize = ScannedObjectPtr->N_y * ScannedObjectPtr->N_x;
	Counter = (int32_t *) get_spc(ArraySize, sizeof(int32_t));
	for (i = 0; i < ScannedObjectPtr->N_time; i++)
		for (block = 0; block < TomoInputsPtr->num_z_blocks; block++) {
			ArraySize = ScannedObjectPtr->N_y * ScannedObjectPtr->N_x;
			for (Index = 0; Index < ArraySize; Index++)
				Counter[Index] = Index;
			TomoInputsPtr->UpdateSelectNum[i][block] = 0;
			for (j = 0; j < ScannedObjectPtr->N_x * ScannedObjectPtr->N_y; j++) {
				Index = floor(random2() * ArraySize);
				Index = (Index == ArraySize) ?
ArraySize - 1 : Index; col = Counter[Index] % ScannedObjectPtr->N_x; row = Counter[Index] / ScannedObjectPtr->N_x; for (n = block * (ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); n < (block + 1) * (ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); n++) if (Mask[i][row][col] == 1) { num = TomoInputsPtr->UpdateSelectNum[i][block]; TomoInputsPtr->x_rand_select[i][block][num] = col; TomoInputsPtr->y_rand_select[i][block][num] = row; (TomoInputsPtr->UpdateSelectNum[i][block])++; break; } Counter[Index] = Counter[ArraySize - 1]; ArraySize--; } } free(Counter); } /* * 'InitObject' intializes the Object to be reconstructed to either 0 or an * interpolated version of the previous reconstruction. It is used in multi * resolution reconstruction in which after every coarse resolution * reconstruction the object should be intialized with an interpolated * version of the reconstruction following which the object will be * reconstructed at a finer resolution. --initICD-- If 1, initializes the * object to 0 If 2, the code uses bilinear interpolation to initialize the * object if the previous reconstruction was at a lower resolution The * function also initializes the magnitude update map 'MagUpdateMap' from the * previous coarser resolution reconstruction. */ int32_t initObject(Sinogram * SinogramPtr, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr, Real_arr_t **** MagUpdateMap) { char object_file[100]; int dimTiff[4]; int32_t i, j, k, l, size, flag = 0; Real_arr_t ***Init, ****UpMapInit; for (i = 0; i < ScannedObjectPtr->N_time; i++) for (j = 0; j < ScannedObjectPtr->N_z; j++) for (k = 0; k < ScannedObjectPtr->N_y; k++) for (l = 0; l < ScannedObjectPtr->N_x; l++) ScannedObjectPtr->Object[i][j + 1][k][l] = OBJECT_INIT_VAL; if (TomoInputsPtr->initICD > 3 || TomoInputsPtr->initICD < 0) { sentinel(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "ERROR: initICD value not recognized.\n"); } else if (TomoInputsPtr->initICD == 1) { size = ScannedObjectPtr->N_z * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x; for (i = 0; i < ScannedObjectPtr->N_time; i++) { sprintf(object_file, "%s_time_%d", OBJECT_FILENAME, i); if (read_SharedBinFile_At(object_file, &(ScannedObjectPtr->Object[i][1][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; } if (TomoInputsPtr->initMagUpMap == 1) { size = ScannedObjectPtr->N_time * TomoInputsPtr->num_z_blocks * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x; if (read_SharedBinFile_At(MAG_UPDATE_FILENAME, &(MagUpdateMap[0][0][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; } } else if (TomoInputsPtr->initICD == 2 || TomoInputsPtr->initICD == 3) { if (TomoInputsPtr->initICD == 3) { Init = (Real_arr_t ***) multialloc(sizeof(Real_arr_t), 3, ScannedObjectPtr->N_z / 2, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Interpolating object using 3D bilinear interpolation.\n"); for (i = 0; i < ScannedObjectPtr->N_time; i++) { sprintf(object_file, "%s_time_%d", OBJECT_FILENAME, i); size = ScannedObjectPtr->N_z * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x / 8; if (read_SharedBinFile_At(object_file, &(Init[0][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; upsample_object_bilinear_3D(ScannedObjectPtr->Object[i], Init, ScannedObjectPtr->N_z / 2, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); } multifree(Init, 3); check_debug(TomoInputsPtr->node_rank == 
0, TomoInputsPtr->debug_file_ptr, "Done with interpolating object using 3D bilinear interpolation.\n"); } else { Init = (Real_arr_t ***) multialloc(sizeof(Real_arr_t), 3, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Interpolating object using 2D bilinear interpolation.\n"); for (i = 0; i < ScannedObjectPtr->N_time; i++) { sprintf(object_file, "%s_time_%d", OBJECT_FILENAME, i); size = ScannedObjectPtr->N_z * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x / 4; if (read_SharedBinFile_At(object_file, &(Init[0][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; upsample_object_bilinear_2D(ScannedObjectPtr->Object[i], Init, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); } multifree(Init, 3); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Done with interpolating object using 2D bilinear interpolation.\n"); } if (TomoInputsPtr->initMagUpMap == 1) { if (TomoInputsPtr->prevnum_z_blocks == TomoInputsPtr->num_z_blocks) { UpMapInit = (Real_arr_t ****) multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); size = ScannedObjectPtr->N_time * TomoInputsPtr->num_z_blocks * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x / 4; if (read_SharedBinFile_At(MAG_UPDATE_FILENAME, &(UpMapInit[0][0][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Interpolating magnitude update map using 2D bilinear interpolation.\n"); upsample_bilinear_2D(MagUpdateMap, UpMapInit, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); multifree(UpMapInit, 4); } else if (TomoInputsPtr->prevnum_z_blocks == TomoInputsPtr->num_z_blocks / 2) { UpMapInit = (Real_arr_t ****) multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks / 2, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); size = ScannedObjectPtr->N_time * TomoInputsPtr->num_z_blocks * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x / 8; if (read_SharedBinFile_At(MAG_UPDATE_FILENAME, &(UpMapInit[0][0][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Interpolating magnitude update map using 3D bilinear interpolation.\n"); upsample_bilinear_3D(MagUpdateMap, UpMapInit, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks / 2, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); multifree(UpMapInit, 4); } else { check_warn(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Number of axial blocks is incompatible with previous stage of multi-resolution.\n"); check_warn(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Initializing the multi-resolution map to zeros.\n"); } } } dimTiff[0] = ScannedObjectPtr->N_time; dimTiff[1] = TomoInputsPtr->num_z_blocks; dimTiff[2] = ScannedObjectPtr->N_y; dimTiff[3] = ScannedObjectPtr->N_x; sprintf(object_file, "%s_n%d", MAG_UPDATE_FILENAME, TomoInputsPtr->node_rank); if (TomoInputsPtr->Write2Tiff == 1) if (WriteMultiDimArray2Tiff(object_file, dimTiff, 0, 1, 2, 3, &(MagUpdateMap[0][0][0][0]), 0, TomoInputsPtr->debug_file_ptr)) flag = -1; for (i = 0; i < ScannedObjectPtr->N_time; i++) { sprintf(object_file, "%s_n%d", 
INIT_OBJECT_FILENAME, TomoInputsPtr->node_rank);
        sprintf(object_file, "%s_time_%d", object_file, i);
        dimTiff[0] = 1;
        dimTiff[1] = ScannedObjectPtr->N_z;
        dimTiff[2] = ScannedObjectPtr->N_y;
        dimTiff[3] = ScannedObjectPtr->N_x;
        if (TomoInputsPtr->Write2Tiff == 1)
            if (WriteMultiDimArray2Tiff(object_file, dimTiff, 0, 1, 2, 3, &(ScannedObjectPtr->Object[i][1][0][0]), 0, TomoInputsPtr->debug_file_ptr))
                flag = -1;
    }
    return (flag);
error:
    return (-1);
}

/*
 * 'initErrorSinogram' is used to initialize the error sinogram before the
 * start of ICD. It computes e = y - Ax - d, where Ax is computed by forward
 * projecting the object x.
 */
int32_t initErrorSinogram(Sinogram * SinogramPtr, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr, Real_arr_t ** DetectorResponse, Real_arr_t *** ErrorSino /* , AMatrixCol* VoxelLineResponse */ )
{
    Real_t pixel, avg = 0;
    int32_t dimTiff[4], i, j, k, p, sino_idx, slice, flag = 0;
    AMatrixCol *AMatrixPtr = (AMatrixCol *) get_spc(ScannedObjectPtr->N_time, sizeof(AMatrixCol));
    uint8_t AvgNumXElements = (uint8_t) ceil(3 * ScannedObjectPtr->delta_xy / SinogramPtr->delta_r);
    char error_file[100] = "error_sinogram";
    sprintf(error_file, "%s_n%d", error_file, TomoInputsPtr->node_rank);
    for (i = 0; i < ScannedObjectPtr->N_time; i++) {
        AMatrixPtr[i].values = (Real_t *) get_spc(AvgNumXElements, sizeof(Real_t));
        AMatrixPtr[i].index = (int32_t *) get_spc(AvgNumXElements, sizeof(int32_t));
    }
    memset(&(ErrorSino[0][0][0]), 0, SinogramPtr->N_p * SinogramPtr->N_t * SinogramPtr->N_r * sizeof(Real_arr_t));
    for (i = 0; i < ScannedObjectPtr->N_time; i++) {
        for (j = 0; j < ScannedObjectPtr->N_y; j++) {
            for (k = 0; k < ScannedObjectPtr->N_x; k++) {
                for (p = 0; p < ScannedObjectPtr->ProjNum[i]; p++) {
                    sino_idx = ScannedObjectPtr->ProjIdxPtr[i][p];
                    calcAMatrixColumnforAngle(SinogramPtr, ScannedObjectPtr, DetectorResponse, &(AMatrixPtr[i]), j, k, sino_idx);
                    for (slice = 0; slice < ScannedObjectPtr->N_z; slice++) {
                        /*
                         * printf("count = %d, idx = %d, val = %f\n",
                         * VoxelLineResponse[slice].count,
                         * VoxelLineResponse[slice].index[0],
                         * VoxelLineResponse[slice].values[0]);
                         */
                        pixel = ScannedObjectPtr->Object[i][slice + 1][j][k];   /* slice+1 to account for extra z slices required for MPI */
                        forward_project_voxel(SinogramPtr, pixel, ErrorSino, &(AMatrixPtr[i]) /* , &(VoxelLineResponse[slice]) */ , sino_idx, slice);
                    }
                }
            }
        }
    }
    for (i = 0; i < SinogramPtr->N_p; i++)
        for (j = 0; j < SinogramPtr->N_r; j++)
            for (k = 0; k < SinogramPtr->N_t; k++) {
                ErrorSino[i][j][k] = SinogramPtr->Projection[i][j][k] - ErrorSino[i][j][k] - SinogramPtr->ProjOffset[j][k];
                if (fabs(ErrorSino[i][j][k] * sqrt(TomoInputsPtr->Weight[i][j][k])) < TomoInputsPtr->ErrorSinoThresh)
                    SinogramPtr->ProjSelect[i][j][k] = true;
                else
                    SinogramPtr->ProjSelect[i][j][k] = false;
                /*
                 * if (ErrorSino[i][j][k]*sqrt(TomoInputsPtr->Weight[i][j][k]) <
                 * -30) TomoInputsPtr->Weight[i][j][k] = 0;
                 */
                avg += ErrorSino[i][j][k];
            }
    avg = avg / (SinogramPtr->N_r * SinogramPtr->N_t * SinogramPtr->N_p);
    check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Average of error sinogram in node %d is %f\n", TomoInputsPtr->node_rank, avg);
    dimTiff[0] = 1;
    dimTiff[1] = SinogramPtr->N_p;
    dimTiff[2] = SinogramPtr->N_r;
    dimTiff[3] = SinogramPtr->N_t;
    if (TomoInputsPtr->Write2Tiff == 1)
        flag = WriteMultiDimArray2Tiff(error_file, dimTiff, 0, 3, 1, 2, &(ErrorSino[0][0][0]), 0, TomoInputsPtr->debug_file_ptr);
    for (i = 0; i < ScannedObjectPtr->N_time; i++) {
        free(AMatrixPtr[i].values);
        free(AMatrixPtr[i].index);
    }
    free(AMatrixPtr);
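    /*
     * Editorial note: after this point the raw projections are no longer
     * referenced in this file (ICD works on the error sinogram
     * e = y - Ax - d and the weights alone), which is presumably why
     * 'Projection' is freed below rather than at the end of the program.
     */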
multifree(SinogramPtr->Projection, 3); return (flag); } /* Updates the variance parameter \sigma */ void update_variance_parameter(Sinogram * SinogramPtr, TomoInputs * TomoInputsPtr, Real_arr_t *** ErrorSino) { int32_t k, i, j; Real_t temp_acc = 0, temp = 0; for (k = 0; k < SinogramPtr->N_p; k++) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { TomoInputsPtr->Weight[k][i][j] = TomoInputsPtr->Weight[k][i][j] * TomoInputsPtr->var_est; if (SinogramPtr->ProjSelect[k][i][j] == true) temp = ErrorSino[k][i][j] * ErrorSino[k][i][j] * TomoInputsPtr->Weight[k][i][j]; else temp = fabs(ErrorSino[k][i][j]) * TomoInputsPtr->ErrorSinoDelta * TomoInputsPtr->ErrorSinoThresh * sqrt(TomoInputsPtr->Weight[k][i][j] * TomoInputsPtr->var_est); temp_acc += temp; } MPI_Allreduce(&temp_acc, &temp, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); TomoInputsPtr->var_est = temp / ((Real_t) TomoInputsPtr->node_num * (Real_t) SinogramPtr->N_p * (Real_t) SinogramPtr->N_r * (Real_t) SinogramPtr->N_t); for (k = 0; k < SinogramPtr->N_p; k++) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { TomoInputsPtr->Weight[k][i][j] /= TomoInputsPtr->var_est; if (fabs(ErrorSino[k][i][j] * sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh) SinogramPtr->ProjSelect[k][i][j] = true; else SinogramPtr->ProjSelect[k][i][j] = false; } } void update_d_offset_rect_patch_constraint(Sinogram * SinogramPtr, TomoInputs * TomoInputsPtr, Real_arr_t *** ErrorSino) { Real_t sign, **b, **Lambda, temp; Real_arr_t **x; int32_t i, j, k; b = (Real_t **) multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); Lambda = (Real_t **) multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); x = (Real_arr_t **) multialloc(sizeof(Real_arr_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); memset(&(b[0][0]), 0, SinogramPtr->N_r * SinogramPtr->N_t * sizeof(Real_t)); memset(&(Lambda[0][0]), 0, SinogramPtr->N_r * SinogramPtr->N_t * sizeof(Real_t)); memset(&(x[0][0]), 0, SinogramPtr->N_r * SinogramPtr->N_t * sizeof(Real_arr_t)); for (i = 0; i < SinogramPtr->N_r; i++) { for (j = 0; j < SinogramPtr->N_t; j++) { b[i][j] = 0; Lambda[i][j] = 0; for (k = 0; k < SinogramPtr->N_p; k++) { temp = TomoInputsPtr->ErrorSinoThresh * TomoInputsPtr->ErrorSinoDelta * sqrt(TomoInputsPtr->Weight[k][i][j]); if (SinogramPtr->ProjSelect[k][i][j] == true) { Lambda[i][j] += TomoInputsPtr->Weight[k][i][j]; b[i][j] += (ErrorSino[k][i][j] + SinogramPtr->ProjOffset[i][j]) * TomoInputsPtr->Weight[k][i][j]; } else { sign = (ErrorSino[k][i][j] > 0) - (ErrorSino[k][i][j] < 0); Lambda[i][j] += temp / fabs(ErrorSino[k][i][j]); b[i][j] += (ErrorSino[k][i][j] + SinogramPtr->ProjOffset[i][j]) * temp / fabs(ErrorSino[k][i][j]); } } } } constrained_quad_opt(Lambda, b, SinogramPtr->off_constraint, x, SinogramPtr->N_r, SinogramPtr->N_t, SinogramPtr->off_constraint_num, TomoInputsPtr); for (k = 0; k < SinogramPtr->N_p; k++) { for (i = 0; i < SinogramPtr->N_r; i++) { for (j = 0; j < SinogramPtr->N_t; j++) { ErrorSino[k][i][j] += SinogramPtr->ProjOffset[i][j] - x[i][j]; if (fabs(ErrorSino[k][i][j] * sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh) SinogramPtr->ProjSelect[k][i][j] = true; else SinogramPtr->ProjSelect[k][i][j] = false; } } } memcpy(&(SinogramPtr->ProjOffset[0][0]), &(x[0][0]), SinogramPtr->N_r * SinogramPtr->N_t * sizeof(Real_arr_t)); multifree(b, 2); multifree(Lambda, 2); multifree(x, 2); } /* Updates the projection offset error parameter d_i */ void 
update_d_offset_zero_mean_constraint(Sinogram * SinogramPtr, TomoInputs * TomoInputsPtr, Real_arr_t *** ErrorSino) { Real_t sign, **numerator, num_sum = 0, temp, **denominator, den_sum = 0, gamma = 0; int32_t i, j, k; numerator = (Real_t **) multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); denominator = (Real_t **) multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { numerator[i][j] = 0; denominator[i][j] = 0; for (k = 0; k < SinogramPtr->N_p; k++) { temp = TomoInputsPtr->ErrorSinoThresh * TomoInputsPtr->ErrorSinoDelta * sqrt(TomoInputsPtr->Weight[k][i][j]); if (SinogramPtr->ProjSelect[k][i][j] == true) { numerator[i][j] += ErrorSino[k][i][j] * TomoInputsPtr->Weight[k][i][j]; denominator[i][j] += TomoInputsPtr->Weight[k][i][j]; } else { sign = (ErrorSino[k][i][j] > 0) - (ErrorSino[k][i][j] < 0); numerator[i][j] += temp * sign; denominator[i][j] += temp / fabs(ErrorSino[k][i][j]); } } num_sum += SinogramPtr->ProjOffset[i][j] + (numerator[i][j] / denominator[i][j]); den_sum += 1.0 / denominator[i][j]; } gamma = num_sum / den_sum; for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { SinogramPtr->ProjOffset[i][j] = SinogramPtr->ProjOffset[i][j] + (numerator[i][j] - gamma) / denominator[i][j]; for (k = 0; k < SinogramPtr->N_p; k++) { ErrorSino[k][i][j] -= (numerator[i][j] - gamma) / denominator[i][j]; if (fabs(ErrorSino[k][i][j] * sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh) SinogramPtr->ProjSelect[k][i][j] = true; else SinogramPtr->ProjSelect[k][i][j] = false; } } multifree(numerator, 2); multifree(denominator, 2); } /* Updates the projection offset error parameter d_i */ void update_d_offset_unconstrained(Sinogram * SinogramPtr, TomoInputs * TomoInputsPtr, Real_arr_t *** ErrorSino) { Real_t sign, **numerator, temp, **denominator; int32_t i, j, k; numerator = (Real_t **) multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); denominator = (Real_t **) multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { numerator[i][j] = 0; denominator[i][j] = 0; for (k = 0; k < SinogramPtr->N_p; k++) { temp = TomoInputsPtr->ErrorSinoThresh * TomoInputsPtr->ErrorSinoDelta * sqrt(TomoInputsPtr->Weight[k][i][j]); if (SinogramPtr->ProjSelect[k][i][j] == true) { numerator[i][j] += ErrorSino[k][i][j] * TomoInputsPtr->Weight[k][i][j]; denominator[i][j] += TomoInputsPtr->Weight[k][i][j]; } else { sign = (ErrorSino[k][i][j] > 0) - (ErrorSino[k][i][j] < 0); numerator[i][j] += temp * sign; denominator[i][j] += temp / fabs(ErrorSino[k][i][j]); } } } for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { SinogramPtr->ProjOffset[i][j] = SinogramPtr->ProjOffset[i][j] + (numerator[i][j]) / denominator[i][j]; for (k = 0; k < SinogramPtr->N_p; k++) { ErrorSino[k][i][j] -= (numerator[i][j]) / denominator[i][j]; if (fabs(ErrorSino[k][i][j] * sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh) SinogramPtr->ProjSelect[k][i][j] = true; else SinogramPtr->ProjSelect[k][i][j] = false; } } multifree(numerator, 2); multifree(denominator, 2); } void update_Sinogram_Offset(Sinogram * SinogramPtr, TomoInputs * TomoInputsPtr, Real_arr_t *** ErrorSino) { if (TomoInputsPtr->OffsetConstraintType == 1) update_d_offset_unconstrained(SinogramPtr, TomoInputsPtr, ErrorSino); else if (TomoInputsPtr->OffsetConstraintType == 2) 
update_d_offset_zero_mean_constraint(SinogramPtr, TomoInputsPtr, ErrorSino); else if (TomoInputsPtr->OffsetConstraintType == 3) update_d_offset_rect_patch_constraint(SinogramPtr, TomoInputsPtr, ErrorSino); } /* * Implements mutithreaded shared memory parallelization using OpenMP and * splits work among threads. Each thread gets a certain time slice and z * block to update. Multithreading is done within the z-blocks assigned to * each node. ErrorSino - Error sinogram Iter - Present iteration number * MagUpdateMap - Magnitude update map containing the magnitude of update of * each voxel Mask - If a certain element is true then the corresponding * voxel is updated */ int updateVoxelsTimeSlices(Sinogram * SinogramPtr, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr, Real_arr_t ** DetectorResponse, /* AMatrixCol* VoxelLineResponse, */ Real_arr_t *** ErrorSino, int32_t Iter, Real_arr_t **** MagUpdateMap, uint8_t *** Mask) { Real_t AverageUpdate = 0, tempUpdate, avg_update_percentage, total_vox_mag = 0.0, vox_mag = 0.0; int32_t xy_start, xy_end, i, j, K, block, idx, **z_start, **z_stop; Real_t tempTotPix = 0, total_pix = 0; long int **zero_count, total_zero_count = 0; int32_t **thread_num = (int32_t **) multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks); MPI_Request *send_reqs, *recv_reqs; send_reqs = (MPI_Request *) get_spc(ScannedObjectPtr->N_time, sizeof(MPI_Request)); recv_reqs = (MPI_Request *) get_spc(ScannedObjectPtr->N_time, sizeof(MPI_Request)); z_start = (int32_t **) multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks); z_stop = (int32_t **) multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks); randomly_select_x_y(ScannedObjectPtr, TomoInputsPtr, Mask); zero_count = (long int **)multialloc(sizeof(long int), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks); /* * offset_numerator = (Real_t**)multialloc(sizeof(Real_t), 2, * SinogramPtr->N_r, SinogramPtr->N_t); * memset(&(offset_denominator[0][0]), 0, * SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_t)); * * for (k = 0; k < SinogramPtr->N_p; k++) for (i = 0; i < SinogramPtr->N_r; * i++) for (j = 0; j < SinogramPtr->N_t; j++) offset_denominator[i][j] * += TomoInputsPtr->Weight[k][i][j]; */ memset(&(zero_count[0][0]), 0, ScannedObjectPtr->N_time * TomoInputsPtr->num_z_blocks * sizeof(long int)); /* * K = * ScannedObjectPtr->N_time*ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*Sc * annedObjectPtr->N_x; K = (K - * total_zero_count)/(ScannedObjectPtr->gamma*K); */ K = ScannedObjectPtr->NHICD_Iterations; check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Number of NHICD iterations is %d.\n", K); for (j = 0; j < K; j++) { total_vox_mag = 0.0; for (i = 0; i < ScannedObjectPtr->N_time; i++) for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2) { idx = (i % 2 == 0) ? block : block + 1; z_start[i][idx] = idx * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); z_stop[i][idx] = (idx + 1) * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks) - 1; z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1 : z_stop[i][idx]; xy_start = j * floor(TomoInputsPtr->UpdateSelectNum[i][idx] / K); xy_end = (j + 1) * floor(TomoInputsPtr->UpdateSelectNum[i][idx] / K) - 1; xy_end = (j == K - 1) ? 
TomoInputsPtr->UpdateSelectNum[i][idx] - 1 : xy_end; /* * printf ("Loop 1 Start - j = %d, i = %d, idx = %d, z_start * = %d, z_stop = %d, xy_start = %d, xy_end = %d\n", j, i, * idx, z_start[i][idx], z_stop[i][idx], xy_start, xy_end); */ total_vox_mag += updateVoxels(i, i, z_start[i][idx], z_stop[i][idx], xy_start, xy_end, TomoInputsPtr->x_rand_select[i][idx], TomoInputsPtr->y_rand_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /* VoxelLineResponse, */ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]); thread_num[i][idx] = omp_get_thread_num(); } /* * check_info(TomoInputsPtr->node_rank==0, * TomoInputsPtr->debug_file_ptr, "Send MPI info\n"); */ MPI_Send_Recv_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0); /* * check_info(TomoInputsPtr->node_rank==0, * TomoInputsPtr->debug_file_ptr, "update_Sinogram_Offset: Will * compute projection offset error\n"); */ if (TomoInputsPtr->updateProjOffset > 1) update_Sinogram_Offset(SinogramPtr, TomoInputsPtr, ErrorSino); /* * check_info(TomoInputsPtr->node_rank==0, * TomoInputsPtr->debug_file_ptr, "update_Sinogram_Offset: Done * computing projection offset error\n"); */ MPI_Wait_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0); for (i = 0; i < ScannedObjectPtr->N_time; i++) for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2) { idx = (i % 2 == 0) ? block + 1 : block; z_start[i][idx] = idx * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); z_stop[i][idx] = (idx + 1) * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks) - 1; z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1 : z_stop[i][idx]; xy_start = j * floor(TomoInputsPtr->UpdateSelectNum[i][idx] / K); xy_end = (j + 1) * floor(TomoInputsPtr->UpdateSelectNum[i][idx] / K) - 1; xy_end = (j == K - 1) ? TomoInputsPtr->UpdateSelectNum[i][idx] - 1 : xy_end; total_vox_mag += updateVoxels(i, i, z_start[i][idx], z_stop[i][idx], xy_start, xy_end, TomoInputsPtr->x_rand_select[i][idx], TomoInputsPtr->y_rand_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /* VoxelLineResponse, */ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]); thread_num[i][idx] = omp_get_thread_num(); /* * printf ("Loop 2 - i = %d, idx = %d, z_start = %d, z_stop = * %d, xy_start = %d, xy_end = %d\n", i, idx, * z_start[i][idx], z_stop[i][idx], xy_start, xy_end); */ } MPI_Send_Recv_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1); if (TomoInputsPtr->updateProjOffset > 1) update_Sinogram_Offset(SinogramPtr, TomoInputsPtr, ErrorSino); MPI_Wait_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1); VSC_based_Voxel_Line_Select(ScannedObjectPtr, TomoInputsPtr, MagUpdateMap); /* * check_info(TomoInputsPtr->node_rank==0, * TomoInputsPtr->debug_file_ptr, "Number of NHICD voxel lines to be * updated in iteration %d is %d\n", j, num_voxel_lines); */ if (Iter > 1 && TomoInputsPtr->no_NHICD == 0) { for (i = 0; i < ScannedObjectPtr->N_time; i++) for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2) { idx = (i % 2 == 0) ? block : block + 1; z_start[i][idx] = idx * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); z_stop[i][idx] = (idx + 1) * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks) - 1; z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? 
ScannedObjectPtr->N_z - 1 : z_stop[i][idx]; updateVoxels(i, i, z_start[i][idx], z_stop[i][idx], 0, TomoInputsPtr->NHICDSelectNum[i][idx] - 1, TomoInputsPtr->x_NHICD_select[i][idx], TomoInputsPtr->y_NHICD_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /* VoxelLineResponse, */ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]); thread_num[i][idx] = omp_get_thread_num(); /* * printf ("Loop 1 NHICD - i = %d, idx = %d, z_start = * %d, z_stop = %d\n", i, idx, z_start[i][idx], * z_stop[i][idx]); */ } MPI_Send_Recv_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0); if (TomoInputsPtr->updateProjOffset > 1) update_Sinogram_Offset(SinogramPtr, TomoInputsPtr, ErrorSino); MPI_Wait_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0); for (i = 0; i < ScannedObjectPtr->N_time; i++) for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2) { idx = (i % 2 == 0) ? block + 1 : block; z_start[i][idx] = idx * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); z_stop[i][idx] = (idx + 1) * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks) - 1; z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1 : z_stop[i][idx]; updateVoxels(i, i, z_start[i][idx], z_stop[i][idx], 0, TomoInputsPtr->NHICDSelectNum[i][idx] - 1, TomoInputsPtr->x_NHICD_select[i][idx], TomoInputsPtr->y_NHICD_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /* VoxelLineResponse, */ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]); thread_num[i][idx] = omp_get_thread_num(); /* * printf ("Loop 2 NHICD - i = %d, idx = %d, z_start = * %d, z_stop = %d\n", i, idx, z_start[i][idx], * z_stop[i][idx]); */ } MPI_Send_Recv_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1); if (TomoInputsPtr->updateProjOffset > 1) update_Sinogram_Offset(SinogramPtr, TomoInputsPtr, ErrorSino); MPI_Wait_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1); } } if (TomoInputsPtr->updateVar == 1) update_variance_parameter(SinogramPtr, TomoInputsPtr, ErrorSino); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Time Slice, Z Start, Z End - Thread : "); total_pix = 0; for (i = 0; i < ScannedObjectPtr->N_time; i++) { for (block = 0; block < TomoInputsPtr->num_z_blocks; block++) { total_pix += TomoInputsPtr->UpdateSelectNum[i][block] * (ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); for (j = 0; j < TomoInputsPtr->UpdateSelectNum[i][block]; j++) { AverageUpdate += MagUpdateMap[i][block][TomoInputsPtr->y_rand_select[i][block][j]][TomoInputsPtr->x_rand_select[i][block][j]]; } total_zero_count += zero_count[i][block]; check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "%d,%d,%d-%d; ", i, z_start[i][block], z_stop[i][block], thread_num[i][block]); } } check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "\n"); MPI_Allreduce(&AverageUpdate, &tempUpdate, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&total_pix, &tempTotPix, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&total_vox_mag, &vox_mag, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); AverageUpdate = tempUpdate / (tempTotPix); AverageUpdate = convert2Hounsfield(AverageUpdate); check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Average voxel update over all voxels is %f, total voxels is %f.\n", AverageUpdate, tempTotPix); check_debug(TomoInputsPtr->node_rank == 0, 
TomoInputsPtr->debug_file_ptr, "Zero count is %ld.\n", total_zero_count); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Variance parameter divisor is %f.\n", (Real_t) TomoInputsPtr->node_num * (Real_t) SinogramPtr->N_p * (Real_t) SinogramPtr->N_r * (Real_t) SinogramPtr->N_t); multifree(zero_count, 2); multifree(thread_num, 2); multifree(z_start, 2); multifree(z_stop, 2); free(send_reqs); free(recv_reqs); /* * multifree(offset_numerator,2); multifree(offset_denominator,2); */ avg_update_percentage = 100 * tempUpdate / vox_mag; check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Percentage average magnitude of voxel updates is %f.\n", avg_update_percentage); if (avg_update_percentage < TomoInputsPtr->StopThreshold) { check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Percentage average magnitude of voxel updates is less than convergence threshold.\n"); return (1); } return (0); } /* * ICD_BackProject calls the ICD optimization function repeatedly till the * stopping criteria is met. */ int ICD_BackProject(Sinogram * SinogramPtr, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr) { #ifndef NO_COST_CALCULATE Real_t cost, cost_0_iter, cost_last_iter, percentage_change_in_cost = 0; char costfile[100] = COST_FILENAME; #endif Real_arr_t ***ErrorSino, **H_r, *H_t; Real_t x, y; int32_t j, flag = 0, Iter, i, k; int dimTiff[4]; char VarEstFile[100] = VAR_PARAM_FILENAME; char scaled_error_file[100] = SCALED_ERROR_SINO_FILENAME; time_t start; char detect_file[100] = DETECTOR_RESPONSE_FILENAME; char projselect_file[100] = PROJ_SELECT_FILENAME; char MagUpdateMapFile[100] = MAG_UPDATE_FILENAME; uint8_t ***Mask; /* AMatrixCol *VoxelLineResponse; */ #ifdef POSITIVITY_CONSTRAINT check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Enforcing positivity constraint\n"); #endif Real_arr_t ****MagUpdateMap = (Real_arr_t ****) multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x); H_r = (Real_arr_t **) multialloc(sizeof(Real_arr_t), 2, SinogramPtr->N_p, DETECTOR_RESPONSE_BINS + 1); H_t = (Real_arr_t *) get_spc(DETECTOR_RESPONSE_BINS + 1, sizeof(Real_arr_t)); ErrorSino = (Real_arr_t ***) multialloc(sizeof(Real_arr_t), 3, SinogramPtr->N_p, SinogramPtr->N_r, SinogramPtr->N_t); Mask = (uint8_t ***) multialloc(sizeof(uint8_t), 3, ScannedObjectPtr->N_time, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x); memset(&(MagUpdateMap[0][0][0][0]), 0, ScannedObjectPtr->N_time * TomoInputsPtr->num_z_blocks * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x * sizeof(Real_arr_t)); /* omp_set_num_threads(TomoInputsPtr->num_threads); */ check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Number of CPU cores is %d\n", (int)omp_get_num_procs()); /* * check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, * "ICD_BackProject: Number of threads is %d\n", * TomoInputsPtr->num_threads) ; */ for (i = 0; i < ScannedObjectPtr->N_time; i++) for (j = 0; j < ScannedObjectPtr->N_y; j++) for (k = 0; k < ScannedObjectPtr->N_x; k++) { x = ScannedObjectPtr->x0 + ((Real_t) k + 0.5) * ScannedObjectPtr->delta_xy; y = ScannedObjectPtr->y0 + ((Real_t) j + 0.5) * ScannedObjectPtr->delta_xy; if (x * x + y * y < TomoInputsPtr->radius_obj * TomoInputsPtr->radius_obj) Mask[i][j][k] = 1; else Mask[i][j][k] = 0; } DetectorResponseProfile(H_r, H_t, SinogramPtr, ScannedObjectPtr, TomoInputsPtr); dimTiff[0] = 1; dimTiff[1] = 1; dimTiff[2] = 
SinogramPtr->N_p;
    dimTiff[3] = DETECTOR_RESPONSE_BINS + 1;
    sprintf(detect_file, "%s_n%d", detect_file, TomoInputsPtr->node_rank);
    if (TomoInputsPtr->Write2Tiff == 1)
        if (WriteMultiDimArray2Tiff(detect_file, dimTiff, 0, 1, 2, 3, &(H_r[0][0]), 0, TomoInputsPtr->debug_file_ptr))
            goto error;
    start = time(NULL);
    if (initObject(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, MagUpdateMap))
        goto error;
    check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Time taken to read object = %fmins\n", difftime(time(NULL), start) / 60.0);
    if (initErrorSinogram(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, H_r, ErrorSino /* , VoxelLineResponse */ ))
        goto error;
    check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Time taken to initialize object and compute error sinogram = %fmins\n", difftime(time(NULL), start) / 60.0);
#ifndef NO_COST_CALCULATE
    cost = computeCost(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino);
    cost_0_iter = cost;
    cost_last_iter = cost;
    check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "------------- Iteration 0, Cost = %f------------\n", cost);
    if (TomoInputsPtr->node_rank == 0)
        Write2Bin(costfile, 1, 1, 1, 1, sizeof(Real_t), &cost, TomoInputsPtr->debug_file_ptr);
#endif                          /* Cost calculation endif */
    start = time(NULL);
    for (Iter = 1; Iter <= TomoInputsPtr->NumIter; Iter++) {
        flag = updateVoxelsTimeSlices(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, H_r, /* VoxelLineResponse, */ ErrorSino, Iter, MagUpdateMap, Mask);
        if (TomoInputsPtr->WritePerIter == 1)
            if (write_ObjectProjOff2TiffBinPerIter(SinogramPtr, ScannedObjectPtr, TomoInputsPtr))
                goto error;
#ifndef NO_COST_CALCULATE
        cost = computeCost(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino);
        percentage_change_in_cost = ((cost - cost_last_iter) / (cost - cost_0_iter)) * 100.0;
        check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Percentage change in cost is %f.\n", percentage_change_in_cost);
        check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Variance parameter estimate = %f.\n", TomoInputsPtr->var_est);
        check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "------------- Iteration = %d, Cost = %f, Time since start of ICD = %fmins ------------\n", Iter, cost, difftime(time(NULL), start) / 60.0);
        if (TomoInputsPtr->node_rank == 0)
            Append2Bin(costfile, 1, 1, 1, 1, sizeof(Real_t), &cost, TomoInputsPtr->debug_file_ptr);
        check_error(cost > cost_last_iter, TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Cost value increased.\n");
        cost_last_iter = cost;
        /*
         * if (percentage_change_in_cost < TomoInputsPtr->cost_thresh && flag
         * != 0 && Iter > 1){
         */
        if (flag != 0 && Iter > 1) {
            check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Convergence criterion is met.\n");
            break;
        }
#else
        check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Variance parameter estimate = %f\n", TomoInputsPtr->var_est);
        check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "-------------ICD_BackProject: ICD Iter = %d, time since start of ICD = %fmins------------.\n", Iter, difftime(time(NULL), start) / 60.0);
        if (flag != 0 && Iter > 1) {
            check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Convergence criterion is met.\n");
            break;
        }
#endif
        flag = fflush(TomoInputsPtr->debug_file_ptr);
        if (flag != 0)
            check_warn(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Cannot flush buffer.\n");
    }
    for (i = 0; i < SinogramPtr->N_p; i++)
for (j = 0; j < SinogramPtr->N_r; j++) for (k = 0; k < SinogramPtr->N_t; k++) ErrorSino[i][j][k] *= sqrt(TomoInputsPtr->Weight[i][j][k]); if (TomoInputsPtr->node_rank == 0) Write2Bin(VarEstFile, 1, 1, 1, 1, sizeof(Real_t), &(TomoInputsPtr->var_est), TomoInputsPtr->debug_file_ptr); int32_t size = ScannedObjectPtr->N_time * TomoInputsPtr->num_z_blocks * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x; if (write_SharedBinFile_At(MagUpdateMapFile, &(MagUpdateMap[0][0][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) goto error; sprintf(scaled_error_file, "%s_n%d", scaled_error_file, TomoInputsPtr->node_rank); sprintf(projselect_file, "%s_n%d", projselect_file, TomoInputsPtr->node_rank); dimTiff[0] = 1; dimTiff[1] = SinogramPtr->N_p; dimTiff[2] = SinogramPtr->N_r; dimTiff[3] = SinogramPtr->N_t; if (TomoInputsPtr->Write2Tiff == 1) { if (WriteMultiDimArray2Tiff(scaled_error_file, dimTiff, 0, 3, 1, 2, &(ErrorSino[0][0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error; if (WriteBoolArray2Tiff(projselect_file, dimTiff, 0, 3, 1, 2, &(SinogramPtr->ProjSelect[0][0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error; } multifree(ErrorSino, 3); multifree(H_r, 2); free(H_t); multifree(Mask, 3); multifree(MagUpdateMap, 4); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Finished running ICD_BackProject.\n"); flag = fflush(TomoInputsPtr->debug_file_ptr); if (flag != 0) check_warn(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Cannot flush buffer.\n"); check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "The estimated value of variance parameter is %f.\n", TomoInputsPtr->var_est); return (0); error: multifree(ErrorSino, 3); multifree(H_r, 2); free(H_t); multifree(Mask, 3); multifree(MagUpdateMap, 4); return (-1); }
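The upsample_*_bilinear_* routines above are all built from one separable 1-D operation applied axis by axis: each factor-of-2 refinement places the two new samples a quarter of the way toward the adjacent coarse sample, i.e. with weights 3/4 and 1/4, and copies the two boundary samples unchanged. Below is a minimal standalone sketch of that stencil, not part of the original source; the names upsample_1d_by2 and N_in are hypothetical and chosen for illustration only.

#include <stdio.h>

/*
 * Factor-of-2 bilinear upsampling of a 1-D signal. Interior outputs mix the
 * nearest coarse sample (weight 3/4) with the next-nearest one (weight 1/4);
 * the first and last outputs copy the boundary samples, matching the edge
 * handling in the upsample_*_bilinear_* routines above. Assumes N_in >= 2.
 */
static void upsample_1d_by2(const double *in, double *out, int N_in)
{
    int k;
    out[0] = in[0];
    out[1] = (3.0 * in[0] + in[1]) / 4.0;
    out[2 * N_in - 2] = (in[N_in - 2] + 3.0 * in[N_in - 1]) / 4.0;
    out[2 * N_in - 1] = in[N_in - 1];
    for (k = 1; k < N_in - 1; k++) {
        out[2 * k] = (in[k - 1] + 3.0 * in[k]) / 4.0;       /* 1/4 toward left neighbor */
        out[2 * k + 1] = (3.0 * in[k] + in[k + 1]) / 4.0;   /* 1/4 toward right neighbor */
    }
}

int main(void)
{
    double in[4] = {0.0, 1.0, 2.0, 3.0}, out[8];
    int k;
    upsample_1d_by2(in, out, 4);
    for (k = 0; k < 8; k++)
        printf("%.3f ", out[k]);    /* prints 0.000 0.250 0.750 1.250 1.750 2.250 2.750 3.000 */
    printf("\n");
    return 0;
}

The 2-D and 3-D routines apply this same stencil once per axis (along x into a staging buffer, then along y, and for the 3-D case along z), which is why a single 'buffer' suffices in 2-D while the 3-D case stages through buffer2D and buffer3D.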
#include "XT_Constants.h" #include <stdio.h> #include <math.h> #include <stdlib.h> #include "allocate.h" #include "randlib.h" #include <time.h> #include "XT_AMatrix.h" #include "XT_Profile.h" #include "XT_Structures.h" #include "XT_IOMisc.h" #include "XT_NHICD.h" #include "omp.h" #include "XT_MPI.h" #include <mpi.h> #include "XT_VoxUpdate.h" #include "XT_ForwardProject.h" #include "XT_MPIIO.h" #include "XT_Debug.h" #include "XT_OffsetError.h" /* computes the location of (i,j,k) th element in a 1D array */ int32_t array_loc_1D(int32_t i, int32_t j, int32_t k, int32_t N_j, int32_t N_k) { return (i * N_j * N_k + j * N_k + k); } /* * finds the maximum in a array 'array_in' with number of elements being * 'num' */ int32_t find_max(int32_t * array_in, int32_t num) { int32_t i, maxnum; maxnum = array_in[0]; for (i = 1; i < num; i++) if (array_in[i] > maxnum) maxnum = array_in[i]; return (maxnum); } /* converts the value 'val' to hounsfield units and returns it */ Real_t convert2Hounsfield(Real_t val) { Real_t slope, c; slope = (HOUNSFIELD_WATER_MAP - HOUNSFIELD_AIR_MAP) / (WATER_MASS_ATT_COEFF * WATER_DENSITY - AIR_MASS_ATT_COEFF * AIR_DENSITY) / HFIELD_UNIT_CONV_CONST; c = -slope * (AIR_MASS_ATT_COEFF * AIR_DENSITY * HFIELD_UNIT_CONV_CONST); return (slope * val + c); } /* * Computes the qGGMRF spatial prior cost value at delta = x_i - x_j. i & j * being the voxel and its neighbor */ Real_t CE_QGGMRF_Spatial_Value(Real_t delta, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr) { return ((pow(fabs(delta), MRF_Q) / TomoInputsPtr->Sigma_S_Q) / (ScannedObjectPtr->C_S + pow(fabs(delta), MRF_Q - MRF_P) / TomoInputsPtr->Sigma_S_Q_P)); } /* * Computes the qGGMRF temporal prior cost value at delta = x_i - x_j. i & j * being the voxel and its neighbor */ Real_t CE_QGGMRF_Temporal_Value(Real_t delta, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr) { return ((pow(fabs(delta), MRF_Q) / TomoInputsPtr->Sigma_T_Q) / (ScannedObjectPtr->C_T + pow(fabs(delta), MRF_Q - MRF_P) / TomoInputsPtr->Sigma_T_Q_P)); } /* * Computes the qGGMRF spatial prior derivative at delta = x_i - x_j. i & j * being the voxel and its neighbor */ Real_t CE_QGGMRF_Spatial_Derivative(Real_t delta, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr) { Real_t temp1, temp2, temp3; temp1 = pow(fabs(delta), MRF_Q - MRF_P) / (TomoInputsPtr->Sigma_S_Q_P); temp2 = pow(fabs(delta), MRF_Q - 1); temp3 = ScannedObjectPtr->C_S + temp1; if (delta < 0) return ((-1 * temp2 / (temp3 * TomoInputsPtr->Sigma_S_Q)) * (MRF_Q - ((MRF_Q - MRF_P) * temp1) / (temp3))); else { return ((temp2 / (temp3 * TomoInputsPtr->Sigma_S_Q)) * (MRF_Q - ((MRF_Q - MRF_P) * temp1) / (temp3))); } } /* * Computes the qGGMRF temporal prior derivative at delta = x_i - x_j. 
i & j * being the voxel and its neighbor */ Real_t CE_QGGMRF_Temporal_Derivative(Real_t delta, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr) { Real_t temp1, temp2, temp3; temp1 = pow(fabs(delta), MRF_Q - MRF_P) / (TomoInputsPtr->Sigma_T_Q_P); temp2 = pow(fabs(delta), MRF_Q - 1); temp3 = ScannedObjectPtr->C_T + temp1; if (delta < 0) return ((-1 * temp2 / (temp3 * TomoInputsPtr->Sigma_T_Q)) * (MRF_Q - ((MRF_Q - MRF_P) * temp1) / (temp3))); else { return ((temp2 / (temp3 * TomoInputsPtr->Sigma_T_Q)) * (MRF_Q - ((MRF_Q - MRF_P) * temp1) / (temp3))); } } /* Computes the qGGMRF spatial prior second derivative at delta = 0 */ Real_t CE_QGGMRF_Spatial_SecondDerivative(ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr) { return MRF_Q / (TomoInputsPtr->Sigma_S_Q * ScannedObjectPtr->C_S); } /* Computes the qGGMRF spatial prior second derivative at delta = 0 */ Real_t CE_QGGMRF_Temporal_SecondDerivative(ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr) { return MRF_Q / (TomoInputsPtr->Sigma_T_Q * ScannedObjectPtr->C_T); } /* * Computes the voxel update and returns it. V is the present value of voxel. * THETA1 and THETA2 are the values used in voxel update. Spatial_Nhood and * Time_Nhood gives the values of voxels in the neighborhood of V. * Time_BDFlag and Spatial_BDFlag are masks which determine whether a * neighbor should be included in the neighorhood or not. */ Real_t CE_FunctionalSubstitution(Real_t V, Real_t THETA1, Real_t THETA2, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr, Real_t Spatial_Nhood[NHOOD_Y_MAXDIM][NHOOD_X_MAXDIM][NHOOD_Z_MAXDIM], Real_t Time_Nhood[NHOOD_TIME_MAXDIM - 1], bool Spatial_BDFlag[NHOOD_Y_MAXDIM][NHOOD_X_MAXDIM][NHOOD_Z_MAXDIM], bool Time_BDFlag[NHOOD_TIME_MAXDIM - 1]) { Real_t u, temp1 = 0, temp2 = 0, temp_const, RefValue = 0, Delta0; Real_t QGGMRF_Params; int32_t i, j, k; RefValue = V; /* Need to Loop this for multiple iterations of substitute function */ for (i = 0; i < NHOOD_Y_MAXDIM; i++) for (j = 0; j < NHOOD_X_MAXDIM; j++) for (k = 0; k < NHOOD_Z_MAXDIM; k++) { if (Spatial_BDFlag[i][j][k] == true && (i != (NHOOD_Y_MAXDIM - 1) / 2 || j != (NHOOD_X_MAXDIM - 1) / 2 || k != (NHOOD_Z_MAXDIM - 1) / 2)) { Delta0 = (RefValue - Spatial_Nhood[i][j][k]); if (Delta0 != 0) QGGMRF_Params = CE_QGGMRF_Spatial_Derivative(Delta0, ScannedObjectPtr, TomoInputsPtr) / (Delta0); else { QGGMRF_Params = CE_QGGMRF_Spatial_SecondDerivative(ScannedObjectPtr, TomoInputsPtr); } temp_const = TomoInputsPtr->Spatial_Filter[i][j][k] * QGGMRF_Params; temp1 += temp_const * Spatial_Nhood[i][j][k]; temp2 += temp_const; } } for (i = 0; i < NHOOD_TIME_MAXDIM - 1; i++) { if (Time_BDFlag[i] == true) { Delta0 = (RefValue - Time_Nhood[i]); if (Delta0 != 0) QGGMRF_Params = CE_QGGMRF_Temporal_Derivative(Delta0, ScannedObjectPtr, TomoInputsPtr) / (Delta0); else { QGGMRF_Params = CE_QGGMRF_Temporal_SecondDerivative(ScannedObjectPtr, TomoInputsPtr); } temp_const = TomoInputsPtr->Time_Filter[0] * QGGMRF_Params; temp1 += temp_const * Time_Nhood[i]; temp2 += temp_const; } } u = (temp1 + (THETA2 * V) - THETA1) / (temp2 + THETA2); RefValue = RefValue + TomoInputsPtr->alpha * (u - RefValue); #ifdef POSITIVITY_CONSTRAINT if (RefValue <= 0) RefValue = 0; #endif return RefValue; } /* computes the value of cost function. 
'ErrorSino' is the error sinogram */
Real_t computeCost(Sinogram * SinogramPtr, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr, Real_arr_t *** ErrorSino)
{
    Real_t cost = 0, temp = 0, forward = 0, prior = 0;
    Real_t delta;
    int32_t i, j, k, p, N_z;
    bool j_minus, k_minus, i_plus, j_plus, k_plus, p_plus;
#pragma omp parallel for private(j, k, temp) reduction(+:cost)
    for (i = 0; i < SinogramPtr->N_p; i++)
        for (j = 0; j < SinogramPtr->N_r; j++)
            for (k = 0; k < SinogramPtr->N_t; k++) {
                temp = ErrorSino[i][j][k] * sqrt(TomoInputsPtr->Weight[i][j][k]);
                if (SinogramPtr->ProjSelect[i][j][k] == true)
                    temp = temp * temp;
                else
                    temp = 2.0 * TomoInputsPtr->ErrorSinoDelta * TomoInputsPtr->ErrorSinoThresh * fabs(temp) + TomoInputsPtr->ErrorSinoThresh * TomoInputsPtr->ErrorSinoThresh * (1.0 - 2.0 * TomoInputsPtr->ErrorSinoDelta);
                cost += temp;
            }
    cost /= 2.0;
    /*
     * When computing the cost of the prior term it is important to make sure
     * that you don't include the cost of any pair of neighbors more than
     * once. In this code, a certain sense of causality is used to compute
     * the cost. We also assume that the weighting kernel given by 'Filter' is
     * symmetric. Let i, j and k correspond to the three dimensions. If we go
     * forward to i+1, then all neighbors at j-1, j, j+1, k+1, k, k-1 are to
     * be considered. However, for the same i, if we go forward to j+1, then
     * all of k-1, k, and k+1 should be considered. For the same i and j, only
     * the neighbor at k+1 is considered.
     */
    temp = 0;
    N_z = ScannedObjectPtr->N_z + 2;
    if (TomoInputsPtr->node_rank == TomoInputsPtr->node_num - 1)
        N_z = ScannedObjectPtr->N_z + 1;
#pragma omp parallel for private(delta, p, j, k, j_minus, k_minus, p_plus, i_plus, j_plus, k_plus) reduction(+:temp)
    for (i = 0; i < ScannedObjectPtr->N_time; i++)
        for (p = 1; p < ScannedObjectPtr->N_z + 1; p++)
            for (j = 0; j < ScannedObjectPtr->N_y; j++) {
                for (k = 0; k < ScannedObjectPtr->N_x; k++) {
                    j_minus = (j - 1 >= 0) ? true : false;
                    k_minus = (k - 1 >= 0) ? true : false;
                    p_plus = (p + 1 < N_z) ? true : false;
                    i_plus = (i + 1 < ScannedObjectPtr->N_time) ? true : false;
                    j_plus = (j + 1 < ScannedObjectPtr->N_y) ? true : false;
                    k_plus = (k + 1 < ScannedObjectPtr->N_x) ?
true : false; if (k_plus == true) { delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j][k + 1]); temp += TomoInputsPtr->Spatial_Filter[1][1][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if (j_plus == true) { if (k_minus == true) { delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k - 1]); temp += TomoInputsPtr->Spatial_Filter[1][2][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k]); temp += TomoInputsPtr->Spatial_Filter[1][2][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); if (k_plus == true) { delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p][j + 1][k + 1]); temp += TomoInputsPtr->Spatial_Filter[1][2][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } } if (p_plus == true) { if (j_minus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k]; temp += TomoInputsPtr->Spatial_Filter[2][0][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j][k]; temp += TomoInputsPtr->Spatial_Filter[2][1][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); if (j_plus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j + 1][k]; temp += TomoInputsPtr->Spatial_Filter[2][2][1] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if (j_minus == true) { if (k_minus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k - 1]; temp += TomoInputsPtr->Spatial_Filter[2][0][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if (k_plus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j - 1][k + 1]; temp += TomoInputsPtr->Spatial_Filter[2][0][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } } if (k_minus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j][k - 1]; temp += TomoInputsPtr->Spatial_Filter[2][1][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if (j_plus == true) { if (k_minus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j + 1][k - 1]; temp += TomoInputsPtr->Spatial_Filter[2][2][0] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } if (k_plus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j + 1][k + 1]; temp += TomoInputsPtr->Spatial_Filter[2][2][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } } if (k_plus == true) { delta = ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i][p + 1][j][k + 1]; temp += TomoInputsPtr->Spatial_Filter[2][1][2] * CE_QGGMRF_Spatial_Value(delta, ScannedObjectPtr, TomoInputsPtr); } } if (i_plus == true) { delta = (ScannedObjectPtr->Object[i][p][j][k] - ScannedObjectPtr->Object[i + 1][p][j][k]); temp += TomoInputsPtr->Time_Filter[0] * CE_QGGMRF_Temporal_Value(delta, ScannedObjectPtr, TomoInputsPtr); } } } /* * Use MPI reduction operation to add the forward and prior costs from * all nodes */ MPI_Reduce(&cost, &forward, 1, MPI_REAL_DATATYPE, MPI_SUM, 0, MPI_COMM_WORLD); MPI_Reduce(&temp, &prior, 1, MPI_REAL_DATATYPE, MPI_SUM, 0, MPI_COMM_WORLD); if 
(TomoInputsPtr->node_rank == 0) { check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Scaled error sino cost = %f\n", forward); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Decrease in scaled error sino cost = %f\n", TomoInputsPtr->ErrorSino_Cost - forward); TomoInputsPtr->ErrorSino_Cost = forward; forward += (Real_t) TomoInputsPtr->node_num * (Real_t) SinogramPtr->N_p * (Real_t) SinogramPtr->N_r * (Real_t) SinogramPtr->N_t * log(TomoInputsPtr->var_est) / 2; check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Forward cost = %f\n", forward); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Prior cost = %f\n", prior); TomoInputsPtr->Forward_Cost = forward; TomoInputsPtr->Prior_Cost = prior; cost = forward + prior; } /* Broadcase the value of cost to all nodes */ MPI_Bcast(&cost, 1, MPI_REAL_DATATYPE, 0, MPI_COMM_WORLD); return cost; } /* * Upsamples the (N_time x N_z x N_y x N_x) size 'Init' by a factor of 2 * along the x-y plane and stores it in 'Object' */ void upsample_bilinear_2D(Real_arr_t **** Object, Real_arr_t **** Init, int32_t N_time, int32_t N_z, int32_t N_y, int32_t N_x) { int32_t i, j, k, m; Real_arr_t **buffer; #pragma omp parallel for private(buffer, m, j, k) for (i = 0; i < N_time; i++) for (m = 0; m < N_z; m++) { buffer = (Real_arr_t **) multialloc(sizeof(Real_arr_t), 2, N_y, 2 * N_x); for (j = 0; j < N_y; j++) { buffer[j][0] = Init[i][m][j][0]; buffer[j][1] = (3.0 * Init[i][m][j][0] + Init[i][m][j][1]) / 4.0; buffer[j][2 * N_x - 1] = Init[i][m][j][N_x - 1]; buffer[j][2 * N_x - 2] = (Init[i][m][j][N_x - 2] + 3.0 * Init[i][m][j][N_x - 1]) / 4.0; for (k = 1; k < N_x - 1; k++) { buffer[j][2 * k] = (Init[i][m][j][k - 1] + 3.0 * Init[i][m][j][k]) / 4.0; buffer[j][2 * k + 1] = (3.0 * Init[i][m][j][k] + Init[i][m][j][k + 1]) / 4.0; } } for (k = 0; k < 2 * N_x; k++) { Object[i][m][0][k] = buffer[0][k]; Object[i][m][1][k] = (3.0 * buffer[0][k] + buffer[1][k]) / 4.0; Object[i][m][2 * N_y - 1][k] = buffer[N_y - 1][k]; Object[i][m][2 * N_y - 2][k] = (buffer[N_y - 2][k] + 3.0 * buffer[N_y - 1][k]) / 4.0; } for (j = 1; j < N_y - 1; j++) { for (k = 0; k < 2 * N_x; k++) { Object[i][m][2 * j][k] = (buffer[j - 1][k] + 3.0 * buffer[j][k]) / 4.0; Object[i][m][2 * j + 1][k] = (3 * buffer[j][k] + buffer[j + 1][k]) / 4.0; } } multifree(buffer, 2); } } /* * Upsamples the (N_z x N_y x N_x) size 'Init' by a factor of 2 along the x-y * plane and stores it in 'Object' */ void upsample_object_bilinear_2D(Real_arr_t *** Object, Real_arr_t *** Init, int32_t N_z, int32_t N_y, int32_t N_x) { int32_t j, k, slice; Real_arr_t **buffer; buffer = (Real_arr_t **) multialloc(sizeof(Real_arr_t), 2, N_y, 2 * N_x); for (slice = 0; slice < N_z; slice++) { for (j = 0; j < N_y; j++) { buffer[j][0] = Init[slice][j][0]; buffer[j][1] = (3.0 * Init[slice][j][0] + Init[slice][j][1]) / 4.0; buffer[j][2 * N_x - 1] = Init[slice][j][N_x - 1]; buffer[j][2 * N_x - 2] = (Init[slice][j][N_x - 2] + 3.0 * Init[slice][j][N_x - 1]) / 4.0; for (k = 1; k < N_x - 1; k++) { buffer[j][2 * k] = (Init[slice][j][k - 1] + 3.0 * Init[slice][j][k]) / 4.0; buffer[j][2 * k + 1] = (3.0 * Init[slice][j][k] + Init[slice][j][k + 1]) / 4.0; } } for (k = 0; k < 2 * N_x; k++) { Object[slice + 1][0][k] = buffer[0][k]; Object[slice + 1][1][k] = (3.0 * buffer[0][k] + buffer[1][k]) / 4.0; Object[slice + 1][2 * N_y - 1][k] = buffer[N_y - 1][k]; Object[slice + 1][2 * N_y - 2][k] = (buffer[N_y - 2][k] + 3.0 * buffer[N_y - 1][k]) / 4.0; } for (j = 1; j < N_y - 1; 
j++) { for (k = 0; k < 2 * N_x; k++) { Object[slice + 1][2 * j][k] = (buffer[j - 1][k] + 3.0 * buffer[j][k]) / 4.0; Object[slice + 1][2 * j + 1][k] = (3 * buffer[j][k] + buffer[j + 1][k]) / 4.0; } } } multifree(buffer, 2); } void upsample_bilinear_3D(Real_arr_t **** Object, Real_arr_t **** Init, int32_t N_time, int32_t N_z, int32_t N_y, int32_t N_x) { int32_t i, j, k, slice; Real_t ***buffer2D, ***buffer3D; #pragma omp parallel for private(buffer2D, buffer3D, slice, j, k) for (i = 0; i < N_time; i++) { buffer2D = (Real_t ***) multialloc(sizeof(Real_t), 3, N_z, N_y, 2 * N_x); buffer3D = (Real_t ***) multialloc(sizeof(Real_t), 3, N_z, 2 * N_y, 2 * N_x); for (slice = 0; slice < N_z; slice++) { for (j = 0; j < N_y; j++) { buffer2D[slice][j][0] = Init[i][slice][j][0]; buffer2D[slice][j][1] = (3.0 * Init[i][slice][j][0] + Init[i][slice][j][1]) / 4.0; buffer2D[slice][j][2 * N_x - 1] = Init[i][slice][j][N_x - 1]; buffer2D[slice][j][2 * N_x - 2] = (Init[i][slice][j][N_x - 2] + 3.0 * Init[i][slice][j][N_x - 1]) / 4.0; for (k = 1; k < N_x - 1; k++) { buffer2D[slice][j][2 * k] = (Init[i][slice][j][k - 1] + 3.0 * Init[i][slice][j][k]) / 4.0; buffer2D[slice][j][2 * k + 1] = (3.0 * Init[i][slice][j][k] + Init[i][slice][j][k + 1]) / 4.0; } } for (k = 0; k < 2 * N_x; k++) { buffer3D[slice][0][k] = buffer2D[slice][0][k]; buffer3D[slice][1][k] = (3.0 * buffer2D[slice][0][k] + buffer2D[slice][1][k]) / 4.0; buffer3D[slice][2 * N_y - 1][k] = buffer2D[slice][N_y - 1][k]; buffer3D[slice][2 * N_y - 2][k] = (buffer2D[slice][N_y - 2][k] + 3.0 * buffer2D[slice][N_y - 1][k]) / 4.0; } for (j = 1; j < N_y - 1; j++) for (k = 0; k < 2 * N_x; k++) { buffer3D[slice][2 * j][k] = (buffer2D[slice][j - 1][k] + 3.0 * buffer2D[slice][j][k]) / 4.0; buffer3D[slice][2 * j + 1][k] = (3 * buffer2D[slice][j][k] + buffer2D[slice][j + 1][k]) / 4.0; } } for (j = 0; j < 2 * N_y; j++) for (k = 0; k < 2 * N_x; k++) { Object[i][0][j][k] = buffer3D[0][j][k]; Object[i][1][j][k] = (3.0 * buffer3D[0][j][k] + buffer3D[1][j][k]) / 4.0; Object[i][2 * N_z - 1][j][k] = buffer3D[N_z - 1][j][k]; Object[i][2 * N_z - 2][j][k] = (3.0 * buffer3D[N_z - 1][j][k] + buffer3D[N_z - 2][j][k]) / 4.0; } for (slice = 1; slice < N_z - 1; slice++) for (j = 0; j < 2 * N_y; j++) for (k = 0; k < 2 * N_x; k++) { Object[i][2 * slice][j][k] = (buffer3D[slice - 1][j][k] + 3.0 * buffer3D[slice][j][k]) / 4.0; Object[i][2 * slice + 1][j][k] = (3.0 * buffer3D[slice][j][k] + buffer3D[slice + 1][j][k]) / 4.0; } multifree(buffer2D, 3); multifree(buffer3D, 3); } } /* * 'InitObject' intializes the Object to be reconstructed to either 0 or an * interpolated version of the previous reconstruction. It is used in multi * resolution reconstruction in which after every coarse resolution * reconstruction the object should be intialized with an interpolated * version of the reconstruction following which the object will be * reconstructed at a finer resolution. 
*/
/*
 * Upsamples the (N_z x N_y x N_x) size 'Init' by a factor of 2 along all
 * three dimensions (x, y and z) and stores it in 'Object'. The output is
 * written starting at z index 1, leaving room for the extra z slices
 * required for MPI.
 */
void upsample_object_bilinear_3D(Real_arr_t *** Object, Real_arr_t *** Init, int32_t N_z, int32_t N_y, int32_t N_x)
{
    int32_t j, k, slice;
    Real_t ***buffer2D, ***buffer3D;
    buffer2D = (Real_t ***) multialloc(sizeof(Real_t), 3, N_z, N_y, 2 * N_x);
    buffer3D = (Real_t ***) multialloc(sizeof(Real_t), 3, N_z, 2 * N_y, 2 * N_x);
    for (slice = 0; slice < N_z; slice++) {
        for (j = 0; j < N_y; j++) {
            buffer2D[slice][j][0] = Init[slice][j][0];
            buffer2D[slice][j][1] = (3.0 * Init[slice][j][0] + Init[slice][j][1]) / 4.0;
            buffer2D[slice][j][2 * N_x - 1] = Init[slice][j][N_x - 1];
            buffer2D[slice][j][2 * N_x - 2] = (Init[slice][j][N_x - 2] + 3.0 * Init[slice][j][N_x - 1]) / 4.0;
            for (k = 1; k < N_x - 1; k++) {
                buffer2D[slice][j][2 * k] = (Init[slice][j][k - 1] + 3.0 * Init[slice][j][k]) / 4.0;
                buffer2D[slice][j][2 * k + 1] = (3.0 * Init[slice][j][k] + Init[slice][j][k + 1]) / 4.0;
            }
        }
        for (k = 0; k < 2 * N_x; k++) {
            buffer3D[slice][0][k] = buffer2D[slice][0][k];
            buffer3D[slice][1][k] = (3.0 * buffer2D[slice][0][k] + buffer2D[slice][1][k]) / 4.0;
            buffer3D[slice][2 * N_y - 1][k] = buffer2D[slice][N_y - 1][k];
            buffer3D[slice][2 * N_y - 2][k] = (buffer2D[slice][N_y - 2][k] + 3.0 * buffer2D[slice][N_y - 1][k]) / 4.0;
        }
        for (j = 1; j < N_y - 1; j++)
            for (k = 0; k < 2 * N_x; k++) {
                buffer3D[slice][2 * j][k] = (buffer2D[slice][j - 1][k] + 3.0 * buffer2D[slice][j][k]) / 4.0;
                buffer3D[slice][2 * j + 1][k] = (3.0 * buffer2D[slice][j][k] + buffer2D[slice][j + 1][k]) / 4.0;
            }
    }
    for (j = 0; j < 2 * N_y; j++)
        for (k = 0; k < 2 * N_x; k++) {
            Object[1][j][k] = buffer3D[0][j][k];
            Object[2][j][k] = (3.0 * buffer3D[0][j][k] + buffer3D[1][j][k]) / 4.0;
            Object[2 * N_z][j][k] = buffer3D[N_z - 1][j][k];
            Object[2 * N_z - 1][j][k] = (3.0 * buffer3D[N_z - 1][j][k] + buffer3D[N_z - 2][j][k]) / 4.0;
        }
    for (slice = 1; slice < N_z - 1; slice++)
        for (j = 0; j < 2 * N_y; j++)
            for (k = 0; k < 2 * N_x; k++) {
                Object[2 * slice + 1][j][k] = (buffer3D[slice - 1][j][k] + 3.0 * buffer3D[slice][j][k]) / 4.0;
                Object[2 * slice + 2][j][k] = (3.0 * buffer3D[slice][j][k] + buffer3D[slice + 1][j][k]) / 4.0;
            }
    multifree(buffer2D, 3);
    multifree(buffer3D, 3);
}

/*
 * Randomly selects the voxel lines which need to be updated along the x-y
 * plane for each z-block and time slice
 */
void randomly_select_x_y(ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr, uint8_t *** Mask)
{
    int32_t i, j, num, n, Index, col, row, *Counter, ArraySize, block;
    ArraySize = ScannedObjectPtr->N_y * ScannedObjectPtr->N_x;
    Counter = (int32_t *) get_spc(ArraySize, sizeof(int32_t));
    for (i = 0; i < ScannedObjectPtr->N_time; i++)
        for (block = 0; block < TomoInputsPtr->num_z_blocks; block++) {
            ArraySize = ScannedObjectPtr->N_y * ScannedObjectPtr->N_x;
            for (Index = 0; Index < ArraySize; Index++)
                Counter[Index] = Index;
            TomoInputsPtr->UpdateSelectNum[i][block] = 0;
            for (j = 0; j < ScannedObjectPtr->N_x * ScannedObjectPtr->N_y; j++) {
                Index = floor(random2() * ArraySize);
                Index = (Index == ArraySize) ?
ArraySize - 1 : Index; col = Counter[Index] % ScannedObjectPtr->N_x; row = Counter[Index] / ScannedObjectPtr->N_x; for (n = block * (ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); n < (block + 1) * (ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); n++) if (Mask[i][row][col] == 1) { num = TomoInputsPtr->UpdateSelectNum[i][block]; TomoInputsPtr->x_rand_select[i][block][num] = col; TomoInputsPtr->y_rand_select[i][block][num] = row; (TomoInputsPtr->UpdateSelectNum[i][block])++; break; } Counter[Index] = Counter[ArraySize - 1]; ArraySize--; } } free(Counter); } /* * 'InitObject' initializes the Object to be reconstructed to either 0 or an * interpolated version of the previous reconstruction. It is used in * multi-resolution reconstruction, in which after every coarse-resolution * reconstruction the object is initialized with an interpolated version of * that reconstruction, after which the object is reconstructed at a finer * resolution. --initICD-- If 1, initializes the object to 0. If 2 or 3, uses * bilinear interpolation (2D in-plane for 2, full 3D for 3) to initialize * the object when the previous reconstruction was at a lower resolution. * The function also initializes the magnitude update map 'MagUpdateMap' from * the previous coarser resolution reconstruction. */ int32_t initObject(Sinogram * SinogramPtr, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr, Real_arr_t **** MagUpdateMap) { char object_file[100]; int dimTiff[4]; int32_t i, j, k, l, size, flag = 0; Real_arr_t ***Init, ****UpMapInit; for (i = 0; i < ScannedObjectPtr->N_time; i++) for (j = 0; j < ScannedObjectPtr->N_z; j++) for (k = 0; k < ScannedObjectPtr->N_y; k++) for (l = 0; l < ScannedObjectPtr->N_x; l++) ScannedObjectPtr->Object[i][j + 1][k][l] = OBJECT_INIT_VAL; if (TomoInputsPtr->initICD > 3 || TomoInputsPtr->initICD < 0) { sentinel(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "ERROR: initICD value not recognized.\n"); } else if (TomoInputsPtr->initICD == 1) { size = ScannedObjectPtr->N_z * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x; for (i = 0; i < ScannedObjectPtr->N_time; i++) { sprintf(object_file, "%s_time_%d", OBJECT_FILENAME, i); if (read_SharedBinFile_At(object_file, &(ScannedObjectPtr->Object[i][1][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; } if (TomoInputsPtr->initMagUpMap == 1) { size = ScannedObjectPtr->N_time * TomoInputsPtr->num_z_blocks * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x; if (read_SharedBinFile_At(MAG_UPDATE_FILENAME, &(MagUpdateMap[0][0][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; } } else if (TomoInputsPtr->initICD == 2 || TomoInputsPtr->initICD == 3) { if (TomoInputsPtr->initICD == 3) { Init = (Real_arr_t ***) multialloc(sizeof(Real_arr_t), 3, ScannedObjectPtr->N_z / 2, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Interpolating object using 3D bilinear interpolation.\n"); for (i = 0; i < ScannedObjectPtr->N_time; i++) { sprintf(object_file, "%s_time_%d", OBJECT_FILENAME, i); size = ScannedObjectPtr->N_z * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x / 8; if (read_SharedBinFile_At(object_file, &(Init[0][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; upsample_object_bilinear_3D(ScannedObjectPtr->Object[i], Init, ScannedObjectPtr->N_z / 2, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); } multifree(Init, 3); check_debug(TomoInputsPtr->node_rank ==
0, TomoInputsPtr->debug_file_ptr, "Done with interpolating object using 3D bilinear interpolation.\n"); } else { Init = (Real_arr_t ***) multialloc(sizeof(Real_arr_t), 3, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Interpolating object using 2D bilinear interpolation.\n"); for (i = 0; i < ScannedObjectPtr->N_time; i++) { sprintf(object_file, "%s_time_%d", OBJECT_FILENAME, i); size = ScannedObjectPtr->N_z * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x / 4; if (read_SharedBinFile_At(object_file, &(Init[0][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; upsample_object_bilinear_2D(ScannedObjectPtr->Object[i], Init, ScannedObjectPtr->N_z, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); } multifree(Init, 3); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Done with interpolating object using 2D bilinear interpolation.\n"); } if (TomoInputsPtr->initMagUpMap == 1) { if (TomoInputsPtr->prevnum_z_blocks == TomoInputsPtr->num_z_blocks) { UpMapInit = (Real_arr_t ****) multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); size = ScannedObjectPtr->N_time * TomoInputsPtr->num_z_blocks * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x / 4; if (read_SharedBinFile_At(MAG_UPDATE_FILENAME, &(UpMapInit[0][0][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Interpolating magnitude update map using 2D bilinear interpolation.\n"); upsample_bilinear_2D(MagUpdateMap, UpMapInit, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); multifree(UpMapInit, 4); } else if (TomoInputsPtr->prevnum_z_blocks == TomoInputsPtr->num_z_blocks / 2) { UpMapInit = (Real_arr_t ****) multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks / 2, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); size = ScannedObjectPtr->N_time * TomoInputsPtr->num_z_blocks * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x / 8; if (read_SharedBinFile_At(MAG_UPDATE_FILENAME, &(UpMapInit[0][0][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) flag = -1; check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Interpolating magnitude update map using 3D bilinear interpolation.\n"); upsample_bilinear_3D(MagUpdateMap, UpMapInit, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks / 2, ScannedObjectPtr->N_y / 2, ScannedObjectPtr->N_x / 2); multifree(UpMapInit, 4); } else { check_warn(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Number of axial blocks is incompatible with previous stage of multi-resolution.\n"); check_warn(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Initializing the multi-resolution map to zeros.\n"); } } } dimTiff[0] = ScannedObjectPtr->N_time; dimTiff[1] = TomoInputsPtr->num_z_blocks; dimTiff[2] = ScannedObjectPtr->N_y; dimTiff[3] = ScannedObjectPtr->N_x; sprintf(object_file, "%s_n%d", MAG_UPDATE_FILENAME, TomoInputsPtr->node_rank); if (TomoInputsPtr->Write2Tiff == 1) if (WriteMultiDimArray2Tiff(object_file, dimTiff, 0, 1, 2, 3, &(MagUpdateMap[0][0][0][0]), 0, TomoInputsPtr->debug_file_ptr)) flag = -1; for (i = 0; i < ScannedObjectPtr->N_time; i++) { sprintf(object_file, "%s_n%d", 
INIT_OBJECT_FILENAME, TomoInputsPtr->node_rank); sprintf(object_file, "%s_time_%d", object_file, i); dimTiff[0] = 1; dimTiff[1] = ScannedObjectPtr->N_z; dimTiff[2] = ScannedObjectPtr->N_y; dimTiff[3] = ScannedObjectPtr->N_x; if (TomoInputsPtr->Write2Tiff == 1) if (WriteMultiDimArray2Tiff(object_file, dimTiff, 0, 1, 2, 3, &(ScannedObjectPtr->Object[i][1][0][0]), 0, TomoInputsPtr->debug_file_ptr)) flag = -1; } return (flag); error: return (-1); } /* * 'initErrorSinogram' is used to initialize the error sinogram before the * start of ICD. It computes e = y - Ax - d. Ax is computed by forward * projecting the object x. */ int32_t initErrorSinogam(Sinogram * SinogramPtr, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr, Real_arr_t ** DetectorResponse, Real_arr_t *** ErrorSino /* , AMatrixCol* VoxelLineResponse */ ) { Real_t pixel, avg = 0; int32_t dimTiff[4], i, j, k, p, sino_idx, slice, flag = 0; AMatrixCol *AMatrixPtr = (AMatrixCol *) get_spc(ScannedObjectPtr->N_time, sizeof(AMatrixCol)); uint8_t AvgNumXElements = (uint8_t) ceil(3 * ScannedObjectPtr->delta_xy / SinogramPtr->delta_r); char error_file[100] = "error_sinogram"; sprintf(error_file, "%s_n%d", error_file, TomoInputsPtr->node_rank); for (i = 0; i < ScannedObjectPtr->N_time; i++) { AMatrixPtr[i].values = (Real_t *) get_spc(AvgNumXElements, sizeof(Real_t)); AMatrixPtr[i].index = (int32_t *) get_spc(AvgNumXElements, sizeof(int32_t)); } memset(&(ErrorSino[0][0][0]), 0, SinogramPtr->N_p * SinogramPtr->N_t * SinogramPtr->N_r * sizeof(Real_arr_t)); #pragma omp parallel for private(j, k, p, sino_idx, slice, pixel) for (i = 0; i < ScannedObjectPtr->N_time; i++) { for (j = 0; j < ScannedObjectPtr->N_y; j++) { for (k = 0; k < ScannedObjectPtr->N_x; k++) { for (p = 0; p < ScannedObjectPtr->ProjNum[i]; p++) { sino_idx = ScannedObjectPtr->ProjIdxPtr[i][p]; calcAMatrixColumnforAngle(SinogramPtr, ScannedObjectPtr, DetectorResponse, &(AMatrixPtr[i]), j, k, sino_idx); for (slice = 0; slice < ScannedObjectPtr->N_z; slice++) { /* * printf("count = %d, idx = %d, val = %f\n", * VoxelLineResponse[slice].count, * VoxelLineResponse[slice].index[0], * VoxelLineResponse[slice].values[0]); */ pixel = ScannedObjectPtr->Object[i][slice + 1][j][k]; /* slice+1 to account * for extra z slices * required for MPI */ forward_project_voxel(SinogramPtr, pixel, ErrorSino, &(AMatrixPtr[i]) /* , * &(VoxelLineResponse[slice]) */ , sino_idx, slice); } } } } } #pragma omp parallel for private(j, k) reduction(+:avg) for (i = 0; i < SinogramPtr->N_p; i++) for (j = 0; j < SinogramPtr->N_r; j++) for (k = 0; k < SinogramPtr->N_t; k++) { ErrorSino[i][j][k] = SinogramPtr->Projection[i][j][k] - ErrorSino[i][j][k] - SinogramPtr->ProjOffset[j][k]; if (fabs(ErrorSino[i][j][k] * sqrt(TomoInputsPtr->Weight[i][j][k])) < TomoInputsPtr->ErrorSinoThresh) SinogramPtr->ProjSelect[i][j][k] = true; else SinogramPtr->ProjSelect[i][j][k] = false; /* * if * (ErrorSino[i][j][k]*sqrt(TomoInputsPtr->Weight[i][j][k]) < * -30) TomoInputsPtr->Weight[i][j][k] = 0; */ avg += ErrorSino[i][j][k]; } avg = avg / (SinogramPtr->N_r * SinogramPtr->N_t * SinogramPtr->N_p); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Average of error sinogram in node %d is %f\n", TomoInputsPtr->node_rank, avg); dimTiff[0] = 1; dimTiff[1] = SinogramPtr->N_p; dimTiff[2] = SinogramPtr->N_r; dimTiff[3] = SinogramPtr->N_t; if (TomoInputsPtr->Write2Tiff == 1) flag = WriteMultiDimArray2Tiff(error_file, dimTiff, 0, 3, 1, 2, &(ErrorSino[0][0][0]), 0, TomoInputsPtr->debug_file_ptr); for (i = 0; i <
ScannedObjectPtr->N_time; i++) { free(AMatrixPtr[i].values); free(AMatrixPtr[i].index); } free(AMatrixPtr); multifree(SinogramPtr->Projection, 3); return (flag); } /* Updates the variance parameter \sigma */ void update_variance_parameter(Sinogram * SinogramPtr, TomoInputs * TomoInputsPtr, Real_arr_t *** ErrorSino) { int32_t k, i, j; Real_t temp_acc = 0, temp = 0; #pragma omp parallel for private(i, j, temp) reduction(+:temp_acc) for (k = 0; k < SinogramPtr->N_p; k++) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { TomoInputsPtr->Weight[k][i][j] = TomoInputsPtr->Weight[k][i][j] * TomoInputsPtr->var_est; if (SinogramPtr->ProjSelect[k][i][j] == true) temp = ErrorSino[k][i][j] * ErrorSino[k][i][j] * TomoInputsPtr->Weight[k][i][j]; else temp = fabs(ErrorSino[k][i][j]) * TomoInputsPtr->ErrorSinoDelta * TomoInputsPtr->ErrorSinoThresh * sqrt(TomoInputsPtr->Weight[k][i][j] * TomoInputsPtr->var_est); temp_acc += temp; } MPI_Allreduce(&temp_acc, &temp, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); TomoInputsPtr->var_est = temp / ((Real_t) TomoInputsPtr->node_num * (Real_t) SinogramPtr->N_p * (Real_t) SinogramPtr->N_r * (Real_t) SinogramPtr->N_t); #pragma omp parallel for private(i, j) for (k = 0; k < SinogramPtr->N_p; k++) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { TomoInputsPtr->Weight[k][i][j] /= TomoInputsPtr->var_est; if (fabs(ErrorSino[k][i][j] * sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh) SinogramPtr->ProjSelect[k][i][j] = true; else SinogramPtr->ProjSelect[k][i][j] = false; } } void update_d_offset_rect_patch_constraint(Sinogram * SinogramPtr, TomoInputs * TomoInputsPtr, Real_arr_t *** ErrorSino) { Real_t sign, **b, **Lambda, temp; Real_arr_t **x; int32_t i, j, k; b = (Real_t **) multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); Lambda = (Real_t **) multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); x = (Real_arr_t **) multialloc(sizeof(Real_arr_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); memset(&(b[0][0]), 0, SinogramPtr->N_r * SinogramPtr->N_t * sizeof(Real_t)); memset(&(Lambda[0][0]), 0, SinogramPtr->N_r * SinogramPtr->N_t * sizeof(Real_t)); memset(&(x[0][0]), 0, SinogramPtr->N_r * SinogramPtr->N_t * sizeof(Real_arr_t)); #pragma omp parallel for collapse(2) private(i, j, k, temp, sign) for (i = 0; i < SinogramPtr->N_r; i++) { for (j = 0; j < SinogramPtr->N_t; j++) { b[i][j] = 0; Lambda[i][j] = 0; for (k = 0; k < SinogramPtr->N_p; k++) { temp = TomoInputsPtr->ErrorSinoThresh * TomoInputsPtr->ErrorSinoDelta * sqrt(TomoInputsPtr->Weight[k][i][j]); if (SinogramPtr->ProjSelect[k][i][j] == true) { Lambda[i][j] += TomoInputsPtr->Weight[k][i][j]; b[i][j] += (ErrorSino[k][i][j] + SinogramPtr->ProjOffset[i][j]) * TomoInputsPtr->Weight[k][i][j]; } else { sign = (ErrorSino[k][i][j] > 0) - (ErrorSino[k][i][j] < 0); Lambda[i][j] += temp / fabs(ErrorSino[k][i][j]); b[i][j] += (ErrorSino[k][i][j] + SinogramPtr->ProjOffset[i][j]) * temp / fabs(ErrorSino[k][i][j]); } } } } constrained_quad_opt(Lambda, b, SinogramPtr->off_constraint, x, SinogramPtr->N_r, SinogramPtr->N_t, SinogramPtr->off_constraint_num, TomoInputsPtr); #pragma omp parallel for collapse(3) private(i, j, k) for (k = 0; k < SinogramPtr->N_p; k++) { for (i = 0; i < SinogramPtr->N_r; i++) { for (j = 0; j < SinogramPtr->N_t; j++) { ErrorSino[k][i][j] += SinogramPtr->ProjOffset[i][j] - x[i][j]; if (fabs(ErrorSino[k][i][j] * sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh) 
SinogramPtr->ProjSelect[k][i][j] = true; else SinogramPtr->ProjSelect[k][i][j] = false; } } } memcpy(&(SinogramPtr->ProjOffset[0][0]), &(x[0][0]), SinogramPtr->N_r * SinogramPtr->N_t * sizeof(Real_arr_t)); multifree(b, 2); multifree(Lambda, 2); multifree(x, 2); } /* Updates the projection offset error parameter d_i */ void update_d_offset_zero_mean_constraint(Sinogram * SinogramPtr, TomoInputs * TomoInputsPtr, Real_arr_t *** ErrorSino) { Real_t sign, **numerator, num_sum = 0, temp, **denominator, den_sum = 0, gamma = 0; int32_t i, j, k; numerator = (Real_t **) multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); denominator = (Real_t **) multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); #pragma omp parallel for private(j, k, temp, sign) reduction(+:num_sum, den_sum) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { numerator[i][j] = 0; denominator[i][j] = 0; for (k = 0; k < SinogramPtr->N_p; k++) { temp = TomoInputsPtr->ErrorSinoThresh * TomoInputsPtr->ErrorSinoDelta * sqrt(TomoInputsPtr->Weight[k][i][j]); if (SinogramPtr->ProjSelect[k][i][j] == true) { numerator[i][j] += ErrorSino[k][i][j] * TomoInputsPtr->Weight[k][i][j]; denominator[i][j] += TomoInputsPtr->Weight[k][i][j]; } else { sign = (ErrorSino[k][i][j] > 0) - (ErrorSino[k][i][j] < 0); numerator[i][j] += temp * sign; denominator[i][j] += temp / fabs(ErrorSino[k][i][j]); } } num_sum += SinogramPtr->ProjOffset[i][j] + (numerator[i][j] / denominator[i][j]); den_sum += 1.0 / denominator[i][j]; } gamma = num_sum / den_sum; #pragma omp parallel for private(j, k) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { SinogramPtr->ProjOffset[i][j] = SinogramPtr->ProjOffset[i][j] + (numerator[i][j] - gamma) / denominator[i][j]; for (k = 0; k < SinogramPtr->N_p; k++) { ErrorSino[k][i][j] -= (numerator[i][j] - gamma) / denominator[i][j]; if (fabs(ErrorSino[k][i][j] * sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh) SinogramPtr->ProjSelect[k][i][j] = true; else SinogramPtr->ProjSelect[k][i][j] = false; } } multifree(numerator, 2); multifree(denominator, 2); } /* Updates the projection offset error parameter d_i */ void update_d_offset_unconstrained(Sinogram * SinogramPtr, TomoInputs * TomoInputsPtr, Real_arr_t *** ErrorSino) { Real_t sign, **numerator, temp, **denominator; int32_t i, j, k; numerator = (Real_t **) multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); denominator = (Real_t **) multialloc(sizeof(Real_t), 2, SinogramPtr->N_r, SinogramPtr->N_t); #pragma omp parallel for private(j, k, temp, sign) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { numerator[i][j] = 0; denominator[i][j] = 0; for (k = 0; k < SinogramPtr->N_p; k++) { temp = TomoInputsPtr->ErrorSinoThresh * TomoInputsPtr->ErrorSinoDelta * sqrt(TomoInputsPtr->Weight[k][i][j]); if (SinogramPtr->ProjSelect[k][i][j] == true) { numerator[i][j] += ErrorSino[k][i][j] * TomoInputsPtr->Weight[k][i][j]; denominator[i][j] += TomoInputsPtr->Weight[k][i][j]; } else { sign = (ErrorSino[k][i][j] > 0) - (ErrorSino[k][i][j] < 0); numerator[i][j] += temp * sign; denominator[i][j] += temp / fabs(ErrorSino[k][i][j]); } } } #pragma omp parallel for private(j, k) for (i = 0; i < SinogramPtr->N_r; i++) for (j = 0; j < SinogramPtr->N_t; j++) { SinogramPtr->ProjOffset[i][j] = SinogramPtr->ProjOffset[i][j] + (numerator[i][j]) / denominator[i][j]; for (k = 0; k < SinogramPtr->N_p; k++) { ErrorSino[k][i][j] -= (numerator[i][j]) / denominator[i][j]; 
if (fabs(ErrorSino[k][i][j] * sqrt(TomoInputsPtr->Weight[k][i][j])) < TomoInputsPtr->ErrorSinoThresh) SinogramPtr->ProjSelect[k][i][j] = true; else SinogramPtr->ProjSelect[k][i][j] = false; } } multifree(numerator, 2); multifree(denominator, 2); } void update_Sinogram_Offset(Sinogram * SinogramPtr, TomoInputs * TomoInputsPtr, Real_arr_t *** ErrorSino) { if (TomoInputsPtr->OffsetConstraintType == 1) update_d_offset_unconstrained(SinogramPtr, TomoInputsPtr, ErrorSino); else if (TomoInputsPtr->OffsetConstraintType == 2) update_d_offset_zero_mean_constraint(SinogramPtr, TomoInputsPtr, ErrorSino); else if (TomoInputsPtr->OffsetConstraintType == 3) update_d_offset_rect_patch_constraint(SinogramPtr, TomoInputsPtr, ErrorSino); } /* * Implements multithreaded shared-memory parallelization using OpenMP and * splits the work among threads. Each thread gets a certain time slice and * z-block to update. Multithreading is done within the z-blocks assigned to * each node. ErrorSino - error sinogram. Iter - present iteration number. * MagUpdateMap - magnitude update map containing the magnitude of the * update of each voxel. Mask - if an element is true then the corresponding * voxel is updated. */ int updateVoxelsTimeSlices(Sinogram * SinogramPtr, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr, Real_arr_t ** DetectorResponse, /* AMatrixCol* VoxelLineResponse, */ Real_arr_t *** ErrorSino, int32_t Iter, Real_arr_t **** MagUpdateMap, uint8_t *** Mask) { Real_t AverageUpdate = 0, tempUpdate, avg_update_percentage, total_vox_mag = 0.0, vox_mag = 0.0; int32_t xy_start, xy_end, i, j, K, block, idx, **z_start, **z_stop; Real_t tempTotPix = 0, total_pix = 0; long int **zero_count, total_zero_count = 0; int32_t **thread_num = (int32_t **) multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks); MPI_Request *send_reqs, *recv_reqs; send_reqs = (MPI_Request *) get_spc(ScannedObjectPtr->N_time, sizeof(MPI_Request)); recv_reqs = (MPI_Request *) get_spc(ScannedObjectPtr->N_time, sizeof(MPI_Request)); z_start = (int32_t **) multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks); z_stop = (int32_t **) multialloc(sizeof(int32_t), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks); randomly_select_x_y(ScannedObjectPtr, TomoInputsPtr, Mask); zero_count = (long int **)multialloc(sizeof(long int), 2, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks); /* * offset_numerator = (Real_t**)multialloc(sizeof(Real_t), 2, * SinogramPtr->N_r, SinogramPtr->N_t); * memset(&(offset_denominator[0][0]), 0, * SinogramPtr->N_r*SinogramPtr->N_t*sizeof(Real_t)); * * for (k = 0; k < SinogramPtr->N_p; k++) for (i = 0; i < SinogramPtr->N_r; * i++) for (j = 0; j < SinogramPtr->N_t; j++) offset_denominator[i][j] * += TomoInputsPtr->Weight[k][i][j]; */ memset(&(zero_count[0][0]), 0, ScannedObjectPtr->N_time * TomoInputsPtr->num_z_blocks * sizeof(long int)); /* * K = ScannedObjectPtr->N_time*ScannedObjectPtr->N_z*ScannedObjectPtr->N_y*ScannedObjectPtr->N_x; * K = (K - total_zero_count)/(ScannedObjectPtr->gamma*K); */ K = ScannedObjectPtr->NHICD_Iterations; check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Number of NHICD iterations is %d.\n", K); for (j = 0; j < K; j++) { total_vox_mag = 0.0; #pragma omp parallel for collapse(2) private(i, block, idx, xy_start, xy_end) reduction(+:total_vox_mag) for (i = 0; i < ScannedObjectPtr->N_time; i++) for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2) { idx = (i % 2 == 0) ?
block : block + 1; z_start[i][idx] = idx * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); z_stop[i][idx] = (idx + 1) * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks) - 1; z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1 : z_stop[i][idx]; xy_start = j * floor(TomoInputsPtr->UpdateSelectNum[i][idx] / K); xy_end = (j + 1) * floor(TomoInputsPtr->UpdateSelectNum[i][idx] / K) - 1; xy_end = (j == K - 1) ? TomoInputsPtr->UpdateSelectNum[i][idx] - 1 : xy_end; /* * printf ("Loop 1 Start - j = %d, i = %d, idx = %d, z_start * = %d, z_stop = %d, xy_start = %d, xy_end = %d\n", j, i, * idx, z_start[i][idx], z_stop[i][idx], xy_start, xy_end); */ total_vox_mag += updateVoxels(i, i, z_start[i][idx], z_stop[i][idx], xy_start, xy_end, TomoInputsPtr->x_rand_select[i][idx], TomoInputsPtr->y_rand_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /* VoxelLineResponse, */ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]); thread_num[i][idx] = omp_get_thread_num(); } /* * check_info(TomoInputsPtr->node_rank==0, * TomoInputsPtr->debug_file_ptr, "Send MPI info\n"); */ MPI_Send_Recv_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0); /* * check_info(TomoInputsPtr->node_rank==0, * TomoInputsPtr->debug_file_ptr, "update_Sinogram_Offset: Will * compute projection offset error\n"); */ if (TomoInputsPtr->updateProjOffset > 1) update_Sinogram_Offset(SinogramPtr, TomoInputsPtr, ErrorSino); /* * check_info(TomoInputsPtr->node_rank==0, * TomoInputsPtr->debug_file_ptr, "update_Sinogram_Offset: Done * computing projection offset error\n"); */ MPI_Wait_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0); #pragma omp parallel for collapse(2) private(i, block, idx, xy_start, xy_end) reduction(+:total_vox_mag) for (i = 0; i < ScannedObjectPtr->N_time; i++) for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2) { idx = (i % 2 == 0) ? block + 1 : block; z_start[i][idx] = idx * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); z_stop[i][idx] = (idx + 1) * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks) - 1; z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1 : z_stop[i][idx]; xy_start = j * floor(TomoInputsPtr->UpdateSelectNum[i][idx] / K); xy_end = (j + 1) * floor(TomoInputsPtr->UpdateSelectNum[i][idx] / K) - 1; xy_end = (j == K - 1) ? 
TomoInputsPtr->UpdateSelectNum[i][idx] - 1 : xy_end; total_vox_mag += updateVoxels(i, i, z_start[i][idx], z_stop[i][idx], xy_start, xy_end, TomoInputsPtr->x_rand_select[i][idx], TomoInputsPtr->y_rand_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /* VoxelLineResponse, */ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]); thread_num[i][idx] = omp_get_thread_num(); /* * printf ("Loop 2 - i = %d, idx = %d, z_start = %d, z_stop = * %d, xy_start = %d, xy_end = %d\n", i, idx, * z_start[i][idx], z_stop[i][idx], xy_start, xy_end); */ } MPI_Send_Recv_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1); if (TomoInputsPtr->updateProjOffset > 1) update_Sinogram_Offset(SinogramPtr, TomoInputsPtr, ErrorSino); MPI_Wait_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1); VSC_based_Voxel_Line_Select(ScannedObjectPtr, TomoInputsPtr, MagUpdateMap); /* * check_info(TomoInputsPtr->node_rank==0, * TomoInputsPtr->debug_file_ptr, "Number of NHICD voxel lines to be * updated in iteration %d is %d\n", j, num_voxel_lines); */ if (Iter > 1 && TomoInputsPtr->no_NHICD == 0) { #pragma omp parallel for collapse(2) private(i, block, idx) for (i = 0; i < ScannedObjectPtr->N_time; i++) for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2) { idx = (i % 2 == 0) ? block : block + 1; z_start[i][idx] = idx * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); z_stop[i][idx] = (idx + 1) * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks) - 1; z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? ScannedObjectPtr->N_z - 1 : z_stop[i][idx]; updateVoxels(i, i, z_start[i][idx], z_stop[i][idx], 0, TomoInputsPtr->NHICDSelectNum[i][idx] - 1, TomoInputsPtr->x_NHICD_select[i][idx], TomoInputsPtr->y_NHICD_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /* VoxelLineResponse, */ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]); thread_num[i][idx] = omp_get_thread_num(); /* * printf ("Loop 1 NHICD - i = %d, idx = %d, z_start = * %d, z_stop = %d\n", i, idx, z_start[i][idx], * z_stop[i][idx]); */ } MPI_Send_Recv_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0); if (TomoInputsPtr->updateProjOffset > 1) update_Sinogram_Offset(SinogramPtr, TomoInputsPtr, ErrorSino); MPI_Wait_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 0); #pragma omp parallel for collapse(2) private(i, block, idx) for (i = 0; i < ScannedObjectPtr->N_time; i++) for (block = 0; block < TomoInputsPtr->num_z_blocks; block = block + 2) { idx = (i % 2 == 0) ? block + 1 : block; z_start[i][idx] = idx * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); z_stop[i][idx] = (idx + 1) * floor(ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks) - 1; z_stop[i][idx] = (idx >= TomoInputsPtr->num_z_blocks - 1) ? 
ScannedObjectPtr->N_z - 1 : z_stop[i][idx]; updateVoxels(i, i, z_start[i][idx], z_stop[i][idx], 0, TomoInputsPtr->NHICDSelectNum[i][idx] - 1, TomoInputsPtr->x_NHICD_select[i][idx], TomoInputsPtr->y_NHICD_select[i][idx], SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino, DetectorResponse, /* VoxelLineResponse, */ Iter, &(zero_count[i][idx]), MagUpdateMap[i][idx], Mask[i]); thread_num[i][idx] = omp_get_thread_num(); /* * printf ("Loop 2 NHICD - i = %d, idx = %d, z_start = * %d, z_stop = %d\n", i, idx, z_start[i][idx], * z_stop[i][idx]); */ } MPI_Send_Recv_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1); if (TomoInputsPtr->updateProjOffset > 1) update_Sinogram_Offset(SinogramPtr, TomoInputsPtr, ErrorSino); MPI_Wait_Z_Slices(ScannedObjectPtr, TomoInputsPtr, send_reqs, recv_reqs, 1); } } if (TomoInputsPtr->updateVar == 1) update_variance_parameter(SinogramPtr, TomoInputsPtr, ErrorSino); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Time Slice, Z Start, Z End - Thread : "); total_pix = 0; for (i = 0; i < ScannedObjectPtr->N_time; i++) { for (block = 0; block < TomoInputsPtr->num_z_blocks; block++) { total_pix += TomoInputsPtr->UpdateSelectNum[i][block] * (ScannedObjectPtr->N_z / TomoInputsPtr->num_z_blocks); for (j = 0; j < TomoInputsPtr->UpdateSelectNum[i][block]; j++) { AverageUpdate += MagUpdateMap[i][block][TomoInputsPtr->y_rand_select[i][block][j]][TomoInputsPtr->x_rand_select[i][block][j]]; } total_zero_count += zero_count[i][block]; check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "%d,%d,%d-%d; ", i, z_start[i][block], z_stop[i][block], thread_num[i][block]); } } check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "\n"); MPI_Allreduce(&AverageUpdate, &tempUpdate, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&total_pix, &tempTotPix, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&total_vox_mag, &vox_mag, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); AverageUpdate = tempUpdate / (tempTotPix); AverageUpdate = convert2Hounsfield(AverageUpdate); check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Average voxel update over all voxels is %f, total voxels is %f.\n", AverageUpdate, tempTotPix); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Zero count is %ld.\n", total_zero_count); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Variance parameter divisor is %f.\n", (Real_t) TomoInputsPtr->node_num * (Real_t) SinogramPtr->N_p * (Real_t) SinogramPtr->N_r * (Real_t) SinogramPtr->N_t); multifree(zero_count, 2); multifree(thread_num, 2); multifree(z_start, 2); multifree(z_stop, 2); free(send_reqs); free(recv_reqs); /* * multifree(offset_numerator,2); multifree(offset_denominator,2); */ avg_update_percentage = 100 * tempUpdate / vox_mag; check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Percentage average magnitude of voxel updates is %f.\n", avg_update_percentage); if (avg_update_percentage < TomoInputsPtr->StopThreshold) { check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Percentage average magnitude of voxel updates is less than convergence threshold.\n"); return (1); } return (0); } /* * ICD_BackProject calls the ICD optimization function repeatedly till the * stopping criteria is met. 
*/ int ICD_BackProject(Sinogram * SinogramPtr, ScannedObject * ScannedObjectPtr, TomoInputs * TomoInputsPtr) { #ifndef NO_COST_CALCULATE Real_t cost, cost_0_iter, cost_last_iter, percentage_change_in_cost = 0; char costfile[100] = COST_FILENAME; #endif Real_arr_t ***ErrorSino, **H_r, *H_t; Real_t x, y; int32_t j, flag = 0, Iter, i, k; int dimTiff[4]; char VarEstFile[100] = VAR_PARAM_FILENAME; char scaled_error_file[100] = SCALED_ERROR_SINO_FILENAME; time_t start; char detect_file[100] = DETECTOR_RESPONSE_FILENAME; char projselect_file[100] = PROJ_SELECT_FILENAME; char MagUpdateMapFile[100] = MAG_UPDATE_FILENAME; uint8_t ***Mask; /* AMatrixCol *VoxelLineResponse; */ #ifdef POSITIVITY_CONSTRAINT check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Enforcing positivity constraint\n"); #endif Real_arr_t ****MagUpdateMap = (Real_arr_t ****) multialloc(sizeof(Real_arr_t), 4, ScannedObjectPtr->N_time, TomoInputsPtr->num_z_blocks, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x); H_r = (Real_arr_t **) multialloc(sizeof(Real_arr_t), 2, SinogramPtr->N_p, DETECTOR_RESPONSE_BINS + 1); H_t = (Real_arr_t *) get_spc(DETECTOR_RESPONSE_BINS + 1, sizeof(Real_arr_t)); ErrorSino = (Real_arr_t ***) multialloc(sizeof(Real_arr_t), 3, SinogramPtr->N_p, SinogramPtr->N_r, SinogramPtr->N_t); Mask = (uint8_t ***) multialloc(sizeof(uint8_t), 3, ScannedObjectPtr->N_time, ScannedObjectPtr->N_y, ScannedObjectPtr->N_x); memset(&(MagUpdateMap[0][0][0][0]), 0, ScannedObjectPtr->N_time * TomoInputsPtr->num_z_blocks * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x * sizeof(Real_arr_t)); /* omp_set_num_threads(TomoInputsPtr->num_threads); */ check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Number of CPU cores is %d\n", (int)omp_get_num_procs()); /* * check_info(TomoInputsPtr->node_rank==0, TomoInputsPtr->debug_file_ptr, * "ICD_BackProject: Number of threads is %d\n", * TomoInputsPtr->num_threads) ; */ for (i = 0; i < ScannedObjectPtr->N_time; i++) for (j = 0; j < ScannedObjectPtr->N_y; j++) for (k = 0; k < ScannedObjectPtr->N_x; k++) { x = ScannedObjectPtr->x0 + ((Real_t) k + 0.5) * ScannedObjectPtr->delta_xy; y = ScannedObjectPtr->y0 + ((Real_t) j + 0.5) * ScannedObjectPtr->delta_xy; if (x * x + y * y < TomoInputsPtr->radius_obj * TomoInputsPtr->radius_obj) Mask[i][j][k] = 1; else Mask[i][j][k] = 0; } DetectorResponseProfile(H_r, H_t, SinogramPtr, ScannedObjectPtr, TomoInputsPtr); dimTiff[0] = 1; dimTiff[1] = 1; dimTiff[2] = SinogramPtr->N_p; dimTiff[3] = DETECTOR_RESPONSE_BINS + 1; sprintf(detect_file, "%s_n%d", detect_file, TomoInputsPtr->node_rank); if (TomoInputsPtr->Write2Tiff == 1) if (WriteMultiDimArray2Tiff(detect_file, dimTiff, 0, 1, 2, 3, &(H_r[0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error; start = time(NULL); if (initObject(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, MagUpdateMap)) goto error; check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Time taken to read object = %fmins\n", difftime(time(NULL), start) / 60.0); if (initErrorSinogam(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, H_r, ErrorSino /* , VoxelLineResponse */ )) goto error; check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Time taken to initialize object and compute error sinogram = %fmins\n", difftime(time(NULL), start) / 60.0); #ifndef NO_COST_CALCULATE cost = computeCost(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino); cost_0_iter = cost; cost_last_iter = cost; check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, 
"------------- Iteration 0, Cost = %f------------\n", cost); if (TomoInputsPtr->node_rank == 0) Write2Bin(costfile, 1, 1, 1, 1, sizeof(Real_t), &cost, TomoInputsPtr->debug_file_ptr); #endif /* Cost calculation endif */ start = time(NULL); for (Iter = 1; Iter <= TomoInputsPtr->NumIter; Iter++) { flag = updateVoxelsTimeSlices(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, H_r, /* VoxelLineResponse, */ ErrorSino, Iter, MagUpdateMap, Mask); if (TomoInputsPtr->WritePerIter == 1) if (write_ObjectProjOff2TiffBinPerIter(SinogramPtr, ScannedObjectPtr, TomoInputsPtr)) goto error; #ifndef NO_COST_CALCULATE cost = computeCost(SinogramPtr, ScannedObjectPtr, TomoInputsPtr, ErrorSino); percentage_change_in_cost = ((cost - cost_last_iter) / (cost - cost_0_iter)) * 100.0; check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Percentage change in cost is %f.\n", percentage_change_in_cost); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Variance parameter estimate = %f.\n", TomoInputsPtr->var_est); check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "------------- Iteration = %d, Cost = %f, Time since start of ICD = %fmins ------------\n", Iter, cost, difftime(time(NULL), start) / 60.0); if (TomoInputsPtr->node_rank == 0) Append2Bin(costfile, 1, 1, 1, 1, sizeof(Real_t), &cost, TomoInputsPtr->debug_file_ptr); check_error(cost > cost_last_iter, TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Cost value increased.\n"); cost_last_iter = cost; /* * if (percentage_change_in_cost < TomoInputsPtr->cost_thresh && flag * != 0 && Iter > 1){ */ if (flag != 0 && Iter > 1) { check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Convergence criteria is met.\n"); break; } #else check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Variance parameter estimate = %f\n", TomoInputsPtr->var_est); check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "-------------ICD_BackProject: ICD Iter = %d, time since start of ICD = %fmins------------.\n", Iter, difftime(time(NULL), start) / 60.0); if (flag != 0 && Iter > 1) { check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Convergence criteria is met.\n"); break; } #endif flag = fflush(TomoInputsPtr->debug_file_ptr); if (flag != 0) check_warn(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Cannot flush buffer.\n"); } for (i = 0; i < SinogramPtr->N_p; i++) for (j = 0; j < SinogramPtr->N_r; j++) for (k = 0; k < SinogramPtr->N_t; k++) ErrorSino[i][j][k] *= sqrt(TomoInputsPtr->Weight[i][j][k]); if (TomoInputsPtr->node_rank == 0) Write2Bin(VarEstFile, 1, 1, 1, 1, sizeof(Real_t), &(TomoInputsPtr->var_est), TomoInputsPtr->debug_file_ptr); int32_t size = ScannedObjectPtr->N_time * TomoInputsPtr->num_z_blocks * ScannedObjectPtr->N_y * ScannedObjectPtr->N_x; if (write_SharedBinFile_At(MagUpdateMapFile, &(MagUpdateMap[0][0][0][0]), TomoInputsPtr->node_rank * size, size, TomoInputsPtr->debug_file_ptr)) goto error; sprintf(scaled_error_file, "%s_n%d", scaled_error_file, TomoInputsPtr->node_rank); sprintf(projselect_file, "%s_n%d", projselect_file, TomoInputsPtr->node_rank); dimTiff[0] = 1; dimTiff[1] = SinogramPtr->N_p; dimTiff[2] = SinogramPtr->N_r; dimTiff[3] = SinogramPtr->N_t; if (TomoInputsPtr->Write2Tiff == 1) { if (WriteMultiDimArray2Tiff(scaled_error_file, dimTiff, 0, 3, 1, 2, &(ErrorSino[0][0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error; if (WriteBoolArray2Tiff(projselect_file, dimTiff, 0, 3, 1, 2, 
&(SinogramPtr->ProjSelect[0][0][0]), 0, TomoInputsPtr->debug_file_ptr)) goto error; } multifree(ErrorSino, 3); multifree(H_r, 2); free(H_t); multifree(Mask, 3); multifree(MagUpdateMap, 4); check_debug(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Finished running ICD_BackProject.\n"); flag = fflush(TomoInputsPtr->debug_file_ptr); if (flag != 0) check_warn(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "Cannot flush buffer.\n"); check_info(TomoInputsPtr->node_rank == 0, TomoInputsPtr->debug_file_ptr, "The estimated value of variance parameter is %f.\n", TomoInputsPtr->var_est); return (0); error: multifree(ErrorSino, 3); multifree(H_r, 2); free(H_t); multifree(Mask, 3); multifree(MagUpdateMap, 4); return (-1); }
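/*
 * The bilinear upsampling routines above all apply the same 1D half-sample
 * stencil separably along each upsampled axis: every input sample k yields
 * the output pair (in[k-1] + 3*in[k])/4 and (3*in[k] + in[k+1])/4, with the
 * first and last outputs clamped to the edge values. A minimal,
 * self-contained sketch of that per-axis stencil follows; the helper name
 * upsample_axis_by2 is illustrative and not part of the original sources.
 * Assumes n >= 2 and that out has room for 2*n samples.
 */
static void upsample_axis_by2(const double *in, double *out, int n)
{
	int k;
	out[0] = in[0];	/* clamped left boundary */
	out[1] = (3.0 * in[0] + in[1]) / 4.0;
	out[2 * n - 1] = in[n - 1];	/* clamped right boundary */
	out[2 * n - 2] = (in[n - 2] + 3.0 * in[n - 1]) / 4.0;
	for (k = 1; k < n - 1; k++) {
		out[2 * k] = (in[k - 1] + 3.0 * in[k]) / 4.0;	/* output at k - 1/4 */
		out[2 * k + 1] = (3.0 * in[k] + in[k + 1]) / 4.0;	/* output at k + 1/4 */
	}
}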
triplet_kpoint.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* These codes were originally parts of spglib, but only developed */ /* and used for phono3py. Therefore these were moved from spglib to */ /* phono3py. This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE.
*/ #include <stddef.h> #include <stdlib.h> #include <mathfunc.h> #include <kpoint.h> #include <kgrid.h> #include <triplet_h/triplet.h> #include <triplet_h/triplet_kpoint.h> #define KPT_NUM_BZ_SEARCH_SPACE 125 static int bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = { { 0, 0, 0}, { 0, 0, 1}, { 0, 0, 2}, { 0, 0, -2}, { 0, 0, -1}, { 0, 1, 0}, { 0, 1, 1}, { 0, 1, 2}, { 0, 1, -2}, { 0, 1, -1}, { 0, 2, 0}, { 0, 2, 1}, { 0, 2, 2}, { 0, 2, -2}, { 0, 2, -1}, { 0, -2, 0}, { 0, -2, 1}, { 0, -2, 2}, { 0, -2, -2}, { 0, -2, -1}, { 0, -1, 0}, { 0, -1, 1}, { 0, -1, 2}, { 0, -1, -2}, { 0, -1, -1}, { 1, 0, 0}, { 1, 0, 1}, { 1, 0, 2}, { 1, 0, -2}, { 1, 0, -1}, { 1, 1, 0}, { 1, 1, 1}, { 1, 1, 2}, { 1, 1, -2}, { 1, 1, -1}, { 1, 2, 0}, { 1, 2, 1}, { 1, 2, 2}, { 1, 2, -2}, { 1, 2, -1}, { 1, -2, 0}, { 1, -2, 1}, { 1, -2, 2}, { 1, -2, -2}, { 1, -2, -1}, { 1, -1, 0}, { 1, -1, 1}, { 1, -1, 2}, { 1, -1, -2}, { 1, -1, -1}, { 2, 0, 0}, { 2, 0, 1}, { 2, 0, 2}, { 2, 0, -2}, { 2, 0, -1}, { 2, 1, 0}, { 2, 1, 1}, { 2, 1, 2}, { 2, 1, -2}, { 2, 1, -1}, { 2, 2, 0}, { 2, 2, 1}, { 2, 2, 2}, { 2, 2, -2}, { 2, 2, -1}, { 2, -2, 0}, { 2, -2, 1}, { 2, -2, 2}, { 2, -2, -2}, { 2, -2, -1}, { 2, -1, 0}, { 2, -1, 1}, { 2, -1, 2}, { 2, -1, -2}, { 2, -1, -1}, {-2, 0, 0}, {-2, 0, 1}, {-2, 0, 2}, {-2, 0, -2}, {-2, 0, -1}, {-2, 1, 0}, {-2, 1, 1}, {-2, 1, 2}, {-2, 1, -2}, {-2, 1, -1}, {-2, 2, 0}, {-2, 2, 1}, {-2, 2, 2}, {-2, 2, -2}, {-2, 2, -1}, {-2, -2, 0}, {-2, -2, 1}, {-2, -2, 2}, {-2, -2, -2}, {-2, -2, -1}, {-2, -1, 0}, {-2, -1, 1}, {-2, -1, 2}, {-2, -1, -2}, {-2, -1, -1}, {-1, 0, 0}, {-1, 0, 1}, {-1, 0, 2}, {-1, 0, -2}, {-1, 0, -1}, {-1, 1, 0}, {-1, 1, 1}, {-1, 1, 2}, {-1, 1, -2}, {-1, 1, -1}, {-1, 2, 0}, {-1, 2, 1}, {-1, 2, 2}, {-1, 2, -2}, {-1, 2, -1}, {-1, -2, 0}, {-1, -2, 1}, {-1, -2, 2}, {-1, -2, -2}, {-1, -2, -1}, {-1, -1, 0}, {-1, -1, 1}, {-1, -1, 2}, {-1, -1, -2}, {-1, -1, -1} }; static void grid_point_to_address_double(int address_double[3], const size_t grid_point, const int mesh[3], const int is_shift[3]); static size_t get_ir_triplets_at_q(size_t *map_triplets, size_t *map_q, int (*grid_address)[3], const size_t grid_point, const int mesh[3], const MatINT * rot_reciprocal, const int swappable); static size_t get_BZ_triplets_at_q(size_t (*triplets)[3], const size_t grid_point, TPLCONST int (*bz_grid_address)[3], const size_t *bz_map, const size_t *map_triplets, const size_t num_map_triplets, const int mesh[3]); static int get_third_q_of_triplets_at_q(int bz_address[3][3], const int q_index, const size_t *bz_map, const int mesh[3], const int bzmesh[3]); static void modulo_i3(int v[3], const int m[3]); size_t tpk_get_ir_triplets_at_q(size_t *map_triplets, size_t *map_q, int (*grid_address)[3], const int grid_point, const int mesh[3], const int is_time_reversal, const MatINT * rotations, const int swappable) { int num_ir; MatINT *rot_reciprocal; rot_reciprocal = kpt_get_point_group_reciprocal(rotations, is_time_reversal); num_ir = get_ir_triplets_at_q(map_triplets, map_q, grid_address, grid_point, mesh, rot_reciprocal, swappable); mat_free_MatINT(rot_reciprocal); return num_ir; } size_t tpk_get_BZ_triplets_at_q(size_t (*triplets)[3], const size_t grid_point, TPLCONST int (*bz_grid_address)[3], const size_t *bz_map, const size_t *map_triplets, const size_t num_map_triplets, const int mesh[3]) { return get_BZ_triplets_at_q(triplets, grid_point, bz_grid_address, bz_map, map_triplets, num_map_triplets, mesh); } static size_t get_ir_triplets_at_q(size_t *map_triplets, size_t *map_q, int (*grid_address)[3], const size_t grid_point, const 
int mesh[3], const MatINT * rot_reciprocal, const int swappable) { size_t i, j, num_grid, q_2, num_ir_q, num_ir_triplets, ir_grid_point; int mesh_double[3], is_shift[3]; int address_double0[3], address_double1[3], address_double2[3]; size_t *ir_grid_points, *third_q; double tolerance; double stabilizer_q[1][3]; MatINT *rot_reciprocal_q; ir_grid_points = NULL; third_q = NULL; rot_reciprocal_q = NULL; tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]); num_grid = mesh[0] * mesh[1] * (size_t)mesh[2]; for (i = 0; i < 3; i++) { /* Only consider the gamma-point */ is_shift[i] = 0; mesh_double[i] = mesh[i] * 2; } /* Search irreducible q-points (map_q) with a stabilizer */ /* q */ grid_point_to_address_double(address_double0, grid_point, mesh, is_shift); for (i = 0; i < 3; i++) { stabilizer_q[0][i] = (double)address_double0[i] / mesh_double[i] - (address_double0[i] > mesh[i]); } rot_reciprocal_q = kpt_get_point_group_reciprocal_with_q(rot_reciprocal, tolerance, 1, stabilizer_q); num_ir_q = kpt_get_dense_irreducible_reciprocal_mesh(grid_address, map_q, mesh, is_shift, rot_reciprocal_q); mat_free_MatINT(rot_reciprocal_q); rot_reciprocal_q = NULL; third_q = (size_t*) malloc(sizeof(size_t) * num_ir_q); ir_grid_points = (size_t*) malloc(sizeof(size_t) * num_ir_q); num_ir_q = 0; for (i = 0; i < num_grid; i++) { if (map_q[i] == i) { ir_grid_points[num_ir_q] = i; num_ir_q++; } } for (i = 0; i < num_grid; i++) { map_triplets[i] = num_grid; /* When not found, map_triplets == num_grid */ } #pragma omp parallel for private(j, address_double1, address_double2) for (i = 0; i < num_ir_q; i++) { grid_point_to_address_double(address_double1, ir_grid_points[i], mesh, is_shift); /* q' */ for (j = 0; j < 3; j++) { /* q'' */ address_double2[j] = - address_double0[j] - address_double1[j]; } third_q[i] = kgd_get_dense_grid_point_double_mesh(address_double2, mesh); } num_ir_triplets = 0; if (swappable) { /* search q1 <-> q2 */ for (i = 0; i < num_ir_q; i++) { ir_grid_point = ir_grid_points[i]; q_2 = third_q[i]; if (map_triplets[map_q[q_2]] < num_grid) { map_triplets[ir_grid_point] = map_triplets[map_q[q_2]]; } else { map_triplets[ir_grid_point] = ir_grid_point; num_ir_triplets++; } } } else { for (i = 0; i < num_ir_q; i++) { ir_grid_point = ir_grid_points[i]; map_triplets[ir_grid_point] = ir_grid_point; num_ir_triplets++; } } #pragma omp parallel for for (i = 0; i < num_grid; i++) { map_triplets[i] = map_triplets[map_q[i]]; } free(third_q); third_q = NULL; free(ir_grid_points); ir_grid_points = NULL; return num_ir_triplets; } static size_t get_BZ_triplets_at_q(size_t (*triplets)[3], const size_t grid_point, TPLCONST int (*bz_grid_address)[3], const size_t *bz_map, const size_t *map_triplets, const size_t num_map_triplets, const int mesh[3]) { size_t i, num_ir; int j, k; int bz_address[3][3], bz_address_double[3], bzmesh[3]; size_t *ir_grid_points; ir_grid_points = NULL; for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; } num_ir = 0; ir_grid_points = (size_t*) malloc(sizeof(size_t) * num_map_triplets); for (i = 0; i < num_map_triplets; i++) { if (map_triplets[i] == i) { ir_grid_points[num_ir] = i; num_ir++; } } #pragma omp parallel for private(j, k, bz_address, bz_address_double) for (i = 0; i < num_ir; i++) { for (j = 0; j < 3; j++) { bz_address[0][j] = bz_grid_address[grid_point][j]; bz_address[1][j] = bz_grid_address[ir_grid_points[i]][j]; bz_address[2][j] = - bz_address[0][j] - bz_address[1][j]; } for (j = 2; j > -1; j--) { if (get_third_q_of_triplets_at_q(bz_address, j, bz_map, mesh, bzmesh) == 0) { break; } } for (j 
= 0; j < 3; j++) { for (k = 0; k < 3; k++) { bz_address_double[k] = bz_address[j][k] * 2; } triplets[i][j] = bz_map[kgd_get_dense_grid_point_double_mesh(bz_address_double, bzmesh)]; } } free(ir_grid_points); ir_grid_points = NULL; return num_ir; } static int get_third_q_of_triplets_at_q(int bz_address[3][3], const int q_index, const size_t *bz_map, const int mesh[3], const int bzmesh[3]) { int i, j, smallest_g, smallest_index, sum_g, delta_g[3]; size_t prod_bzmesh; size_t bzgp[KPT_NUM_BZ_SEARCH_SPACE]; int bz_address_double[3]; prod_bzmesh = (size_t)bzmesh[0] * bzmesh[1] * bzmesh[2]; modulo_i3(bz_address[q_index], mesh); for (i = 0; i < 3; i++) { delta_g[i] = 0; for (j = 0; j < 3; j++) { delta_g[i] += bz_address[j][i]; } delta_g[i] /= mesh[i]; } for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) { for (j = 0; j < 3; j++) { bz_address_double[j] = (bz_address[q_index][j] + bz_search_space[i][j] * mesh[j]) * 2; } bzgp[i] = bz_map[kgd_get_dense_grid_point_double_mesh(bz_address_double, bzmesh)]; } for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) { if (bzgp[i] != prod_bzmesh) { goto escape; } } escape: smallest_g = 4; smallest_index = 0; for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) { if (bzgp[i] < prod_bzmesh) { /* q'' is in BZ */ sum_g = (abs(delta_g[0] + bz_search_space[i][0]) + abs(delta_g[1] + bz_search_space[i][1]) + abs(delta_g[2] + bz_search_space[i][2])); if (sum_g < smallest_g) { smallest_index = i; smallest_g = sum_g; } } } for (i = 0; i < 3; i++) { bz_address[q_index][i] += bz_search_space[smallest_index][i] * mesh[i]; } return smallest_g; } static void grid_point_to_address_double(int address_double[3], const size_t grid_point, const int mesh[3], const int is_shift[3]) { int i; int address[3]; #ifndef GRID_ORDER_XYZ address[2] = grid_point / (mesh[0] * mesh[1]); address[1] = (grid_point - address[2] * mesh[0] * mesh[1]) / mesh[0]; address[0] = grid_point % mesh[0]; #else address[0] = grid_point / (mesh[1] * mesh[2]); address[1] = (grid_point - address[0] * mesh[1] * mesh[2]) / mesh[2]; address[2] = grid_point % mesh[2]; #endif for (i = 0; i < 3; i++) { address_double[i] = address[i] * 2 + is_shift[i]; } } static void modulo_i3(int v[3], const int m[3]) { int i; for (i = 0; i < 3; i++) { v[i] = v[i] % m[i]; if (v[i] < 0) { v[i] += m[i]; } } }
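/*
 * A small worked example (an editorial sketch, not from the original
 * sources) of the grid addressing used above. With GRID_ORDER_XYZ
 * undefined, grid_point_to_address_double unpacks a linear grid point with
 * x varying fastest and z slowest, then doubles the address and adds the
 * half-grid shift so that shifted meshes remain integer-valued. On a
 * gamma-centered 4 x 4 x 4 mesh (is_shift = {0, 0, 0}), grid point 21
 * unpacks as 21 = 1 + 1*4 + 1*16, i.e. address (1, 1, 1) and double
 * address (2, 2, 2).
 */
#include <stdio.h>

int main(void)
{
	const int mesh[3] = {4, 4, 4};
	const int is_shift[3] = {0, 0, 0};	/* gamma-centered, as in get_ir_triplets_at_q */
	const long g = 21;	/* example grid point */
	int adr[3], adr_double[3], i;

	adr[2] = (int)(g / (mesh[0] * mesh[1]));	/* z varies slowest */
	adr[1] = (int)((g - (long)adr[2] * mesh[0] * mesh[1]) / mesh[0]);
	adr[0] = (int)(g % mesh[0]);	/* x varies fastest */
	for (i = 0; i < 3; i++) {
		adr_double[i] = adr[i] * 2 + is_shift[i];
	}
	printf("g = %ld -> address (%d, %d, %d), double address (%d, %d, %d)\n",
	       g, adr[0], adr[1], adr[2], adr_double[0], adr_double[1], adr_double[2]);
	return 0;
}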
/* All rights reserved. */ /* These codes were originally parts of spglib, but only develped */ /* and used for phono3py. Therefore these were moved from spglib to */ /* phono3py. This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ #include <stddef.h> #include <stdlib.h> #include <mathfunc.h> #include <kpoint.h> #include <kgrid.h> #include <triplet_h/triplet.h> #include <triplet_h/triplet_kpoint.h> #define KPT_NUM_BZ_SEARCH_SPACE 125 static int bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = { {0, 0, 0}, {0, 0, 1}, {0, 0, 2}, {0, 0, -2}, {0, 0, -1}, {0, 1, 0}, {0, 1, 1}, {0, 1, 2}, {0, 1, -2}, {0, 1, -1}, {0, 2, 0}, {0, 2, 1}, {0, 2, 2}, {0, 2, -2}, {0, 2, -1}, {0, -2, 0}, {0, -2, 1}, {0, -2, 2}, {0, -2, -2}, {0, -2, -1}, {0, -1, 0}, {0, -1, 1}, {0, -1, 2}, {0, -1, -2}, {0, -1, -1}, {1, 0, 0}, {1, 0, 1}, {1, 0, 2}, {1, 0, -2}, {1, 0, -1}, {1, 1, 0}, {1, 1, 1}, {1, 1, 2}, {1, 1, -2}, {1, 1, -1}, {1, 2, 0}, {1, 2, 1}, {1, 2, 2}, {1, 2, -2}, {1, 2, -1}, {1, -2, 0}, {1, -2, 1}, {1, -2, 2}, {1, -2, -2}, {1, -2, -1}, {1, -1, 0}, {1, -1, 1}, {1, -1, 2}, {1, -1, -2}, {1, -1, -1}, {2, 0, 0}, {2, 0, 1}, {2, 0, 2}, {2, 0, -2}, {2, 0, -1}, {2, 1, 0}, {2, 1, 1}, {2, 1, 2}, {2, 1, -2}, {2, 1, -1}, {2, 2, 0}, {2, 2, 1}, {2, 2, 2}, {2, 2, -2}, {2, 2, -1}, {2, -2, 0}, {2, -2, 1}, {2, -2, 2}, {2, -2, -2}, {2, -2, -1}, {2, -1, 0}, {2, -1, 1}, {2, -1, 2}, {2, -1, -2}, {2, -1, -1}, {-2, 0, 0}, {-2, 0, 1}, {-2, 0, 2}, {-2, 0, -2}, {-2, 0, -1}, {-2, 1, 0}, {-2, 1, 1}, {-2, 1, 2}, {-2, 1, -2}, {-2, 1, -1}, {-2, 2, 0}, {-2, 2, 1}, {-2, 2, 2}, {-2, 2, -2}, {-2, 2, -1}, {-2, -2, 0}, {-2, -2, 1}, {-2, -2, 2}, {-2, -2, -2}, {-2, -2, -1}, {-2, -1, 0}, {-2, -1, 1}, {-2, -1, 2}, {-2, -1, -2}, {-2, -1, -1}, {-1, 0, 0}, {-1, 0, 1}, {-1, 0, 2}, {-1, 0, -2}, {-1, 0, -1}, {-1, 1, 0}, {-1, 1, 1}, {-1, 1, 2}, {-1, 1, -2}, {-1, 1, -1}, {-1, 2, 0}, {-1, 2, 1}, {-1, 2, 2}, {-1, 2, -2}, {-1, 2, -1}, {-1, -2, 0}, {-1, -2, 1}, {-1, -2, 2}, {-1, -2, -2}, {-1, -2, -1}, {-1, -1, 0}, {-1, -1, 1}, {-1, -1, 2}, {-1, -1, -2}, 
{-1, -1, -1} }; static void grid_point_to_address_double(int address_double[3], const size_t grid_point, const int mesh[3], const int is_shift[3]); static size_t get_ir_triplets_at_q(size_t * map_triplets, size_t * map_q, int (*grid_address)[3], const size_t grid_point, const int mesh[3], const MatINT * rot_reciprocal, const int swappable); static size_t get_BZ_triplets_at_q(size_t(*triplets)[3], const size_t grid_point, TPLCONST int (*bz_grid_address)[3], const size_t * bz_map, const size_t * map_triplets, const size_t num_map_triplets, const int mesh[3]); static int get_third_q_of_triplets_at_q(int bz_address[3][3], const int q_index, const size_t * bz_map, const int mesh[3], const int bzmesh[3]); static void modulo_i3(int v[3], const int m[3]); size_t tpk_get_ir_triplets_at_q(size_t * map_triplets, size_t * map_q, int (*grid_address)[3], const int grid_point, const int mesh[3], const int is_time_reversal, const MatINT * rotations, const int swappable) { int num_ir; MatINT *rot_reciprocal; rot_reciprocal = kpt_get_point_group_reciprocal(rotations, is_time_reversal); num_ir = get_ir_triplets_at_q(map_triplets, map_q, grid_address, grid_point, mesh, rot_reciprocal, swappable); mat_free_MatINT(rot_reciprocal); return num_ir; } size_t tpk_get_BZ_triplets_at_q(size_t(*triplets)[3], const size_t grid_point, TPLCONST int (*bz_grid_address)[3], const size_t * bz_map, const size_t * map_triplets, const size_t num_map_triplets, const int mesh[3]) { return get_BZ_triplets_at_q(triplets, grid_point, bz_grid_address, bz_map, map_triplets, num_map_triplets, mesh); } static size_t get_ir_triplets_at_q(size_t * map_triplets, size_t * map_q, int (*grid_address)[3], const size_t grid_point, const int mesh[3], const MatINT * rot_reciprocal, const int swappable) { size_t i, j, num_grid, q_2, num_ir_q, num_ir_triplets, ir_grid_point; int mesh_double[3], is_shift[3]; int address_double0[3], address_double1[3], address_double2[3]; size_t *ir_grid_points, *third_q; double tolerance; double stabilizer_q[1][3]; MatINT *rot_reciprocal_q; ir_grid_points = NULL; third_q = NULL; rot_reciprocal_q = NULL; tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]); num_grid = mesh[0] * mesh[1] * (size_t) mesh[2]; for (i = 0; i < 3; i++) { /* Only consider the gamma-point */ is_shift[i] = 0; mesh_double[i] = mesh[i] * 2; } /* Search irreducible q-points (map_q) with a stabilizer */ /* q */ grid_point_to_address_double(address_double0, grid_point, mesh, is_shift); for (i = 0; i < 3; i++) { stabilizer_q[0][i] = (double)address_double0[i] / mesh_double[i] - (address_double0[i] > mesh[i]); } rot_reciprocal_q = kpt_get_point_group_reciprocal_with_q(rot_reciprocal, tolerance, 1, stabilizer_q); num_ir_q = kpt_get_dense_irreducible_reciprocal_mesh(grid_address, map_q, mesh, is_shift, rot_reciprocal_q); mat_free_MatINT(rot_reciprocal_q); rot_reciprocal_q = NULL; third_q = (size_t *) malloc(sizeof(size_t) * num_ir_q); ir_grid_points = (size_t *) malloc(sizeof(size_t) * num_ir_q); num_ir_q = 0; for (i = 0; i < num_grid; i++) { if (map_q[i] == i) { ir_grid_points[num_ir_q] = i; num_ir_q++; } } for (i = 0; i < num_grid; i++) { map_triplets[i] = num_grid; /* When not found, map_triplets == * num_grid */ } for (i = 0; i < num_ir_q; i++) { grid_point_to_address_double(address_double1, ir_grid_points[i], mesh, is_shift); /* q' */ for (j = 0; j < 3; j++) { /* q'' */ address_double2[j] = -address_double0[j] - address_double1[j]; } third_q[i] = kgd_get_dense_grid_point_double_mesh(address_double2, mesh); } num_ir_triplets = 0; if (swappable) { /* 
search q1 <-> q2 */ for (i = 0; i < num_ir_q; i++) { ir_grid_point = ir_grid_points[i]; q_2 = third_q[i]; if (map_triplets[map_q[q_2]] < num_grid) { map_triplets[ir_grid_point] = map_triplets[map_q[q_2]]; } else { map_triplets[ir_grid_point] = ir_grid_point; num_ir_triplets++; } } } else { for (i = 0; i < num_ir_q; i++) { ir_grid_point = ir_grid_points[i]; map_triplets[ir_grid_point] = ir_grid_point; num_ir_triplets++; } } for (i = 0; i < num_grid; i++) { map_triplets[i] = map_triplets[map_q[i]]; } free(third_q); third_q = NULL; free(ir_grid_points); ir_grid_points = NULL; return num_ir_triplets; } static size_t get_BZ_triplets_at_q(size_t(*triplets)[3], const size_t grid_point, TPLCONST int (*bz_grid_address)[3], const size_t * bz_map, const size_t * map_triplets, const size_t num_map_triplets, const int mesh[3]) { size_t i, num_ir; int j, k; int bz_address[3][3], bz_address_double[3], bzmesh[3]; size_t *ir_grid_points; ir_grid_points = NULL; for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; } num_ir = 0; ir_grid_points = (size_t *) malloc(sizeof(size_t) * num_map_triplets); for (i = 0; i < num_map_triplets; i++) { if (map_triplets[i] == i) { ir_grid_points[num_ir] = i; num_ir++; } } for (i = 0; i < num_ir; i++) { for (j = 0; j < 3; j++) { bz_address[0][j] = bz_grid_address[grid_point][j]; bz_address[1][j] = bz_grid_address[ir_grid_points[i]][j]; bz_address[2][j] = -bz_address[0][j] - bz_address[1][j]; } for (j = 2; j > -1; j--) { if (get_third_q_of_triplets_at_q(bz_address, j, bz_map, mesh, bzmesh) == 0) { break; } } for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) { bz_address_double[k] = bz_address[j][k] * 2; } triplets[i][j] = bz_map[kgd_get_dense_grid_point_double_mesh(bz_address_double, bzmesh)]; } } free(ir_grid_points); ir_grid_points = NULL; return num_ir; } static int get_third_q_of_triplets_at_q(int bz_address[3][3], const int q_index, const size_t * bz_map, const int mesh[3], const int bzmesh[3]) { int i, j, smallest_g, smallest_index, sum_g, delta_g[3]; size_t prod_bzmesh; size_t bzgp[KPT_NUM_BZ_SEARCH_SPACE]; int bz_address_double[3]; prod_bzmesh = (size_t) bzmesh[0] * bzmesh[1] * bzmesh[2]; modulo_i3(bz_address[q_index], mesh); for (i = 0; i < 3; i++) { delta_g[i] = 0; for (j = 0; j < 3; j++) { delta_g[i] += bz_address[j][i]; } delta_g[i] /= mesh[i]; } for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) { for (j = 0; j < 3; j++) { bz_address_double[j] = (bz_address[q_index][j] + bz_search_space[i][j] * mesh[j]) * 2; } bzgp[i] = bz_map[kgd_get_dense_grid_point_double_mesh(bz_address_double, bzmesh)]; } for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) { if (bzgp[i] != prod_bzmesh) { goto escape; } } escape: smallest_g = 4; smallest_index = 0; for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) { if (bzgp[i] < prod_bzmesh) { /* q'' is in BZ */ sum_g = (abs(delta_g[0] + bz_search_space[i][0]) + abs(delta_g[1] + bz_search_space[i][1]) + abs(delta_g[2] + bz_search_space[i][2])); if (sum_g < smallest_g) { smallest_index = i; smallest_g = sum_g; } } } for (i = 0; i < 3; i++) { bz_address[q_index][i] += bz_search_space[smallest_index][i] * mesh[i]; } return smallest_g; } static void grid_point_to_address_double(int address_double[3], const size_t grid_point, const int mesh[3], const int is_shift[3]) { int i; int address[3]; #ifndef GRID_ORDER_XYZ address[2] = grid_point / (mesh[0] * mesh[1]); address[1] = (grid_point - address[2] * mesh[0] * mesh[1]) / mesh[0]; address[0] = grid_point % mesh[0]; #else address[0] = grid_point / (mesh[1] * mesh[2]); address[1] = (grid_point - address[0] * mesh[1] 
* mesh[2]) / mesh[2]; address[2] = grid_point % mesh[2]; #endif for (i = 0; i < 3; i++) { address_double[i] = address[i] * 2 + is_shift[i]; } } static void modulo_i3(int v[3], const int m[3]) { int i; for (i = 0; i < 3; i++) { v[i] = v[i] % m[i]; if (v[i] < 0) { v[i] += m[i]; } } }
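The listing above leans entirely on the grid-point addressing convention: a dense grid-point index is unpacked into a three-component address (x running fastest unless GRID_ORDER_XYZ is defined), doubled, and optionally shifted by half a grid step. The following stand-alone round-trip check is a minimal sketch of that convention, not part of the original sources: to_address_double mirrors grid_point_to_address_double() for the default grid order, and to_grid_point is a hypothetical inverse valid for zero shift.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Mirror of grid_point_to_address_double() for the default (non-XYZ) grid order. */
static void to_address_double(int address_double[3], const size_t grid_point, const int mesh[3], const int is_shift[3])
{
  int i, address[3];
  address[2] = grid_point / (mesh[0] * mesh[1]);
  address[1] = (grid_point - address[2] * mesh[0] * mesh[1]) / mesh[0];
  address[0] = grid_point % mesh[0];
  for (i = 0; i < 3; i++) {
    address_double[i] = address[i] * 2 + is_shift[i];
  }
}

/* Hypothetical inverse, valid for zero shift: halve, wrap into the mesh, repack. */
static size_t to_grid_point(const int address_double[3], const int mesh[3])
{
  int i, a[3];
  for (i = 0; i < 3; i++) {
    a[i] = (address_double[i] / 2) % mesh[i];
    if (a[i] < 0) {
      a[i] += mesh[i];
    }
  }
  return a[0] + (size_t) mesh[0] * (a[1] + (size_t) mesh[1] * a[2]);
}

int main(void)
{
  const int mesh[3] = {4, 4, 4};
  const int is_shift[3] = {0, 0, 0};
  size_t gp;
  int ad[3];
  for (gp = 0; gp < 64; gp++) {
    to_address_double(ad, gp, mesh, is_shift);
    assert(to_grid_point(ad, mesh) == gp);
  }
  printf("round trip OK for all 64 grid points\n");
  return 0;
}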
/* All rights reserved. */ /* These codes were originally parts of spglib, but only developed */ /* and used for phono3py. Therefore these were moved from spglib to */ /* phono3py. This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ #include <stddef.h> #include <stdlib.h> #include <mathfunc.h> #include <kpoint.h> #include <kgrid.h> #include <triplet_h/triplet.h> #include <triplet_h/triplet_kpoint.h> #define KPT_NUM_BZ_SEARCH_SPACE 125 static int bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = { {0, 0, 0}, {0, 0, 1}, {0, 0, 2}, {0, 0, -2}, {0, 0, -1}, {0, 1, 0}, {0, 1, 1}, {0, 1, 2}, {0, 1, -2}, {0, 1, -1}, {0, 2, 0}, {0, 2, 1}, {0, 2, 2}, {0, 2, -2}, {0, 2, -1}, {0, -2, 0}, {0, -2, 1}, {0, -2, 2}, {0, -2, -2}, {0, -2, -1}, {0, -1, 0}, {0, -1, 1}, {0, -1, 2}, {0, -1, -2}, {0, -1, -1}, {1, 0, 0}, {1, 0, 1}, {1, 0, 2}, {1, 0, -2}, {1, 0, -1}, {1, 1, 0}, {1, 1, 1}, {1, 1, 2}, {1, 1, -2}, {1, 1, -1}, {1, 2, 0}, {1, 2, 1}, {1, 2, 2}, {1, 2, -2}, {1, 2, -1}, {1, -2, 0}, {1, -2, 1}, {1, -2, 2}, {1, -2, -2}, {1, -2, -1}, {1, -1, 0}, {1, -1, 1}, {1, -1, 2}, {1, -1, -2}, {1, -1, -1}, {2, 0, 0}, {2, 0, 1}, {2, 0, 2}, {2, 0, -2}, {2, 0, -1}, {2, 1, 0}, {2, 1, 1}, {2, 1, 2}, {2, 1, -2}, {2, 1, -1}, {2, 2, 0}, {2, 2, 1}, {2, 2, 2}, {2, 2, -2}, {2, 2, -1}, {2, -2, 0}, {2, -2, 1}, {2, -2, 2}, {2, -2, -2}, {2, -2, -1}, {2, -1, 0}, {2, -1, 1}, {2, -1, 2}, {2, -1, -2}, {2, -1, -1}, {-2, 0, 0}, {-2, 0, 1}, {-2, 0, 2}, {-2, 0, -2}, {-2, 0, -1}, {-2, 1, 0}, {-2, 1, 1}, {-2, 1, 2}, {-2, 1, -2}, {-2, 1, -1}, {-2, 2, 0}, {-2, 2, 1}, {-2, 2, 2}, {-2, 2, -2}, {-2, 2, -1}, {-2, -2, 0}, {-2, -2, 1}, {-2, -2, 2}, {-2, -2, -2}, {-2, -2, -1}, {-2, -1, 0}, {-2, -1, 1}, {-2, -1, 2}, {-2, -1, -2}, {-2, -1, -1}, {-1, 0, 0}, {-1, 0, 1}, {-1, 0, 2}, {-1, 0, -2}, {-1, 0, -1}, {-1, 1, 0}, {-1, 1, 1}, {-1, 1, 2}, {-1, 1, -2}, {-1, 1, -1}, {-1, 2, 0}, {-1, 2, 1}, {-1, 2, 2}, {-1, 2, -2}, {-1, 2, -1}, {-1, -2, 0}, {-1, -2, 1}, {-1, -2, 2}, {-1, -2, -2}, {-1, -2, -1}, {-1, -1, 0}, {-1, -1, 1}, {-1, -1, 2}, {-1, -1, -2},
{-1, -1, -1} }; static void grid_point_to_address_double(int address_double[3], const size_t grid_point, const int mesh[3], const int is_shift[3]); static size_t get_ir_triplets_at_q(size_t * map_triplets, size_t * map_q, int (*grid_address)[3], const size_t grid_point, const int mesh[3], const MatINT * rot_reciprocal, const int swappable); static size_t get_BZ_triplets_at_q(size_t(*triplets)[3], const size_t grid_point, TPLCONST int (*bz_grid_address)[3], const size_t * bz_map, const size_t * map_triplets, const size_t num_map_triplets, const int mesh[3]); static int get_third_q_of_triplets_at_q(int bz_address[3][3], const int q_index, const size_t * bz_map, const int mesh[3], const int bzmesh[3]); static void modulo_i3(int v[3], const int m[3]); size_t tpk_get_ir_triplets_at_q(size_t * map_triplets, size_t * map_q, int (*grid_address)[3], const int grid_point, const int mesh[3], const int is_time_reversal, const MatINT * rotations, const int swappable) { int num_ir; MatINT *rot_reciprocal; rot_reciprocal = kpt_get_point_group_reciprocal(rotations, is_time_reversal); num_ir = get_ir_triplets_at_q(map_triplets, map_q, grid_address, grid_point, mesh, rot_reciprocal, swappable); mat_free_MatINT(rot_reciprocal); return num_ir; } size_t tpk_get_BZ_triplets_at_q(size_t(*triplets)[3], const size_t grid_point, TPLCONST int (*bz_grid_address)[3], const size_t * bz_map, const size_t * map_triplets, const size_t num_map_triplets, const int mesh[3]) { return get_BZ_triplets_at_q(triplets, grid_point, bz_grid_address, bz_map, map_triplets, num_map_triplets, mesh); } static size_t get_ir_triplets_at_q(size_t * map_triplets, size_t * map_q, int (*grid_address)[3], const size_t grid_point, const int mesh[3], const MatINT * rot_reciprocal, const int swappable) { size_t i, j, num_grid, q_2, num_ir_q, num_ir_triplets, ir_grid_point; int mesh_double[3], is_shift[3]; int address_double0[3], address_double1[3], address_double2[3]; size_t *ir_grid_points, *third_q; double tolerance; double stabilizer_q[1][3]; MatINT *rot_reciprocal_q; ir_grid_points = NULL; third_q = NULL; rot_reciprocal_q = NULL; tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]); num_grid = mesh[0] * mesh[1] * (size_t) mesh[2]; for (i = 0; i < 3; i++) { /* Only consider the gamma-point */ is_shift[i] = 0; mesh_double[i] = mesh[i] * 2; } /* Search irreducible q-points (map_q) with a stabilizer */ /* q */ grid_point_to_address_double(address_double0, grid_point, mesh, is_shift); for (i = 0; i < 3; i++) { stabilizer_q[0][i] = (double)address_double0[i] / mesh_double[i] - (address_double0[i] > mesh[i]); } rot_reciprocal_q = kpt_get_point_group_reciprocal_with_q(rot_reciprocal, tolerance, 1, stabilizer_q); num_ir_q = kpt_get_dense_irreducible_reciprocal_mesh(grid_address, map_q, mesh, is_shift, rot_reciprocal_q); mat_free_MatINT(rot_reciprocal_q); rot_reciprocal_q = NULL; third_q = (size_t *) malloc(sizeof(size_t) * num_ir_q); ir_grid_points = (size_t *) malloc(sizeof(size_t) * num_ir_q); num_ir_q = 0; for (i = 0; i < num_grid; i++) { if (map_q[i] == i) { ir_grid_points[num_ir_q] = i; num_ir_q++; } } for (i = 0; i < num_grid; i++) { map_triplets[i] = num_grid; /* When not found, map_triplets == * num_grid */ } #pragma omp parallel for private(j, address_double1, address_double2) for (i = 0; i < num_ir_q; i++) { grid_point_to_address_double(address_double1, ir_grid_points[i], mesh, is_shift); /* q' */ for (j = 0; j < 3; j++) { /* q'' */ address_double2[j] = -address_double0[j] - address_double1[j]; } third_q[i] = 
kgd_get_dense_grid_point_double_mesh(address_double2, mesh); } num_ir_triplets = 0; if (swappable) { /* search q1 <-> q2 */ for (i = 0; i < num_ir_q; i++) { ir_grid_point = ir_grid_points[i]; q_2 = third_q[i]; if (map_triplets[map_q[q_2]] < num_grid) { map_triplets[ir_grid_point] = map_triplets[map_q[q_2]]; } else { map_triplets[ir_grid_point] = ir_grid_point; num_ir_triplets++; } } } else { for (i = 0; i < num_ir_q; i++) { ir_grid_point = ir_grid_points[i]; map_triplets[ir_grid_point] = ir_grid_point; num_ir_triplets++; } } #pragma omp parallel for for (i = 0; i < num_grid; i++) { map_triplets[i] = map_triplets[map_q[i]]; } free(third_q); third_q = NULL; free(ir_grid_points); ir_grid_points = NULL; return num_ir_triplets; } static size_t get_BZ_triplets_at_q(size_t(*triplets)[3], const size_t grid_point, TPLCONST int (*bz_grid_address)[3], const size_t * bz_map, const size_t * map_triplets, const size_t num_map_triplets, const int mesh[3]) { size_t i, num_ir; int j, k; int bz_address[3][3], bz_address_double[3], bzmesh[3]; size_t *ir_grid_points; ir_grid_points = NULL; for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; } num_ir = 0; ir_grid_points = (size_t *) malloc(sizeof(size_t) * num_map_triplets); for (i = 0; i < num_map_triplets; i++) { if (map_triplets[i] == i) { ir_grid_points[num_ir] = i; num_ir++; } } #pragma omp parallel for private(j, k, bz_address, bz_address_double) for (i = 0; i < num_ir; i++) { for (j = 0; j < 3; j++) { bz_address[0][j] = bz_grid_address[grid_point][j]; bz_address[1][j] = bz_grid_address[ir_grid_points[i]][j]; bz_address[2][j] = -bz_address[0][j] - bz_address[1][j]; } for (j = 2; j > -1; j--) { if (get_third_q_of_triplets_at_q(bz_address, j, bz_map, mesh, bzmesh) == 0) { break; } } for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) { bz_address_double[k] = bz_address[j][k] * 2; } triplets[i][j] = bz_map[kgd_get_dense_grid_point_double_mesh(bz_address_double, bzmesh)]; } } free(ir_grid_points); ir_grid_points = NULL; return num_ir; } static int get_third_q_of_triplets_at_q(int bz_address[3][3], const int q_index, const size_t * bz_map, const int mesh[3], const int bzmesh[3]) { int i, j, smallest_g, smallest_index, sum_g, delta_g[3]; size_t prod_bzmesh; size_t bzgp[KPT_NUM_BZ_SEARCH_SPACE]; int bz_address_double[3]; prod_bzmesh = (size_t) bzmesh[0] * bzmesh[1] * bzmesh[2]; modulo_i3(bz_address[q_index], mesh); for (i = 0; i < 3; i++) { delta_g[i] = 0; for (j = 0; j < 3; j++) { delta_g[i] += bz_address[j][i]; } delta_g[i] /= mesh[i]; } for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) { for (j = 0; j < 3; j++) { bz_address_double[j] = (bz_address[q_index][j] + bz_search_space[i][j] * mesh[j]) * 2; } bzgp[i] = bz_map[kgd_get_dense_grid_point_double_mesh(bz_address_double, bzmesh)]; } for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) { if (bzgp[i] != prod_bzmesh) { goto escape; } } escape: smallest_g = 4; smallest_index = 0; for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) { if (bzgp[i] < prod_bzmesh) { /* q'' is in BZ */ sum_g = (abs(delta_g[0] + bz_search_space[i][0]) + abs(delta_g[1] + bz_search_space[i][1]) + abs(delta_g[2] + bz_search_space[i][2])); if (sum_g < smallest_g) { smallest_index = i; smallest_g = sum_g; } } } for (i = 0; i < 3; i++) { bz_address[q_index][i] += bz_search_space[smallest_index][i] * mesh[i]; } return smallest_g; } static void grid_point_to_address_double(int address_double[3], const size_t grid_point, const int mesh[3], const int is_shift[3]) { int i; int address[3]; #ifndef GRID_ORDER_XYZ address[2] = grid_point / (mesh[0] * mesh[1]); 
address[1] = (grid_point - address[2] * mesh[0] * mesh[1]) / mesh[0]; address[0] = grid_point % mesh[0]; #else address[0] = grid_point / (mesh[1] * mesh[2]); address[1] = (grid_point - address[0] * mesh[1] * mesh[2]) / mesh[2]; address[2] = grid_point % mesh[2]; #endif for (i = 0; i < 3; i++) { address_double[i] = address[i] * 2 + is_shift[i]; } } static void modulo_i3(int v[3], const int m[3]) { int i; for (i = 0; i < 3; i++) { v[i] = v[i] % m[i]; if (v[i] < 0) { v[i] += m[i]; } } }
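The only difference between the two listings above is where OpenMP work sharing is applied: each parallelized loop writes to exactly one array element per iteration (third_q[i], map_triplets[i], triplets[i][j]), so the directives only have to mark the per-iteration scratch variables private. A minimal sketch of that pattern (illustrative names, not from the original sources; compile with -fopenmp or equivalent):

#include <stdio.h>

#define N 1000

int main(void)
{
  static long out[N];
  int scratch[3]; /* would be shared, and hence a data race, if not listed as private */
  long i;
  int j;

  /* Same shape as the loops above: independent iterations, private scratch,
     each iteration writes only to out[i]. */
#pragma omp parallel for private(j, scratch)
  for (i = 0; i < N; i++) {
    for (j = 0; j < 3; j++) {
      scratch[j] = (int) (i + j);
    }
    out[i] = (long) scratch[0] + scratch[1] + scratch[2];
  }

  printf("out[10] = %ld (expected 33)\n", out[10]);
  return 0;
}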
spectra.c
/** @file spectra.c Documented spectra module * * Julien Lesgourgues, 1.11.2019 * * This module computes the harmonic power spectra \f$ C_l^{X} \f$'s * given the transfer functions and the primordial spectra. * * The following functions can be called from other modules: * * -# spectra_init() at the beginning (but after transfer_init()) * -# spectra_cl_at_l() at any time for computing individual \f$ C_l \f$'s at any l * -# spectra_free() at the end */ #include "spectra.h" /** * Anisotropy power spectra \f$ C_l\f$'s for all types, modes and initial conditions. * * This routine evaluates all the \f$C_l\f$'s at a given value of l by * interpolating in the pre-computed table. When relevant, it also * sums over all initial conditions for each mode, and over all modes. * * This function can be * called from any module at any time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet. * * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param l Input: multipole number * @param cl_tot Output: total \f$C_l\f$'s for all types (TT, TE, EE, etc.) * @param cl_md Output: \f$C_l\f$'s for all types (TT, TE, EE, etc.) decomposed mode by mode (scalar, tensor, ...) when relevant * @param cl_md_ic Output: \f$C_l\f$'s for all types (TT, TE, EE, etc.) decomposed by pairs of initial conditions (adiabatic, isocurvatures) for each mode (usually, only for the scalar mode) when relevant * @return the error status */ int spectra_cl_at_l( struct spectra * psp, double l, double * cl_tot, /* array with argument cl_tot[index_ct] (must be already allocated) */ double * * cl_md, /* array with argument cl_md[index_md][index_ct] (must be already allocated only if several modes) */ double * * cl_md_ic /* array with argument cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] (must be already allocated for a given mode only if several ic's) */ ) { /** Summary: */ /** - define local variables */ int last_index; int index_md; int index_ic1,index_ic2,index_ic1_ic2; int index_ct; /** - (a) treat case in which there is only one mode and one initial condition. Then, only cl_tot needs to be filled. */ if ((psp->md_size == 1) && (psp->ic_size[0] == 1)) { index_md = 0; if ((int)l <= psp->l[psp->l_size[index_md]-1]) { /* interpolate at l */ class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ct_size, l, &last_index, cl_tot, psp->ct_size, psp->error_message), psp->error_message, psp->error_message); /* set to zero for the types such that l>l_max */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_tot[index_ct]=0.; } else { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]=0.; } } /** - (b) treat case in which there is only one mode with several initial conditions. Fill cl_md_ic[index_md=0] and sum it to get cl_tot.
*/ if ((psp->md_size == 1) && (psp->ic_size[0] > 1)) { index_md = 0; for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]=0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (((int)l <= psp->l[psp->l_size[index_md]-1]) && (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_)) { class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ic_ic_size[index_md]*psp->ct_size, l, &last_index, cl_md_ic[index_md], psp->ic_ic_size[index_md]*psp->ct_size, psp->error_message), psp->error_message, psp->error_message); for (index_ct=0; index_ct<psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } else { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } /* compute cl_tot by summing over cl_md_ic */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) { if (index_ic1 == index_ic2) cl_tot[index_ct]+=cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; else cl_tot[index_ct]+=2.*cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; } } } } /** - (c) loop over modes */ if (psp->md_size > 1) { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]=0.; for (index_md = 0; index_md < psp->md_size; index_md++) { /** - --> (c.1.) treat case in which the mode under consideration has only one initial condition. Fill cl_md[index_md]. */ if (psp->ic_size[index_md] == 1) { if ((int)l <= psp->l[psp->l_size[index_md]-1]) { class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ct_size, l, &last_index, cl_md[index_md], psp->ct_size, psp->error_message), psp->error_message, psp->error_message); for (index_ct=0; index_ct<psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_md[index_md][index_ct]=0.; } else { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_md[index_md][index_ct]=0.; } } /** - --> (c.2.) treat case in which the mode under consideration has several initial conditions. 
Fill cl_md_ic[index_md] and sum it to get cl_md[index_md] */ if (psp->ic_size[index_md] > 1) { if ((int)l <= psp->l[psp->l_size[index_md]-1]) { /* interpolate all ic and ct */ class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ic_ic_size[index_md]*psp->ct_size, l, &last_index, cl_md_ic[index_md], psp->ic_ic_size[index_md]*psp->ct_size, psp->error_message), psp->error_message, psp->error_message); /* set to zero some of the components */ for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); for (index_ct=0; index_ct<psp->ct_size; index_ct++) { if (((int)l > psp->l_max_ct[index_md][index_ct]) || (psp->is_non_zero[index_md][index_ic1_ic2] == _FALSE_)) cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } } } } /* if l was too big, set anyway all components to zero */ else { for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); for (index_ct=0; index_ct<psp->ct_size; index_ct++) { cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } } } } /* sum up all ic for each mode */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) { cl_md[index_md][index_ct]=0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (index_ic1 == index_ic2) cl_md[index_md][index_ct]+=cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; else cl_md[index_md][index_ct]+=2.*cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; } } } } /** - --> (c.3.) add contribution of cl_md[index_md] to cl_tot */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]+=cl_md[index_md][index_ct]; } } return _SUCCESS_; } /** * This routine initializes the spectra structure (in particular, * computes table of anisotropy and Fourier spectra \f$ C_l^{X}, P(k), ... \f$) * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure (will provide H, Omega_m at redshift of interest) * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfer structure * @param ppm Input: pointer to primordial structure * @param pnl Input: pointer to nonlinear structure * @param psp Output: pointer to initialized spectra structure * @return the error status */ int spectra_init( struct precision * ppr, struct background * pba, struct perturbs * ppt, struct primordial * ppm, struct nonlinear * pnl, struct transfers * ptr, struct spectra * psp ) { /** Summary: */ /** - check that we really want to compute at least one spectrum */ if (ppt->has_cls == _FALSE_) { psp->md_size = 0; if (psp->spectra_verbose > 0) printf("No spectra requested. 
Spectra module skipped.\n"); return _SUCCESS_; } else { if (psp->spectra_verbose > 0) printf("Computing unlensed harmonic spectra\n"); } /** - initialize indices and allocate some of the arrays in the spectra structure */ class_call(spectra_indices(pba,ppt,ptr,ppm,psp), psp->error_message, psp->error_message); /** - deal with \f$ C_l\f$'s, if any */ if (ppt->has_cls == _TRUE_) { class_call(spectra_cls(pba,ppt,ptr,ppm,psp), psp->error_message, psp->error_message); } else { psp->ct_size=0; } /** - a pointer to the nonlinear structure is stored in the spectra structure. This odd, unusual and inelegant feature has been introduced in v2.8 in order to keep in use some deprecated functions spectra_pk_...() that are now pointing at the new functions nonlinear_pk_...(). In the future, if the deprecated functions are removed, it will be possible to remove this pointer as well. */ psp->pnl = pnl; return _SUCCESS_; } /** * This routine frees all the memory space allocated by spectra_init(). * * To be called at the end of each run, only when no further calls to * spectra_cl_at_l(), spectra_pk_at_z(), spectra_pk_at_k_and_z() are needed. * * @param psp Input: pointer to spectra structure (whose fields must be freed) * @return the error status */ int spectra_free( struct spectra * psp ) { int index_md; if (psp->md_size > 0) { if (psp->ct_size > 0) { for (index_md = 0; index_md < psp->md_size; index_md++) { free(psp->l_max_ct[index_md]); free(psp->cl[index_md]); free(psp->ddcl[index_md]); } free(psp->l); free(psp->l_size); free(psp->l_max_ct); free(psp->l_max); free(psp->cl); free(psp->ddcl); } } for (index_md=0; index_md < psp->md_size; index_md++) free(psp->is_non_zero[index_md]); free(psp->is_non_zero); free(psp->ic_size); free(psp->ic_ic_size); return _SUCCESS_; } /** * This routine defines indices and allocates tables in the spectra structure. * * @param pba Input: pointer to background structure * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfers structure * @param ppm Input: pointer to primordial structure * @param psp Input/output: pointer to spectra structure * @return the error status */ int spectra_indices( struct background * pba, struct perturbs * ppt, struct transfers * ptr, struct primordial * ppm, struct spectra * psp ) { int index_ct; int index_md; int index_ic1_ic2; psp->md_size = ppt->md_size; if (ppt->has_scalars == _TRUE_) psp->index_md_scalars = ppt->index_md_scalars; class_alloc(psp->ic_size, sizeof(int)*psp->md_size, psp->error_message); class_alloc(psp->ic_ic_size, sizeof(int)*psp->md_size, psp->error_message); class_alloc(psp->is_non_zero, sizeof(short *)*psp->md_size, psp->error_message); for (index_md=0; index_md < psp->md_size; index_md++) { psp->ic_size[index_md] = ppm->ic_size[index_md]; psp->ic_ic_size[index_md] = ppm->ic_ic_size[index_md]; class_alloc(psp->is_non_zero[index_md], sizeof(short)*psp->ic_ic_size[index_md], psp->error_message); for (index_ic1_ic2=0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) psp->is_non_zero[index_md][index_ic1_ic2] = ppm->is_non_zero[index_md][index_ic1_ic2]; } if (ppt->has_cls == _TRUE_) { /* types of C_l's relevant for both scalars and tensors: TT, EE, TE */ index_ct=0; if (ppt->has_cl_cmb_temperature == _TRUE_) { psp->has_tt = _TRUE_; psp->index_ct_tt=index_ct; index_ct++; } else { psp->has_tt = _FALSE_; } if (ppt->has_cl_cmb_polarization == _TRUE_) { psp->has_ee = _TRUE_; psp->index_ct_ee=index_ct; index_ct++; } else { psp->has_ee = _FALSE_; } if ((ppt->has_cl_cmb_temperature == _TRUE_) &&
(ppt->has_cl_cmb_polarization == _TRUE_)) { psp->has_te = _TRUE_; psp->index_ct_te=index_ct; index_ct++; } else { psp->has_te = _FALSE_; } if (ppt->has_cl_cmb_polarization == _TRUE_) { psp->has_bb = _TRUE_; psp->index_ct_bb=index_ct; index_ct++; } else { psp->has_bb = _FALSE_; } /* types of C_l's relevant only for scalars: phi-phi, T-phi, E-phi, d-d, T-d */ if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_pp = _TRUE_; psp->index_ct_pp=index_ct; index_ct++; } else { psp->has_pp = _FALSE_; } if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_tp = _TRUE_; psp->index_ct_tp=index_ct; index_ct++; } else { psp->has_tp = _FALSE_; } psp->ct_size = index_ct; if ((ppt->has_cl_cmb_polarization == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_ep = _TRUE_; psp->index_ct_ep=index_ct; index_ct++; } else { psp->has_ep = _FALSE_; } if ((ppt->has_scalars == _TRUE_) && ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_))) psp->d_size=ppt->selection_num; else psp->d_size=0; if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_dd = _TRUE_; psp->index_ct_dd=index_ct; index_ct+=(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2; } else { psp->has_dd = _FALSE_; } /* the computation of C_l^Td would require a very good sampling of transfer functions over a wide range, and a huge computation time. In the current version, we prefer to switch it off, rather than either slowing down the code considerably, or producing very inaccurate spectra. if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_td = _TRUE_; psp->index_ct_td=index_ct; index_ct+=psp->d_size; } else { psp->has_td = _FALSE_; } */ psp->has_td = _FALSE_; if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_pd = _TRUE_; psp->index_ct_pd=index_ct; index_ct+=psp->d_size; } else { psp->has_pd = _FALSE_; } psp->has_td = _FALSE_; if ((ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_ll = _TRUE_; psp->index_ct_ll=index_ct; index_ct+=(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2; } else { psp->has_ll = _FALSE_; } /* the computation of C_l^Tl would require a very good sampling of transfer functions over a wide range, and a huge computation time. In the current version, we prefer to switch it off, rather than either slowing down the code considerably, or producing very inaccurate spectra. if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_tl = _TRUE_; psp->index_ct_tl=index_ct; index_ct+=psp->d_size; } else { psp->has_tl = _FALSE_; } */ psp->has_tl = _FALSE_; if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_dl = _TRUE_; psp->index_ct_dl=index_ct; index_ct += psp->d_size*psp->d_size - (psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag); } else { psp->has_dl = _FALSE_; } psp->ct_size = index_ct; /* infer from input quantities the l_max for each mode and type, l_max_ct[index_md][index_type]. Maximize it over index_ct, and then over index_md. 
*/ class_alloc(psp->l_max,sizeof(int*)*psp->md_size,psp->error_message); class_alloc(psp->l_max_ct,sizeof(int*)*psp->md_size,psp->error_message); for (index_md=0; index_md<psp->md_size; index_md++) { class_calloc(psp->l_max_ct[index_md],psp->ct_size,sizeof(int),psp->error_message); } if (ppt->has_scalars == _TRUE_) { /* spectra computed up to l_scalar_max */ if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tt] = ppt->l_scalar_max; if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ee] = ppt->l_scalar_max; if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_te] = ppt->l_scalar_max; if (psp->has_pp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_pp] = ppt->l_scalar_max; if (psp->has_tp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tp] = ppt->l_scalar_max; if (psp->has_ep == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ep] = ppt->l_scalar_max; /* spectra computed up to l_lss_max */ if (psp->has_dd == _TRUE_) for (index_ct=psp->index_ct_dd; index_ct<psp->index_ct_dd+(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; if (psp->has_td == _TRUE_) for (index_ct=psp->index_ct_td; index_ct<psp->index_ct_td+psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max); if (psp->has_pd == _TRUE_) for (index_ct=psp->index_ct_pd; index_ct<psp->index_ct_pd+psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max); if (psp->has_ll == _TRUE_) for (index_ct=psp->index_ct_ll; index_ct<psp->index_ct_ll+(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; if (psp->has_tl == _TRUE_) for (index_ct=psp->index_ct_tl; index_ct<psp->index_ct_tl+psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max); if (psp->has_dl == _TRUE_) for (index_ct=psp->index_ct_dl; index_ct < psp->index_ct_dl+(psp->d_size*psp->d_size - (psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag)); index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; } if (ppt->has_tensors == _TRUE_) { /* spectra computed up to l_tensor_max */ if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_tt] = ppt->l_tensor_max; if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_ee] = ppt->l_tensor_max; if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_te] = ppt->l_tensor_max; if (psp->has_bb == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_bb] = ppt->l_tensor_max; } /* maximizations */ psp->l_max_tot = 0.; for (index_md=0; index_md < psp->md_size; index_md++) { psp->l_max[index_md] = 0.; for (index_ct=0.; index_ct<psp->ct_size; index_ct++) psp->l_max[index_md] = MAX(psp->l_max[index_md],psp->l_max_ct[index_md][index_ct]); psp->l_max_tot = MAX(psp->l_max_tot,psp->l_max[index_md]); } } return _SUCCESS_; } /** * This routine computes a table of values for all harmonic spectra \f$ C_l \f$'s, * given the transfer functions and primordial spectra. 
* * @param pba Input: pointer to background structure * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfers structure * @param ppm Input: pointer to primordial structure * @param psp Input/Output: pointer to spectra structure * @return the error status */ int spectra_cls( struct background * pba, struct perturbs * ppt, struct transfers * ptr, struct primordial * ppm, struct spectra * psp ) { /** Summary: */ /** - define local variables */ int index_md; int index_ic1,index_ic2,index_ic1_ic2; int index_l; int index_ct; int cl_integrand_num_columns; double * cl_integrand; /* array with argument cl_integrand[index_k*cl_integrand_num_columns+1+psp->index_ct] */ double * transfer_ic1; /* array with argument transfer_ic1[index_tt] */ double * transfer_ic2; /* idem */ double * primordial_pk; /* array with argument primordial_pk[index_ic_ic] */ /* This code can be optionally compiled with the OpenMP option for parallel computation. Inside parallel regions, the use of the command "return" is forbidden. For error management, instead of "return _FAILURE_", we will set the variable below to "abort = _TRUE_". This will lead to a "return _FAILURE_" just after leaving the parallel region. */ int abort; #ifdef _OPENMP /* instrumentation times */ double tstart, tstop; #endif /** - allocate pointers to arrays where results will be stored */ class_alloc(psp->l_size,sizeof(int)*psp->md_size,psp->error_message); class_alloc(psp->cl,sizeof(double *)*psp->md_size,psp->error_message); class_alloc(psp->ddcl,sizeof(double *)*psp->md_size,psp->error_message); psp->l_size_max = ptr->l_size_max; class_alloc(psp->l,sizeof(double)*psp->l_size_max,psp->error_message); /** - store values of l */ for (index_l=0; index_l < psp->l_size_max; index_l++) { psp->l[index_l] = (double)ptr->l[index_l]; } /** - loop over modes (scalars, tensors, etc.).
For each mode: */ for (index_md = 0; index_md < psp->md_size; index_md++) { /** - --> (a) store number of l values for this mode */ psp->l_size[index_md] = ptr->l_size[index_md]; /** - --> (b) allocate arrays where results will be stored */ class_alloc(psp->cl[index_md],sizeof(double)*psp->l_size[index_md]*psp->ct_size*psp->ic_ic_size[index_md],psp->error_message); class_alloc(psp->ddcl[index_md],sizeof(double)*psp->l_size[index_md]*psp->ct_size*psp->ic_ic_size[index_md],psp->error_message); cl_integrand_num_columns = 1+psp->ct_size*2; /* one for k, ct_size for each type, ct_size for each second derivative of each type */ /** - --> (c) loop over initial conditions */ for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); /* non-diagonal coefficients should be computed only if non-zero correlation */ if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { /* initialize error management flag */ abort = _FALSE_; /* beginning of parallel region */ #pragma omp parallel \ shared(ptr,ppm,index_md,psp,ppt,cl_integrand_num_columns,index_ic1,index_ic2,abort) \ private(tstart,cl_integrand,primordial_pk,transfer_ic1,transfer_ic2,index_l,tstop) { #ifdef _OPENMP tstart = omp_get_wtime(); #endif class_alloc_parallel(cl_integrand, ptr->q_size*cl_integrand_num_columns*sizeof(double), psp->error_message); class_alloc_parallel(primordial_pk, psp->ic_ic_size[index_md]*sizeof(double), psp->error_message); class_alloc_parallel(transfer_ic1, ptr->tt_size[index_md]*sizeof(double), psp->error_message); class_alloc_parallel(transfer_ic2, ptr->tt_size[index_md]*sizeof(double), psp->error_message); #pragma omp for schedule (dynamic) /** - ---> loop over l values defined in the transfer module. For each l, compute the \f$ C_l\f$'s for all types (TT, TE, ...) by convolving primordial spectra with transfer functions. This elementary task is assigned to spectra_compute_cl() */ for (index_l=0; index_l < ptr->l_size[index_md]; index_l++) { #pragma omp flush(abort) class_call_parallel(spectra_compute_cl(pba, ppt, ptr, ppm, psp, index_md, index_ic1, index_ic2, index_l, cl_integrand_num_columns, cl_integrand, primordial_pk, transfer_ic1, transfer_ic2), psp->error_message, psp->error_message); } /* end of loop over l */ #ifdef _OPENMP tstop = omp_get_wtime(); if (psp->spectra_verbose > 1) printf("In %s: time spent in parallel region (loop over l's) = %e s for thread %d\n", __func__,tstop-tstart,omp_get_thread_num()); #endif free(cl_integrand); free(primordial_pk); free(transfer_ic1); free(transfer_ic2); } /* end of parallel region */ if (abort == _TRUE_) return _FAILURE_; } else { /* set non-diagonal coefficients to zero if pair of ic's uncorrelated */ for (index_l=0; index_l < ptr->l_size[index_md]; index_l++) { for (index_ct=0; index_ct<psp->ct_size; index_ct++) { psp->cl[index_md] [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = 0.; } } } } } /** - --> (d) now that for a given mode, all possible \f$ C_l\f$'s have been computed, compute second derivative of the array in which they are stored, in view of spline interpolation. 
*/ class_call(array_spline_table_lines(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ic_ic_size[index_md]*psp->ct_size, psp->ddcl[index_md], _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, psp->error_message); } return _SUCCESS_; } /** * This routine computes the \f$ C_l\f$'s for a given mode, pair of initial conditions * and multipole, but for all types (TT, TE...), by convolving the * transfer functions with the primordial spectra. * * @param pba Input: pointer to background structure * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfers structure * @param ppm Input: pointer to primordial structure * @param psp Input/Output: pointer to spectra structure (result stored here) * @param index_md Input: index of mode under consideration * @param index_ic1 Input: index of first initial condition in the correlator * @param index_ic2 Input: index of second initial condition in the correlator * @param index_l Input: index of multipole under consideration * @param cl_integrand_num_columns Input: number of columns in cl_integrand * @param cl_integrand Input: an allocated workspace * @param primordial_pk Input: table of primordial spectrum values * @param transfer_ic1 Input: table of transfer function values for first initial condition * @param transfer_ic2 Input: table of transfer function values for second initial condition * @return the error status */ int spectra_compute_cl( struct background * pba, struct perturbs * ppt, struct transfers * ptr, struct primordial * ppm, struct spectra * psp, int index_md, int index_ic1, int index_ic2, int index_l, int cl_integrand_num_columns, double * cl_integrand, double * primordial_pk, double * transfer_ic1, double * transfer_ic2 ) { int index_q; int index_tt; int index_ct; int index_d1,index_d2; double k; double clvalue; int index_ic1_ic2; double transfer_ic1_temp=0.; double transfer_ic2_temp=0.; double * transfer_ic1_nc=NULL; double * transfer_ic2_nc=NULL; double factor; int index_q_spline=0; index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (ppt->has_cl_number_count == _TRUE_) { class_alloc(transfer_ic1_nc,psp->d_size*sizeof(double),psp->error_message); class_alloc(transfer_ic2_nc,psp->d_size*sizeof(double),psp->error_message); } for (index_q=0; index_q < ptr->q_size; index_q++) { //q = ptr->q[index_q]; k = ptr->k[index_md][index_q]; cl_integrand[index_q*cl_integrand_num_columns+0] = k; class_call(primordial_spectrum_at_k(ppm,index_md,linear,k,primordial_pk), ppm->error_message, psp->error_message); /* above routine checks that k>0: no possible division by zero below */ for (index_tt=0; index_tt < ptr->tt_size[index_md]; index_tt++) { transfer_ic1[index_tt] = ptr->transfer[index_md] [((index_ic1 * ptr->tt_size[index_md] + index_tt) * ptr->l_size[index_md] + index_l) * ptr->q_size + index_q]; if (index_ic1 == index_ic2) { transfer_ic2[index_tt] = transfer_ic1[index_tt]; } else { transfer_ic2[index_tt] = ptr->transfer[index_md] [((index_ic2 * ptr->tt_size[index_md] + index_tt) * ptr->l_size[index_md] + index_l) * ptr->q_size + index_q]; } } /* define combinations of transfer functions */ if (ppt->has_cl_cmb_temperature == _TRUE_) { if (_scalars_) { transfer_ic1_temp = transfer_ic1[ptr->index_tt_t0] + transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2]; transfer_ic2_temp = transfer_ic2[ptr->index_tt_t0] + transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2]; } if (_vectors_) { transfer_ic1_temp = transfer_ic1[ptr->index_tt_t1] + 
transfer_ic1[ptr->index_tt_t2]; transfer_ic2_temp = transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2]; } if (_tensors_) { transfer_ic1_temp = transfer_ic1[ptr->index_tt_t2]; transfer_ic2_temp = transfer_ic2[ptr->index_tt_t2]; } } if (ppt->has_cl_number_count == _TRUE_) { for (index_d1=0; index_d1<psp->d_size; index_d1++) { transfer_ic1_nc[index_d1] = 0.; transfer_ic2_nc[index_d1] = 0.; if (ppt->has_nc_density == _TRUE_) { transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_density+index_d1]; transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_density+index_d1]; } if (ppt->has_nc_rsd == _TRUE_) { transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_rsd+index_d1] + transfer_ic1[ptr->index_tt_d0+index_d1] + transfer_ic1[ptr->index_tt_d1+index_d1]; transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_rsd+index_d1] + transfer_ic2[ptr->index_tt_d0+index_d1] + transfer_ic2[ptr->index_tt_d1+index_d1]; } if (ppt->has_nc_lens == _TRUE_) { transfer_ic1_nc[index_d1] += psp->l[index_l]*(psp->l[index_l]+1.)*transfer_ic1[ptr->index_tt_nc_lens+index_d1]; transfer_ic2_nc[index_d1] += psp->l[index_l]*(psp->l[index_l]+1.)*transfer_ic2[ptr->index_tt_nc_lens+index_d1]; } if (ppt->has_nc_gr == _TRUE_) { transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_nc_g1+index_d1] + transfer_ic1[ptr->index_tt_nc_g2+index_d1] + transfer_ic1[ptr->index_tt_nc_g3+index_d1] + transfer_ic1[ptr->index_tt_nc_g4+index_d1] + transfer_ic1[ptr->index_tt_nc_g5+index_d1]; transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_nc_g1+index_d1] + transfer_ic2[ptr->index_tt_nc_g2+index_d1] + transfer_ic2[ptr->index_tt_nc_g3+index_d1] + transfer_ic2[ptr->index_tt_nc_g4+index_d1] + transfer_ic2[ptr->index_tt_nc_g5+index_d1]; } } } /* integrand of Cl's */ /* note: we must integrate C_l = int [4 pi dk/k calP(k) Delta1_l(q) Delta2_l(q)], where calP(k) is the dimensionless power spectrum equal to a constant in the scale-invariant case, and to P(k) = A_s k^(ns-1) otherwise, and q=sqrt(k2+K) (scalars) or sqrt(k2+2K) (vectors) or sqrt(k2+3K) (tensors). In the literature, people often rewrite the integral in terms of q and absorb the Jacobian of the change of variables in a redefinition of the primordial spectrum. Let us illustrate this for scalars: dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-K)] = q2dq * 1/[q(q2-K)]. This factor 1/[q(q2-K)] is commonly absorbed in the definition of calP. Then one would have C_l = int [4 pi q2 dq {A_s k^(ns-1)/[q(q2-K)]} Delta1_l(q) Delta2_l(q)]. Sometimes in the literature, the factor (k2-3K)=(q2-4K) present in the initial conditions of scalar transfer functions (if normalized to curvature R=1) is also absorbed in the definition of the power spectrum. Then the curvature power spectrum reads calP = (q2-4K)/[q(q2-K)] * (k/k)^ns. In CLASS we prefer to define calP = (k/k)^ns like in the flat case, to have the factor (q2-4K) in the initial conditions, and the factor 1/[q(q2-K)] doesn't need to be there since we integrate over dk/k. For tensors, the change of variable described above gives a slightly different result: dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-3K)] = q2dq * 1/[q(q2-3K)]. But for tensors there are extra curvature-related correction factors to take into account. See the comments in the perturbation module, related to initial conditions for tensors. */ factor = 4.
* _PI_ / k; if (psp->has_tt == _TRUE_) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tt]= primordial_pk[index_ic1_ic2] * transfer_ic1_temp * transfer_ic2_temp * factor; if (psp->has_ee == _TRUE_) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ee]= primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_e] * transfer_ic2[ptr->index_tt_e] * factor; if (psp->has_te == _TRUE_) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_te]= primordial_pk[index_ic1_ic2] * 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_e] + transfer_ic1[ptr->index_tt_e] * transfer_ic2_temp) * factor; if (_tensors_ && (psp->has_bb == _TRUE_)) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_bb]= primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_b] * transfer_ic2[ptr->index_tt_b] * factor; if (_scalars_ && (psp->has_pp == _TRUE_)) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_pp]= primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2[ptr->index_tt_lcmb] * factor; if (_scalars_ && (psp->has_tp == _TRUE_)) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tp]= primordial_pk[index_ic1_ic2] * 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_lcmb] + transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_temp) * factor; if (_scalars_ && (psp->has_ep == _TRUE_)) cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ep]= primordial_pk[index_ic1_ic2] * 0.5*(transfer_ic1[ptr->index_tt_e] * transfer_ic2[ptr->index_tt_lcmb] + transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2[ptr->index_tt_e]) * factor; if (_scalars_ && (psp->has_dd == _TRUE_)) { index_ct=0; for (index_d1=0; index_d1<psp->d_size; index_d1++) { for (index_d2=index_d1; index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) { cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_dd+index_ct]= primordial_pk[index_ic1_ic2] * transfer_ic1_nc[index_d1] * transfer_ic2_nc[index_d2] * factor; index_ct++; } } } if (_scalars_ && (psp->has_td == _TRUE_)) { for (index_d1=0; index_d1<psp->d_size; index_d1++) { cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_td+index_d1]= primordial_pk[index_ic1_ic2] * 0.5*(transfer_ic1_temp * transfer_ic2_nc[index_d1] + transfer_ic1_nc[index_d1] * transfer_ic2_temp) * factor; } } if (_scalars_ && (psp->has_pd == _TRUE_)) { for (index_d1=0; index_d1<psp->d_size; index_d1++) { cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_pd+index_d1]= primordial_pk[index_ic1_ic2] * 0.5*(transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_nc[index_d1] + transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lcmb]) * factor; } } if (_scalars_ && (psp->has_ll == _TRUE_)) { index_ct=0; for (index_d1=0; index_d1<psp->d_size; index_d1++) { for (index_d2=index_d1; index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) { cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ll+index_ct]= primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_lensing+index_d1] * transfer_ic2[ptr->index_tt_lensing+index_d2] * factor; index_ct++; } } } if (_scalars_ && (psp->has_tl == _TRUE_)) { for (index_d1=0; index_d1<psp->d_size; index_d1++) { cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tl+index_d1]= primordial_pk[index_ic1_ic2] * 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_lensing+index_d1] + transfer_ic1[ptr->index_tt_lensing+index_d1] * transfer_ic2_temp) * factor; } } if (_scalars_ && (psp->has_dl == _TRUE_)) { index_ct=0; for (index_d1=0; index_d1<psp->d_size; index_d1++) { 
for (index_d2=MAX(index_d1-psp->non_diag,0); index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) { cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_dl+index_ct]= primordial_pk[index_ic1_ic2] * transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lensing+index_d2] * factor; index_ct++; } } } } for (index_ct=0; index_ct<psp->ct_size; index_ct++) { /* treat null spectra (C_l^BB of scalars, C_l^pp of tensors, etc.) */ if ((_scalars_ && (psp->has_bb == _TRUE_) && (index_ct == psp->index_ct_bb)) || (_tensors_ && (psp->has_pp == _TRUE_) && (index_ct == psp->index_ct_pp)) || (_tensors_ && (psp->has_tp == _TRUE_) && (index_ct == psp->index_ct_tp)) || (_tensors_ && (psp->has_ep == _TRUE_) && (index_ct == psp->index_ct_ep)) || (_tensors_ && (psp->has_dd == _TRUE_) && (index_ct == psp->index_ct_dd)) || (_tensors_ && (psp->has_td == _TRUE_) && (index_ct == psp->index_ct_td)) || (_tensors_ && (psp->has_pd == _TRUE_) && (index_ct == psp->index_ct_pd)) || (_tensors_ && (psp->has_ll == _TRUE_) && (index_ct == psp->index_ct_ll)) || (_tensors_ && (psp->has_tl == _TRUE_) && (index_ct == psp->index_ct_tl)) || (_tensors_ && (psp->has_dl == _TRUE_) && (index_ct == psp->index_ct_dl)) ) { psp->cl[index_md] [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = 0.; } /* for non-zero spectra, integrate over q */ else { /* spline the integrand over the whole range of k's */ class_call(array_spline(cl_integrand, cl_integrand_num_columns, ptr->q_size, 0, 1+index_ct, 1+psp->ct_size+index_ct, _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, psp->error_message); /* Technical point: we will now do a spline integral over the whole range of k's, except in the closed (K>0) case. In that case, it is a bad idea to spline over the values of k corresponding to nu<nu_flat_approximation. In this region, nu values are integer values, so the steps dq and dk have some discrete jumps. This makes the spline routine less accurate than a trapezoidal integral with finer sampling. So, in the closed case, we set index_q_spline to ptr->index_q_flat_approximation, to tell the integration routine that below this index, it should treat the integral as a trapezoidal one. For testing, one is free to set index_q_spline to 0, to enforce spline integration everywhere, or to (ptr->q_size-1), to enforce trapezoidal integration everywhere. */ if (pba->sgnK == 1) { index_q_spline = ptr->index_q_flat_approximation; } class_call(array_integrate_all_trapzd_or_spline(cl_integrand, cl_integrand_num_columns, ptr->q_size, index_q_spline, 0, 1+index_ct, 1+psp->ct_size+index_ct, &clvalue, psp->error_message), psp->error_message, psp->error_message); /* in the closed case, instead of an integral, we have a discrete sum. In practice, this does not matter: the previous routine does give a correct approximation of the discrete sum, both in the trapezoidal and spline regions. The only error comes from the first point: the previous routine assumes a weight for the first point which is too small compared to what it would be in an actual discrete sum. The line below corrects this problem in an exact way. */ if (pba->sgnK == 1) { clvalue += cl_integrand[1+index_ct] * ptr->q[0]/ptr->k[0][0]*sqrt(pba->K)/2.; } /* we have the correct C_l now. We can store it in the spectra structure.
*/ psp->cl[index_md] [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = clvalue; } } if (ppt->has_cl_number_count == _TRUE_) { free(transfer_ic1_nc); free(transfer_ic2_nc); } return _SUCCESS_; } /* deprecated functions (since v2.8) */ /** * Matter power spectrum for arbitrary redshift and for all initial conditions. * * This function is deprecated since v2.8. Try using nonlinear_pk_at_z() instead. * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param mode Input: linear or logarithmic * @param z Input: redshift * @param output_tot Output: total matter power spectrum P(k) in \f$ Mpc^3 \f$ (linear mode), or its logarithms (logarithmic mode) * @param output_ic Output: for each pair of initial conditions, matter power spectra P(k) in \f$ Mpc^3 \f$ (linear mode), or their logarithms and cross-correlation angles (logarithmic mode) * @param output_cb_tot Output: CDM+baryon power spectrum P_cb(k) in \f$ Mpc^3 \f$ (linear mode), or its logarithms (logarithmic mode) * @param output_cb_ic Output: for each pair of initial conditions, CDM+baryon power spectra P_cb(k) in \f$ Mpc^3 \f$ (linear mode), or their logarithms and cross-correlation angles (logarithmic mode) * @return the error status */ int spectra_pk_at_z( struct background * pba, struct spectra * psp, enum linear_or_logarithmic mode, double z, double * output_tot, /* array with argument output_tot[index_k] (must be already allocated) */ double * output_ic, /* array with argument output_tot[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] (must be already allocated only if more than one initial condition) */ double * output_cb_tot, /* same as output_tot for the baryon+CDM only */ double * output_cb_ic /* same as output_ic for the baryon+CDM only */ ) { fprintf(stderr," -> [WARNING:] You are calling the function spectra_pk_at_z() which is deprecated since v2.8. Try using nonlinear_pk_at_z() instead.\n"); class_call(nonlinear_pks_at_z( pba, psp->pnl, mode, pk_linear, z, output_tot, output_ic, output_cb_tot, output_cb_ic ), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * Matter power spectrum for arbitrary wavenumber, redshift and initial condition. * * This function is deprecated since v2.8. Try using nonlinear_pk_linear_at_k_and_z() instead. 
* * @param pba Input: pointer to background structure (used for converting z into tau) * @param ppm Input: pointer to primordial structure (used only in the case 0 < k < kmin) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param pk_tot Output: total matter power spectrum P(k) in \f$ Mpc^3 \f$ * @param pk_ic Output: for each pair of initial conditions, matter power spectra P(k) in \f$ Mpc^3\f$ * @param pk_cb_tot Output: b+CDM power spectrum P(k) in \f$ Mpc^3 \f$ * @param pk_cb_ic Output: for each pair of initial conditions, b+CDM power spectra P(k) in \f$ Mpc^3\f$ * @return the error status */ int spectra_pk_at_k_and_z( struct background * pba, struct primordial * ppm, struct spectra * psp, double k, double z, double * pk_tot, /* pointer to a single number (must be already allocated) */ double * pk_ic, /* array of argument pk_ic[index_ic1_ic2] (must be already allocated only if several initial conditions) */ double * pk_cb_tot, /* same as pk_tot for baryon+CDM part only */ double * pk_cb_ic /* same as pk_ic for baryon+CDM part only */ ) { fprintf(stderr," -> [WARNING:] You are calling the function spectra_pk_at_k_and_z() which is deprecated since v2.8. Try using nonlinear_pk_linear_at_k_and_z() instead.\n"); class_call(nonlinear_pks_at_k_and_z(pba, ppm, psp->pnl, pk_linear, k, z, pk_tot, pk_ic, pk_cb_tot, pk_cb_ic), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * Non-linear total matter power spectrum for arbitrary redshift. * * This function is deprecated since v2.8. Try using nonlinear_pk_at_z() instead. * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param mode Input: linear or logarithmic * @param z Input: redshift * @param output_tot Output: total matter power spectrum P(k) in \f$ Mpc^3\f$ (linear mode), or its logarithms (logarithmic mode) * @param output_cb_tot Output: b+CDM power spectrum P(k) in \f$ Mpc^3\f$ (linear mode), or its logarithms (logarithmic mode) * @return the error status */ int spectra_pk_nl_at_z( struct background * pba, struct spectra * psp, enum linear_or_logarithmic mode, double z, double * output_tot, /* array with argument output_tot[index_k] (must be already allocated) */ double * output_cb_tot ) { fprintf(stderr," -> [WARNING:] You are calling the function spectra_pk_nl_at_z() which is deprecated since v2.8. Try using nonlinear_pk_at_z() instead.\n"); class_call(nonlinear_pks_at_z(pba, psp->pnl, mode, pk_nonlinear, z, output_tot, NULL, output_cb_tot, NULL ), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * Non-linear total matter power spectrum for arbitrary wavenumber and redshift. * * This function is deprecated since v2.8. Try using nonlinear_pk_at_k_and_z() instead. 
* * @param pba Input: pointer to background structure (used for converting z into tau) * @param ppm Input: pointer to primordial structure (used only in the case 0 < k < kmin) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param pk_tot Output: total matter power spectrum P(k) in \f$ Mpc^3\f$ * @param pk_cb_tot Output: b+CDM power spectrum P(k) in \f$ Mpc^3\f$ * @return the error status */ int spectra_pk_nl_at_k_and_z( struct background * pba, struct primordial * ppm, struct spectra * psp, double k, double z, double * pk_tot, /* pointer to a single number (must be already allocated) */ double * pk_cb_tot /* same as pk_tot for baryon+CDM only */ ) { fprintf(stderr," -> [WARNING:] You are calling the function spectra_pk_nl_at_k_and_z() which is deprecated since v2.8. Try using nonlinear_pk_at_k_and_z() instead.\n"); class_call(nonlinear_pks_at_k_and_z(pba, ppm, psp->pnl, pk_nonlinear, k, z, pk_tot, NULL, pk_cb_tot, NULL ), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * Return the P(k,z) for a grid of (k_i,z_j) passed in input, * for all available pk types (_m, _cb), * either linear or nonlinear depending on input. * * This function is deprecated since v2.8. Try using nonlinear_pks_at_kvec_and_zvec() instead. * * @param pba Input: pointer to background structure * @param psp Input: pointer to spectra structure * @param kvec Input: array of wavenumbers in ascending order (in 1/Mpc) * @param kvec_size Input: size of array of wavenumbers * @param zvec Input: array of redshifts in arbitrary order * @param zvec_size Input: size of array of redshifts * @param pk_tot_out Output: P(k_i,z_j) for total matter (if available) in Mpc**3 * @param pk_cb_tot_out Output: P_cb(k_i,z_j) for cdm+baryons (if available) in Mpc**3 * @param nonlinear Input: _TRUE_ or _FALSE_ (to output nonlinear or linear P(k,z)) * @return the error status */ int spectra_fast_pk_at_kvec_and_zvec( struct background * pba, struct spectra * psp, double * kvec, int kvec_size, double * zvec, int zvec_size, double * pk_tot_out, // pk_tot_out[index_zvec*kvec_size+index_kvec], // already allocated //(or NULL if user knows there is no _m output) double * pk_cb_tot_out, // idem int nonlinear ) { enum pk_outputs pk_output; fprintf(stderr," -> [WARNING:] You are calling the function spectra_fast_pks_at_kvec_and_zvec() which is deprecated since v2.8. Try using nonlinear_pk_at_kvec_and_zvec() instead.\n"); if (nonlinear == _TRUE_) pk_output = pk_nonlinear; else pk_output = pk_linear; class_call(nonlinear_pks_at_kvec_and_zvec( pba, psp->pnl, pk_output, kvec, kvec_size, zvec, zvec_size, pk_tot_out, pk_cb_tot_out), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * This routine computes sigma(R) given P(k) for total matter power * spectrum (does not check that k_max is large enough) * * This function is deprecated since v2.8. Try using nonlinear_sigmas_at_z() instead. 
* * @param pba Input: pointer to background structure * @param ppm Input: pointer to primordial structure * @param psp Input: pointer to spectra structure * @param R Input: radius in Mpc * @param z Input: redshift * @param sigma Output: variance in a sphere of radius R (dimensionless) * @return the error status */ int spectra_sigma( struct background * pba, struct primordial * ppm, struct spectra * psp, double R, double z, double * sigma ) { fprintf(stderr," -> [WARNING:] You are calling the function spectra_sigma() which is deprecated since v2.8. Try using nonlinear_sigmas_at_z() instead.\n"); if (psp->pnl->has_pk_m) { class_call(nonlinear_sigma_at_z(pba, psp->pnl, R, z, psp->pnl->index_pk_m, 80., // hardcoded, yes, but the function is deprecated... sigma), psp->pnl->error_message, psp->error_message); } return _SUCCESS_; } /** * This routine computes sigma(R) given P(k) for baryon+cdm power * spectrum (does not check that k_max is large enough) * * This function is deprecated since v2.8. Try using nonlinear_sigmas_at_z() instead. * * @param pba Input: pointer to background structure * @param ppm Input: pointer to primordial structure * @param psp Input: pointer to spectra structure * @param R Input: radius in Mpc * @param z Input: redshift * @param sigma_cb Output: variance in a sphere of radius R (dimensionless) * @return the error status */ int spectra_sigma_cb( struct background * pba, struct primordial * ppm, struct spectra * psp, double R, double z, double * sigma_cb ) { fprintf(stderr," -> [WARNING:] You are calling the function spectra_sigma_cb() which is deprecated since v2.8. Try using nonlinear_sigmas_at_z() instead.\n"); if (psp->pnl->has_pk_cb) { class_call(nonlinear_sigma_at_z(pba, psp->pnl, R, z, psp->pnl->index_pk_cb, 80., // hardcoded, yes, but the function is deprecated... 
sigma_cb), psp->pnl->error_message, psp->error_message); } return _SUCCESS_; } /* deprecated functions (since v2.1) */ /** * Obsolete function, superseded by perturb_sources_at_tau() * (at the time of the switch, this function was anyway never used anywhere) * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param z Input: redshift * @param output Output: matter transfer functions * @return the error status */ int spectra_tk_at_z( struct background * pba, struct spectra * psp, double z, double * output /* array with argument output[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr] (must be already allocated) */ ) { class_stop(psp->error_message, "The function spectra_tk_at_z() is obsolete, use instead perturb_sources_at_tau(), it does the same"); return _SUCCESS_; } /** * Obsolete function, superseded by perturb_sources_at_tau() * (at the time of the switch, this function was anyway never used anywhere) * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param output Output: matter transfer functions * @return the error status */ int spectra_tk_at_k_and_z( struct background * pba, struct spectra * psp, double k, double z, double * output /* array with argument output[index_ic*psp->tr_size+index_tr] (must be already allocated) */ ) { class_stop(psp->error_message, "The function spectra_tk_at_k_and_z() is obsolete, use instead perturb_sources_at_tau(), it does the same provided that you interpolate its output at some wavenumber k"); return _SUCCESS_; } /* end deprecated functions */
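/*
 * A minimal migration sketch (not part of the module itself): each
 * deprecated spectra_pk_*() wrapper above simply forwards to the nonlinear
 * module with the argument mapping shown in its body. Assuming a fully
 * initialized CLASS run with a single initial condition and no _cb output
 * requested, where `ba`, `pm` and `sp` are hypothetical instances of this
 * run's background, primordial and spectra structures:
 *
 *   double pk;
 *
 *   // deprecated call (prints a warning on stderr):
 *   spectra_pk_at_k_and_z(&ba, &pm, &sp, 0.1, 0., &pk, NULL, NULL, NULL);
 *
 *   // equivalent direct call, exactly as wrapped above:
 *   nonlinear_pks_at_k_and_z(&ba, &pm, sp.pnl, pk_linear, 0.1, 0.,
 *                            &pk, NULL, NULL, NULL);
 */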
/** @file spectra.c Documented spectra module * * Julien Lesgourgues, 1.11.2019 * * This module computes the harmonic power spectra \f$ C_l^{X} \f$'s * given the transfer functions and the primordial spectra. * * The following functions can be called from other modules: * * -# spectra_init() at the beginning (but after transfer_init()) * -# spectra_cl_at_l() at any time for computing individual \f$ C_l \f$'s at any l * -# spectra_free() at the end */ #include "spectra.h" /** * Anisotropy power spectra \f$ C_l\f$'s for all types, modes and initial conditions. * * This routine evaluates all the \f$C_l\f$'s at a given value of l by * interpolating in the pre-computed table. When relevant, it also * sums over all initial conditions for each mode, and over all modes. * * This function can be * called from any module at any time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet. * * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param l Input: multipole number * @param cl_tot Output: total \f$C_l\f$'s for all types (TT, TE, EE, etc..) * @param cl_md Output: \f$C_l\f$'s for all types (TT, TE, EE, etc..) decomposed mode by mode (scalar, tensor, ...) when relevant * @param cl_md_ic Output: \f$C_l\f$'s for all types (TT, TE, EE, etc..) decomposed by pairs of initial conditions (adiabatic, isocurvatures) for each mode (usually, only for the scalar mode) when relevant * @return the error status */ int spectra_cl_at_l( struct spectra *psp, double l, double *cl_tot, /* array with argument cl_tot[index_ct] (must * be already allocated) */ double **cl_md, /* array with argument * cl_md[index_md][index_ct] (must be already * allocated only if several modes) */ double **cl_md_ic /* array with argument * cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] * (must be already allocated for a given mode * only if several ic's) */ ) { /** Summary: */ /** - define local variables */ int last_index; int index_md; int index_ic1, index_ic2, index_ic1_ic2; int index_ct; /** - (a) treat case in which there is only one mode and one initial condition. Then, only cl_tot needs to be filled. */ if ((psp->md_size == 1) && (psp->ic_size[0] == 1)) { index_md = 0; if ((int)l <= psp->l[psp->l_size[index_md] - 1]) { /* interpolate at l */ class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ct_size, l, &last_index, cl_tot, psp->ct_size, psp->error_message), psp->error_message, psp->error_message); /* set to zero for the types such that l>l_max */ for (index_ct = 0; index_ct < psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_tot[index_ct] = 0.; } else { for (index_ct = 0; index_ct < psp->ct_size; index_ct++) cl_tot[index_ct] = 0.; } } /** - (b) treat case in which there is only one mode with several initial conditions. Fill cl_md_ic[index_md=0] and sum it to get cl_tot.
*/ if ((psp->md_size == 1) && (psp->ic_size[0] > 1)) { index_md = 0; for (index_ct = 0; index_ct < psp->ct_size; index_ct++) cl_tot[index_ct] = 0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1, index_ic2, psp->ic_size[index_md]); if (((int)l <= psp->l[psp->l_size[index_md] - 1]) && (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_)) { class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ic_ic_size[index_md] * psp->ct_size, l, &last_index, cl_md_ic[index_md], psp->ic_ic_size[index_md] * psp->ct_size, psp->error_message), psp->error_message, psp->error_message); for (index_ct = 0; index_ct < psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct] = 0.; } else { for (index_ct = 0; index_ct < psp->ct_size; index_ct++) cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct] = 0.; } /* compute cl_tot by summing over cl_md_ic */ for (index_ct = 0; index_ct < psp->ct_size; index_ct++) { if (index_ic1 == index_ic2) cl_tot[index_ct] += cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct]; else cl_tot[index_ct] += 2. * cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct]; } } } } /** - (c) loop over modes */ if (psp->md_size > 1) { for (index_ct = 0; index_ct < psp->ct_size; index_ct++) cl_tot[index_ct] = 0.; for (index_md = 0; index_md < psp->md_size; index_md++) { /** - --> (c.1.) treat case in which the mode under consideration has only one initial condition. Fill cl_md[index_md]. */ if (psp->ic_size[index_md] == 1) { if ((int)l <= psp->l[psp->l_size[index_md] - 1]) { class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ct_size, l, &last_index, cl_md[index_md], psp->ct_size, psp->error_message), psp->error_message, psp->error_message); for (index_ct = 0; index_ct < psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_md[index_md][index_ct] = 0.; } else { for (index_ct = 0; index_ct < psp->ct_size; index_ct++) cl_md[index_md][index_ct] = 0.; } } /** - --> (c.2.) treat case in which the mode under consideration has several initial conditions. 
Fill cl_md_ic[index_md] and sum it to get cl_md[index_md] */ if (psp->ic_size[index_md] > 1) { if ((int)l <= psp->l[psp->l_size[index_md] - 1]) { /* interpolate all ic and ct */ class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ic_ic_size[index_md] * psp->ct_size, l, &last_index, cl_md_ic[index_md], psp->ic_ic_size[index_md] * psp->ct_size, psp->error_message), psp->error_message, psp->error_message); /* set to zero some of the components */ for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1, index_ic2, psp->ic_size[index_md]); for (index_ct = 0; index_ct < psp->ct_size; index_ct++) { if (((int)l > psp->l_max_ct[index_md][index_ct]) || (psp->is_non_zero[index_md][index_ic1_ic2] == _FALSE_)) cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct] = 0.; } } } } /* if l was too big, set anyway all components to zero */ else { for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1, index_ic2, psp->ic_size[index_md]); for (index_ct = 0; index_ct < psp->ct_size; index_ct++) { cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct] = 0.; } } } } /* sum up all ic for each mode */ for (index_ct = 0; index_ct < psp->ct_size; index_ct++) { cl_md[index_md][index_ct] = 0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1, index_ic2, psp->ic_size[index_md]); if (index_ic1 == index_ic2) cl_md[index_md][index_ct] += cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct]; else cl_md[index_md][index_ct] += 2. * cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct]; } } } } /** - --> (c.3.) add contribution of cl_md[index_md] to cl_tot */ for (index_ct = 0; index_ct < psp->ct_size; index_ct++) cl_tot[index_ct] += cl_md[index_md][index_ct]; } } return _SUCCESS_; } /** * This routine initializes the spectra structure (in particular, * computes table of anisotropy and Fourier spectra \f$ C_l^{X}, P(k), ... \f$) * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure (will provide H, Omega_m at redshift of interest) * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfer structure * @param ppm Input: pointer to primordial structure * @param pnl Input: pointer to nonlinear structure * @param psp Output: pointer to initialized spectra structure * @return the error status */ int spectra_init( struct precision *ppr, struct background *pba, struct perturbs *ppt, struct primordial *ppm, struct nonlinear *pnl, struct transfers *ptr, struct spectra *psp ) { /** Summary: */ /** - check that we really want to compute at least one spectrum */ if (ppt->has_cls == _FALSE_) { psp->md_size = 0; if (psp->spectra_verbose > 0) printf("No spectra requested. 
Spectra module skipped.\n"); return _SUCCESS_; } else { if (psp->spectra_verbose > 0) printf("Computing unlensed harmonic spectra\n"); } /** - initialize indices and allocate some of the arrays in the spectra structure */ class_call(spectra_indices(pba, ppt, ptr, ppm, psp), psp->error_message, psp->error_message); /** - deal with \f$ C_l\f$'s, if any */ if (ppt->has_cls == _TRUE_) { class_call(spectra_cls(pba, ppt, ptr, ppm, psp), psp->error_message, psp->error_message); } else { psp->ct_size = 0; } /** - a pointer to the nonlinear structure is stored in the spectra structure. This odd, unusual and inelegant feature has been introduced in v2.8 in order to keep in use some deprecated functions spectra_pk_...() that are now pointing at the new functions nonlinear_pk_...(). In the future, if the deprecated functions are removed, it will be possible to remove this pointer as well. */ psp->pnl = pnl; return _SUCCESS_; } /** * This routine frees all the memory space allocated by spectra_init(). * * To be called at the end of each run, only when no further calls to * spectra_cl_at_l(), spectra_pk_at_z(), spectra_pk_at_k_and_z() are needed. * * @param psp Input: pointer to spectra structure (whose fields must be freed) * @return the error status */ int spectra_free( struct spectra *psp ) { int index_md; if (psp->md_size > 0) { if (psp->ct_size > 0) { for (index_md = 0; index_md < psp->md_size; index_md++) { free(psp->l_max_ct[index_md]); free(psp->cl[index_md]); free(psp->ddcl[index_md]); } free(psp->l); free(psp->l_size); free(psp->l_max_ct); free(psp->l_max); free(psp->cl); free(psp->ddcl); } } for (index_md = 0; index_md < psp->md_size; index_md++) free(psp->is_non_zero[index_md]); free(psp->is_non_zero); free(psp->ic_size); free(psp->ic_ic_size); return _SUCCESS_; } /** * This routine defines indices and allocates tables in the spectra structure * * @param pba Input: pointer to background structure * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfers structure * @param ppm Input: pointer to primordial structure * @param psp Input/output: pointer to spectra structure * @return the error status */ int spectra_indices( struct background *pba, struct perturbs *ppt, struct transfers *ptr, struct primordial *ppm, struct spectra *psp ) { int index_ct; int index_md; int index_ic1_ic2; psp->md_size = ppt->md_size; if (ppt->has_scalars == _TRUE_) psp->index_md_scalars = ppt->index_md_scalars; class_alloc(psp->ic_size, sizeof(int) * psp->md_size, psp->error_message); class_alloc(psp->ic_ic_size, sizeof(int) * psp->md_size, psp->error_message); class_alloc(psp->is_non_zero, sizeof(short *) * psp->md_size, psp->error_message); for (index_md = 0; index_md < psp->md_size; index_md++) { psp->ic_size[index_md] = ppm->ic_size[index_md]; psp->ic_ic_size[index_md] = ppm->ic_ic_size[index_md]; class_alloc(psp->is_non_zero[index_md], sizeof(short) * psp->ic_ic_size[index_md], psp->error_message); for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) psp->is_non_zero[index_md][index_ic1_ic2] = ppm->is_non_zero[index_md][index_ic1_ic2]; } if (ppt->has_cls == _TRUE_) { /* types of C_l's relevant for both scalars and tensors: TT, EE, TE */ index_ct = 0; if (ppt->has_cl_cmb_temperature == _TRUE_) { psp->has_tt = _TRUE_; psp->index_ct_tt = index_ct; index_ct++; } else { psp->has_tt = _FALSE_; } if (ppt->has_cl_cmb_polarization == _TRUE_) { psp->has_ee = _TRUE_; psp->index_ct_ee = index_ct; index_ct++; } else { psp->has_ee = _FALSE_; } if
((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_cmb_polarization == _TRUE_)) { psp->has_te = _TRUE_; psp->index_ct_te = index_ct; index_ct++; } else { psp->has_te = _FALSE_; } if (ppt->has_cl_cmb_polarization == _TRUE_) { psp->has_bb = _TRUE_; psp->index_ct_bb = index_ct; index_ct++; } else { psp->has_bb = _FALSE_; } /* * types of C_l's relevant only for scalars: phi-phi, T-phi, E-phi, * d-d, T-d */ if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_pp = _TRUE_; psp->index_ct_pp = index_ct; index_ct++; } else { psp->has_pp = _FALSE_; } if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_tp = _TRUE_; psp->index_ct_tp = index_ct; index_ct++; } else { psp->has_tp = _FALSE_; } psp->ct_size = index_ct; if ((ppt->has_cl_cmb_polarization == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_ep = _TRUE_; psp->index_ct_ep = index_ct; index_ct++; } else { psp->has_ep = _FALSE_; } if ((ppt->has_scalars == _TRUE_) && ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_))) psp->d_size = ppt->selection_num; else psp->d_size = 0; if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_dd = _TRUE_; psp->index_ct_dd = index_ct; index_ct += (psp->d_size * (psp->d_size + 1) - (psp->d_size - psp->non_diag) * (psp->d_size - 1 - psp->non_diag)) / 2; } else { psp->has_dd = _FALSE_; } /* * the computation of C_l^Td would require a very good sampling of * transfer functions over a wide range, and a huge computation time. * In the current version, we prefer to switch it off, rather than * either slowing down the code considerably, or producing very * inaccurate spectra. * * if ((ppt->has_cl_cmb_temperature == _TRUE_) && * (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == * _TRUE_)) { psp->has_td = _TRUE_; psp->index_ct_td=index_ct; * index_ct+=psp->d_size; } else { psp->has_td = _FALSE_; } */ psp->has_td = _FALSE_; if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_pd = _TRUE_; psp->index_ct_pd = index_ct; index_ct += psp->d_size; } else { psp->has_pd = _FALSE_; } psp->has_td = _FALSE_; if ((ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_ll = _TRUE_; psp->index_ct_ll = index_ct; index_ct += (psp->d_size * (psp->d_size + 1) - (psp->d_size - psp->non_diag) * (psp->d_size - 1 - psp->non_diag)) / 2; } else { psp->has_ll = _FALSE_; } /* * the computation of C_l^Tl would require a very good sampling of * transfer functions over a wide range, and a huge computation time. * In the current version, we prefer to switch it off, rather than * either slowing down the code considerably, or producing very * inaccurate spectra. 
* * if ((ppt->has_cl_cmb_temperature == _TRUE_) && * (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == * _TRUE_)) { psp->has_tl = _TRUE_; psp->index_ct_tl=index_ct; * index_ct+=psp->d_size; } else { psp->has_tl = _FALSE_; } */ psp->has_tl = _FALSE_; if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_dl = _TRUE_; psp->index_ct_dl = index_ct; index_ct += psp->d_size * psp->d_size - (psp->d_size - psp->non_diag) * (psp->d_size - 1 - psp->non_diag); } else { psp->has_dl = _FALSE_; } psp->ct_size = index_ct; /* * infer from input quantities the l_max for each mode and type, * l_max_ct[index_md][index_type]. Maximize it over index_ct, and * then over index_md. */ class_alloc(psp->l_max, sizeof(int *) * psp->md_size, psp->error_message); class_alloc(psp->l_max_ct, sizeof(int *) * psp->md_size, psp->error_message); for (index_md = 0; index_md < psp->md_size; index_md++) { class_calloc(psp->l_max_ct[index_md], psp->ct_size, sizeof(int), psp->error_message); } if (ppt->has_scalars == _TRUE_) { /* spectra computed up to l_scalar_max */ if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tt] = ppt->l_scalar_max; if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ee] = ppt->l_scalar_max; if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_te] = ppt->l_scalar_max; if (psp->has_pp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_pp] = ppt->l_scalar_max; if (psp->has_tp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tp] = ppt->l_scalar_max; if (psp->has_ep == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ep] = ppt->l_scalar_max; /* spectra computed up to l_lss_max */ if (psp->has_dd == _TRUE_) for (index_ct = psp->index_ct_dd; index_ct < psp->index_ct_dd + (psp->d_size * (psp->d_size + 1) - (psp->d_size - psp->non_diag) * (psp->d_size - 1 - psp->non_diag)) / 2; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; if (psp->has_td == _TRUE_) for (index_ct = psp->index_ct_td; index_ct < psp->index_ct_td + psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max, ppt->l_lss_max); if (psp->has_pd == _TRUE_) for (index_ct = psp->index_ct_pd; index_ct < psp->index_ct_pd + psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max, ppt->l_lss_max); if (psp->has_ll == _TRUE_) for (index_ct = psp->index_ct_ll; index_ct < psp->index_ct_ll + (psp->d_size * (psp->d_size + 1) - (psp->d_size - psp->non_diag) * (psp->d_size - 1 - psp->non_diag)) / 2; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; if (psp->has_tl == _TRUE_) for (index_ct = psp->index_ct_tl; index_ct < psp->index_ct_tl + psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max, ppt->l_lss_max); if (psp->has_dl == _TRUE_) for (index_ct = psp->index_ct_dl; index_ct < psp->index_ct_dl + (psp->d_size * psp->d_size - (psp->d_size - psp->non_diag) * (psp->d_size - 1 - psp->non_diag)); index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; } if (ppt->has_tensors == _TRUE_) { /* spectra computed up to l_tensor_max */ if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_tt] = ppt->l_tensor_max; if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_ee] = ppt->l_tensor_max; if (psp->has_te == _TRUE_) 
psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_te] = ppt->l_tensor_max; if (psp->has_bb == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_bb] = ppt->l_tensor_max; } /* maximizations */ psp->l_max_tot = 0; for (index_md = 0; index_md < psp->md_size; index_md++) { psp->l_max[index_md] = 0; for (index_ct = 0; index_ct < psp->ct_size; index_ct++) psp->l_max[index_md] = MAX(psp->l_max[index_md], psp->l_max_ct[index_md][index_ct]); psp->l_max_tot = MAX(psp->l_max_tot, psp->l_max[index_md]); } } return _SUCCESS_; } /** * This routine computes a table of values for all harmonic spectra \f$ C_l \f$'s, * given the transfer functions and primordial spectra. * * @param pba Input: pointer to background structure * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfers structure * @param ppm Input: pointer to primordial structure * @param psp Input/Output: pointer to spectra structure * @return the error status */ int spectra_cls( struct background *pba, struct perturbs *ppt, struct transfers *ptr, struct primordial *ppm, struct spectra *psp ) { /** Summary: */ /** - define local variables */ int index_md; int index_ic1, index_ic2, index_ic1_ic2; int index_l; int index_ct; int cl_integrand_num_columns; double *cl_integrand; /* array with argument * cl_integrand[index_k*cl_integrand_num_columns+1+psp->index_ct] */ double *transfer_ic1; /* array with argument transfer_ic1[index_tt] */ double *transfer_ic2; /* idem */ double *primordial_pk; /* array with argument * primordial_pk[index_ic_ic] */ /* * This code can be optionally compiled with the openmp option for * parallel computation. Inside parallel regions, the use of the command * "return" is forbidden. For error management, instead of "return * _FAILURE_", we will set the variable below to "abort = _TRUE_". This * will lead to a "return _FAILURE_" just after leaving the parallel * region. */ int abort; /** - allocate pointers to arrays where results will be stored */ class_alloc(psp->l_size, sizeof(int) * psp->md_size, psp->error_message); class_alloc(psp->cl, sizeof(double *) * psp->md_size, psp->error_message); class_alloc(psp->ddcl, sizeof(double *) * psp->md_size, psp->error_message); psp->l_size_max = ptr->l_size_max; class_alloc(psp->l, sizeof(double) * psp->l_size_max, psp->error_message); /** - store values of l */ for (index_l = 0; index_l < psp->l_size_max; index_l++) { psp->l[index_l] = (double)ptr->l[index_l]; } /** - loop over modes (scalars, tensors, etc.).
For each mode: */ for (index_md = 0; index_md < psp->md_size; index_md++) { /** - --> (a) store number of l values for this mode */ psp->l_size[index_md] = ptr->l_size[index_md]; /** - --> (b) allocate arrays where results will be stored */ class_alloc(psp->cl[index_md], sizeof(double) * psp->l_size[index_md] * psp->ct_size * psp->ic_ic_size[index_md], psp->error_message); class_alloc(psp->ddcl[index_md], sizeof(double) * psp->l_size[index_md] * psp->ct_size * psp->ic_ic_size[index_md], psp->error_message); cl_integrand_num_columns = 1 + psp->ct_size * 2; /* one for k, ct_size for each type, ct_size for each second derivative of each type */ /** - --> (c) loop over initial conditions */ for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1, index_ic2, psp->ic_size[index_md]); /* * non-diagonal coefficients should be computed only if * non-zero correlation */ if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { /* initialize error management flag */ abort = _FALSE_; /* beginning of parallel region */ { class_alloc_parallel(cl_integrand, ptr->q_size * cl_integrand_num_columns * sizeof(double), psp->error_message); class_alloc_parallel(primordial_pk, psp->ic_ic_size[index_md] * sizeof(double), psp->error_message); class_alloc_parallel(transfer_ic1, ptr->tt_size[index_md] * sizeof(double), psp->error_message); class_alloc_parallel(transfer_ic2, ptr->tt_size[index_md] * sizeof(double), psp->error_message); /** - ---> loop over l values defined in the transfer module. For each l, compute the \f$ C_l\f$'s for all types (TT, TE, ...) by convolving primordial spectra with transfer functions. This elementary task is assigned to spectra_compute_cl() */ for (index_l = 0; index_l < ptr->l_size[index_md]; index_l++) { class_call_parallel(spectra_compute_cl(pba, ppt, ptr, ppm, psp, index_md, index_ic1, index_ic2, index_l, cl_integrand_num_columns, cl_integrand, primordial_pk, transfer_ic1, transfer_ic2), psp->error_message, psp->error_message); } /* end of loop over l */ free(cl_integrand); free(primordial_pk); free(transfer_ic1); free(transfer_ic2); } /* end of parallel region */ if (abort == _TRUE_) return _FAILURE_; } else { /* * set non-diagonal coefficients to zero if pair of ic's * uncorrelated */ for (index_l = 0; index_l < ptr->l_size[index_md]; index_l++) { for (index_ct = 0; index_ct < psp->ct_size; index_ct++) { psp->cl[index_md] [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = 0.; } } } } } /** - --> (d) now that for a given mode, all possible \f$ C_l\f$'s have been computed, compute second derivative of the array in which they are stored, in view of spline interpolation. */ class_call(array_spline_table_lines(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ic_ic_size[index_md] * psp->ct_size, psp->ddcl[index_md], _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, psp->error_message); } return _SUCCESS_; } /** * This routine computes the \f$ C_l\f$'s for a given mode, pair of initial conditions * and multipole, but for all types (TT, TE...), by convolving the * transfer functions with the primordial spectra.
* * @param pba Input: pointer to background structure * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfers structure * @param ppm Input: pointer to primordial structure * @param psp Input/Output: pointer to spectra structure (result stored here) * @param index_md Input: index of mode under consideration * @param index_ic1 Input: index of first initial condition in the correlator * @param index_ic2 Input: index of second initial condition in the correlator * @param index_l Input: index of multipole under consideration * @param cl_integrand_num_columns Input: number of columns in cl_integrand * @param cl_integrand Input: an allocated workspace * @param primordial_pk Input: table of primordial spectrum values * @param transfer_ic1 Input: table of transfer function values for first initial condition * @param transfer_ic2 Input: table of transfer function values for second initial condition * @return the error status */ int spectra_compute_cl( struct background *pba, struct perturbs *ppt, struct transfers *ptr, struct primordial *ppm, struct spectra *psp, int index_md, int index_ic1, int index_ic2, int index_l, int cl_integrand_num_columns, double *cl_integrand, double *primordial_pk, double *transfer_ic1, double *transfer_ic2 ) { int index_q; int index_tt; int index_ct; int index_d1, index_d2; double k; double clvalue; int index_ic1_ic2; double transfer_ic1_temp = 0.; double transfer_ic2_temp = 0.; double *transfer_ic1_nc = NULL; double *transfer_ic2_nc = NULL; double factor; int index_q_spline = 0; index_ic1_ic2 = index_symmetric_matrix(index_ic1, index_ic2, psp->ic_size[index_md]); if (ppt->has_cl_number_count == _TRUE_) { class_alloc(transfer_ic1_nc, psp->d_size * sizeof(double), psp->error_message); class_alloc(transfer_ic2_nc, psp->d_size * sizeof(double), psp->error_message); } for (index_q = 0; index_q < ptr->q_size; index_q++) { //q = ptr->q[index_q]; k = ptr->k[index_md][index_q]; cl_integrand[index_q * cl_integrand_num_columns + 0] = k; class_call(primordial_spectrum_at_k(ppm, index_md, linear, k, primordial_pk), ppm->error_message, psp->error_message); /* above routine checks that k>0: no possible division by zero below */ for (index_tt = 0; index_tt < ptr->tt_size[index_md]; index_tt++) { transfer_ic1[index_tt] = ptr->transfer[index_md] [((index_ic1 * ptr->tt_size[index_md] + index_tt) * ptr->l_size[index_md] + index_l) * ptr->q_size + index_q]; if (index_ic1 == index_ic2) { transfer_ic2[index_tt] = transfer_ic1[index_tt]; } else { transfer_ic2[index_tt] = ptr->transfer[index_md] [((index_ic2 * ptr->tt_size[index_md] + index_tt) * ptr->l_size[index_md] + index_l) * ptr->q_size + index_q]; } } /* define combinations of transfer functions */ if (ppt->has_cl_cmb_temperature == _TRUE_) { if (_scalars_) { transfer_ic1_temp = transfer_ic1[ptr->index_tt_t0] + transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2]; transfer_ic2_temp = transfer_ic2[ptr->index_tt_t0] + transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2]; } if (_vectors_) { transfer_ic1_temp = transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2]; transfer_ic2_temp = transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2]; } if (_tensors_) { transfer_ic1_temp = transfer_ic1[ptr->index_tt_t2]; transfer_ic2_temp = transfer_ic2[ptr->index_tt_t2]; } } if (ppt->has_cl_number_count == _TRUE_) { for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { transfer_ic1_nc[index_d1] = 0.; transfer_ic2_nc[index_d1] = 0.; if (ppt->has_nc_density == _TRUE_) { 
transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_density + index_d1]; transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_density + index_d1]; } if (ppt->has_nc_rsd == _TRUE_) { transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_rsd + index_d1] + transfer_ic1[ptr->index_tt_d0 + index_d1] + transfer_ic1[ptr->index_tt_d1 + index_d1]; transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_rsd + index_d1] + transfer_ic2[ptr->index_tt_d0 + index_d1] + transfer_ic2[ptr->index_tt_d1 + index_d1]; } if (ppt->has_nc_lens == _TRUE_) { transfer_ic1_nc[index_d1] += psp->l[index_l] * (psp->l[index_l] + 1.) * transfer_ic1[ptr->index_tt_nc_lens + index_d1]; transfer_ic2_nc[index_d1] += psp->l[index_l] * (psp->l[index_l] + 1.) * transfer_ic2[ptr->index_tt_nc_lens + index_d1]; } if (ppt->has_nc_gr == _TRUE_) { transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_nc_g1 + index_d1] + transfer_ic1[ptr->index_tt_nc_g2 + index_d1] + transfer_ic1[ptr->index_tt_nc_g3 + index_d1] + transfer_ic1[ptr->index_tt_nc_g4 + index_d1] + transfer_ic1[ptr->index_tt_nc_g5 + index_d1]; transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_nc_g1 + index_d1] + transfer_ic2[ptr->index_tt_nc_g2 + index_d1] + transfer_ic2[ptr->index_tt_nc_g3 + index_d1] + transfer_ic2[ptr->index_tt_nc_g4 + index_d1] + transfer_ic2[ptr->index_tt_nc_g5 + index_d1]; } } } /* integrand of Cl's */ /* * note: we must integrate * * C_l = int [4 pi dk/k calP(k) Delta1_l(q) Delta2_l(q)] * * where calP(k) is the dimensionless power spectrum, equal to a constant * in the scale-invariant case, and to P(k) = A_s k^(ns-1) otherwise, * and q=sqrt(k2+K) (scalars) or sqrt(k2+2K) (vectors) or sqrt(k2+3K) (tensors) * * In the literature, people often rewrite the integral in terms of q * and absorb the Jacobian of the change of variables in a * redefinition of the primordial spectrum. Let us illustrate this for * scalars: * * dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-K)] = q2dq * 1/[q(q2-K)] * * This factor 1/[q(q2-K)] is commonly absorbed in the definition of * calP. Then one would have * * C_l = int [4 pi q2 dq {A_s k^(ns-1)/[q(q2-K)]} Delta1_l(q) Delta2_l(q)] * * Sometimes in the literature, the factor (k2-3K)=(q2-4K) present in * the initial conditions of scalar transfer functions (if normalized * to curvature R=1) is also absorbed in the definition of the power * spectrum. Then the curvature power spectrum reads * * calP = (q2-4K)/[q(q2-K)] * A_s k^(ns-1) * * In CLASS we prefer to define calP = A_s k^(ns-1) like in the flat case, * to have the factor (q2-4K) in the initial conditions, and the * factor 1/[q(q2-K)] doesn't need to be there since we integrate * over dk/k. * * For tensors, the change of variable described above gives a slightly * different result: * * dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-3K)] = q2dq * 1/[q(q2-3K)] * * But for tensors there are extra curvature-related correction factors * to take into account. See the comments in the perturbation module, * related to initial conditions for tensors. */ factor = 4.
* _PI_ / k; if (psp->has_tt == _TRUE_) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_tt] = primordial_pk[index_ic1_ic2] * transfer_ic1_temp * transfer_ic2_temp * factor; if (psp->has_ee == _TRUE_) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_ee] = primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_e] * transfer_ic2[ptr->index_tt_e] * factor; if (psp->has_te == _TRUE_) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_te] = primordial_pk[index_ic1_ic2] * 0.5 * (transfer_ic1_temp * transfer_ic2[ptr->index_tt_e] + transfer_ic1[ptr->index_tt_e] * transfer_ic2_temp) * factor; if (_tensors_ && (psp->has_bb == _TRUE_)) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_bb] = primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_b] * transfer_ic2[ptr->index_tt_b] * factor; if (_scalars_ && (psp->has_pp == _TRUE_)) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_pp] = primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2[ptr->index_tt_lcmb] * factor; if (_scalars_ && (psp->has_tp == _TRUE_)) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_tp] = primordial_pk[index_ic1_ic2] * 0.5 * (transfer_ic1_temp * transfer_ic2[ptr->index_tt_lcmb] + transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_temp) * factor; if (_scalars_ && (psp->has_ep == _TRUE_)) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_ep] = primordial_pk[index_ic1_ic2] * 0.5 * (transfer_ic1[ptr->index_tt_e] * transfer_ic2[ptr->index_tt_lcmb] + transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2[ptr->index_tt_e]) * factor; if (_scalars_ && (psp->has_dd == _TRUE_)) { index_ct = 0; for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { for (index_d2 = index_d1; index_d2 <= MIN(index_d1 + psp->non_diag, psp->d_size - 1); index_d2++) { cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_dd + index_ct] = primordial_pk[index_ic1_ic2] * transfer_ic1_nc[index_d1] * transfer_ic2_nc[index_d2] * factor; index_ct++; } } } if (_scalars_ && (psp->has_td == _TRUE_)) { for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_td + index_d1] = primordial_pk[index_ic1_ic2] * 0.5 * (transfer_ic1_temp * transfer_ic2_nc[index_d1] + transfer_ic1_nc[index_d1] * transfer_ic2_temp) * factor; } } if (_scalars_ && (psp->has_pd == _TRUE_)) { for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_pd + index_d1] = primordial_pk[index_ic1_ic2] * 0.5 * (transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_nc[index_d1] + transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lcmb]) * factor; } } if (_scalars_ && (psp->has_ll == _TRUE_)) { index_ct = 0; for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { for (index_d2 = index_d1; index_d2 <= MIN(index_d1 + psp->non_diag, psp->d_size - 1); index_d2++) { cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_ll + index_ct] = primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_lensing + index_d1] * transfer_ic2[ptr->index_tt_lensing + index_d2] * factor; index_ct++; } } } if (_scalars_ && (psp->has_tl == _TRUE_)) { for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_tl + index_d1] = primordial_pk[index_ic1_ic2] * 0.5 * (transfer_ic1_temp * transfer_ic2[ptr->index_tt_lensing + index_d1] + transfer_ic1[ptr->index_tt_lensing 
+ index_d1] * transfer_ic2_temp) * factor; } } if (_scalars_ && (psp->has_dl == _TRUE_)) { index_ct = 0; for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { for (index_d2 = MAX(index_d1 - psp->non_diag, 0); index_d2 <= MIN(index_d1 + psp->non_diag, psp->d_size - 1); index_d2++) { cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_dl + index_ct] = primordial_pk[index_ic1_ic2] * transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lensing + index_d2] * factor; index_ct++; } } } } for (index_ct = 0; index_ct < psp->ct_size; index_ct++) { /* treat null spectra (C_l^BB of scalars, C_l^pp of tensors, etc.) */ if ((_scalars_ && (psp->has_bb == _TRUE_) && (index_ct == psp->index_ct_bb)) || (_tensors_ && (psp->has_pp == _TRUE_) && (index_ct == psp->index_ct_pp)) || (_tensors_ && (psp->has_tp == _TRUE_) && (index_ct == psp->index_ct_tp)) || (_tensors_ && (psp->has_ep == _TRUE_) && (index_ct == psp->index_ct_ep)) || (_tensors_ && (psp->has_dd == _TRUE_) && (index_ct == psp->index_ct_dd)) || (_tensors_ && (psp->has_td == _TRUE_) && (index_ct == psp->index_ct_td)) || (_tensors_ && (psp->has_pd == _TRUE_) && (index_ct == psp->index_ct_pd)) || (_tensors_ && (psp->has_ll == _TRUE_) && (index_ct == psp->index_ct_ll)) || (_tensors_ && (psp->has_tl == _TRUE_) && (index_ct == psp->index_ct_tl)) || (_tensors_ && (psp->has_dl == _TRUE_) && (index_ct == psp->index_ct_dl)) ) { psp->cl[index_md] [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = 0.; } /* for non-zero spectra, integrate over q */ else { /* spline the integrand over the whole range of k's */ class_call(array_spline(cl_integrand, cl_integrand_num_columns, ptr->q_size, 0, 1 + index_ct, 1 + psp->ct_size + index_ct, _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, psp->error_message); /* * Technical point: we will now do a spline integral over the * whole range of k's, except in the closed (K>0) case. In that * case, it is a bad idea to spline over the values of k * corresponding to nu<nu_flat_approximation. In this region, nu * values are integer values, so the steps dq and dk have some * discrete jumps. This makes the spline routine less accurate * than a trapezoidal integral with finer sampling. So, in the * closed case, we set index_q_spline to * ptr->index_q_flat_approximation, to tell the integration * routine that below this index, it should treat the integral as * a trapezoidal one. For testing, one is free to set * index_q_spline to 0, to enforce spline integration everywhere, * or to (ptr->q_size-1), to enforce trapezoidal integration * everywhere. */ if (pba->sgnK == 1) { index_q_spline = ptr->index_q_flat_approximation; } class_call(array_integrate_all_trapzd_or_spline(cl_integrand, cl_integrand_num_columns, ptr->q_size, index_q_spline, 0, 1 + index_ct, 1 + psp->ct_size + index_ct, &clvalue, psp->error_message), psp->error_message, psp->error_message); /* * in the closed case, instead of an integral, we have a discrete * sum. In practice, this does not matter: the previous routine * does give a correct approximation of the discrete sum, both in * the trapezoidal and spline regions. The only error comes from * the first point: the previous routine assumes a weight for the * first point which is too small compared to what it would be in * an actual discrete sum. The line below corrects this * problem in an exact way. */ if (pba->sgnK == 1) { clvalue += cl_integrand[1 + index_ct] * ptr->q[0] / ptr->k[0][0] * sqrt(pba->K) / 2.; } /* * we have the correct C_l now.
We can store it in the transfer * structure. */ psp->cl[index_md] [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = clvalue; } } if (ppt->has_cl_number_count == _TRUE_) { free(transfer_ic1_nc); free(transfer_ic2_nc); } return _SUCCESS_; } /* deprecated functions (since v2.8) */ /** * Matter power spectrum for arbitrary redshift and for all initial conditions. * * This function is deprecated since v2.8. Try using nonlinear_pk_at_z() instead. * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param mode Input: linear or logarithmic * @param z Input: redshift * @param output_tot Output: total matter power spectrum P(k) in \f$ Mpc^3 \f$ (linear mode), or its logarithms (logarithmic mode) * @param output_ic Output: for each pair of initial conditions, matter power spectra P(k) in \f$ Mpc^3 \f$ (linear mode), or their logarithms and cross-correlation angles (logarithmic mode) * @param output_cb_tot Output: CDM+baryon power spectrum P_cb(k) in \f$ Mpc^3 \f$ (linear mode), or its logarithms (logarithmic mode) * @param output_cb_ic Output: for each pair of initial conditions, CDM+baryon power spectra P_cb(k) in \f$ Mpc^3 \f$ (linear mode), or their logarithms and cross-correlation angles (logarithmic mode) * @return the error status */ int spectra_pk_at_z( struct background *pba, struct spectra *psp, enum linear_or_logarithmic mode, double z, double *output_tot, /* array with argument * output_tot[index_k] (must be * already allocated) */ double *output_ic, /* array with argument * output_tot[index_k * * psp->ic_ic_size[index_md] + * index_ic1_ic2] (must be already * allocated only if more than one * initial condition) */ double *output_cb_tot, /* same as output_tot for the * baryon+CDM only */ double *output_cb_ic /* same as output_ic for the * baryon+CDM only */ ) { fprintf(stderr, " -> [WARNING:] You are calling the function spectra_pk_at_z() which is deprecated since v2.8. Try using nonlinear_pk_at_z() instead.\n"); class_call(nonlinear_pks_at_z( pba, psp->pnl, mode, pk_linear, z, output_tot, output_ic, output_cb_tot, output_cb_ic ), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * Matter power spectrum for arbitrary wavenumber, redshift and initial condition. * * This function is deprecated since v2.8. Try using nonlinear_pk_linear_at_k_and_z() instead. 
* * @param pba Input: pointer to background structure (used for converting z into tau) * @param ppm Input: pointer to primordial structure (used only in the case 0 < k < kmin) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param pk_tot Output: total matter power spectrum P(k) in \f$ Mpc^3 \f$ * @param pk_ic Output: for each pair of initial conditions, matter power spectra P(k) in \f$ Mpc^3\f$ * @param pk_cb_tot Output: b+CDM power spectrum P(k) in \f$ Mpc^3 \f$ * @param pk_cb_ic Output: for each pair of initial conditions, b+CDM power spectra P(k) in \f$ Mpc^3\f$ * @return the error status */ int spectra_pk_at_k_and_z( struct background *pba, struct primordial *ppm, struct spectra *psp, double k, double z, double *pk_tot, /* pointer to a single number (must * be already allocated) */ double *pk_ic, /* array of argument * pk_ic[index_ic1_ic2] (must be * already allocated only if several * initial conditions) */ double *pk_cb_tot, /* same as pk_tot for * baryon+CDM part only */ double *pk_cb_ic /* same as pk_ic for baryon+CDM part * only */ ) { fprintf(stderr, " -> [WARNING:] You are calling the function spectra_pk_at_k_and_z() which is deprecated since v2.8. Try using nonlinear_pk_linear_at_k_and_z() instead.\n"); class_call(nonlinear_pks_at_k_and_z(pba, ppm, psp->pnl, pk_linear, k, z, pk_tot, pk_ic, pk_cb_tot, pk_cb_ic), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * Non-linear total matter power spectrum for arbitrary redshift. * * This function is deprecated since v2.8. Try using nonlinear_pk_at_z() instead. * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param mode Input: linear or logarithmic * @param z Input: redshift * @param output_tot Output: total matter power spectrum P(k) in \f$ Mpc^3\f$ (linear mode), or its logarithms (logarithmic mode) * @param output_cb_tot Output: b+CDM power spectrum P(k) in \f$ Mpc^3\f$ (linear mode), or its logarithms (logarithmic mode) * @return the error status */ int spectra_pk_nl_at_z( struct background *pba, struct spectra *psp, enum linear_or_logarithmic mode, double z, double *output_tot, /* array with argument * output_tot[index_k] (must be * already allocated) */ double *output_cb_tot ) { fprintf(stderr, " -> [WARNING:] You are calling the function spectra_pk_nl_at_z() which is deprecated since v2.8. Try using nonlinear_pk_at_z() instead.\n"); class_call(nonlinear_pks_at_z(pba, psp->pnl, mode, pk_nonlinear, z, output_tot, NULL, output_cb_tot, NULL ), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * Non-linear total matter power spectrum for arbitrary wavenumber and redshift. * * This function is deprecated since v2.8. Try using nonlinear_pk_at_k_and_z() instead. 
* * @param pba Input: pointer to background structure (used for converting z into tau) * @param ppm Input: pointer to primordial structure (used only in the case 0 < k < kmin) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param pk_tot Output: total matter power spectrum P(k) in \f$ Mpc^3\f$ * @param pk_cb_tot Output: b+CDM power spectrum P(k) in \f$ Mpc^3\f$ * @return the error status */ int spectra_pk_nl_at_k_and_z( struct background *pba, struct primordial *ppm, struct spectra *psp, double k, double z, double *pk_tot, /* pointer to a single number * (must be already * allocated) */ double *pk_cb_tot /* same as pk_tot for * baryon+CDM only */ ) { fprintf(stderr, " -> [WARNING:] You are calling the function spectra_pk_nl_at_k_and_z() which is deprecated since v2.8. Try using nonlinear_pk_at_k_and_z() instead.\n"); class_call(nonlinear_pks_at_k_and_z(pba, ppm, psp->pnl, pk_nonlinear, k, z, pk_tot, NULL, pk_cb_tot, NULL ), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * Return the P(k,z) for a grid of (k_i,z_j) passed in input, * for all available pk types (_m, _cb), * either linear or nonlinear depending on input. * * This function is deprecated since v2.8. Try using nonlinear_pks_at_kvec_and_zvec() instead. * * @param pba Input: pointer to background structure * @param psp Input: pointer to spectra structure * @param kvec Input: array of wavenumbers in ascending order (in 1/Mpc) * @param kvec_size Input: size of array of wavenumbers * @param zvec Input: array of redshifts in arbitrary order * @param zvec_size Input: size of array of redshifts * @param pk_tot_out Output: P(k_i,z_j) for total matter (if available) in Mpc**3 * @param pk_cb_tot_out Output: P_cb(k_i,z_j) for cdm+baryons (if available) in Mpc**3 * @param nonlinear Input: _TRUE_ or _FALSE_ (to output nonlinear or linear P(k,z)) * @return the error status */ int spectra_fast_pk_at_kvec_and_zvec( struct background *pba, struct spectra *psp, double *kvec, int kvec_size, double *zvec, int zvec_size, double *pk_tot_out, //pk_tot_out[index_zvec * kvec_size + index_kvec],
//already allocated
// (or NULL if user knows there is no _m output)
double *pk_cb_tot_out, //idem
int nonlinear ) { enum pk_outputs pk_output; fprintf(stderr, " -> [WARNING:] You are calling the function spectra_fast_pk_at_kvec_and_zvec() which is deprecated since v2.8. Try using nonlinear_pks_at_kvec_and_zvec() instead.\n"); if (nonlinear == _TRUE_) pk_output = pk_nonlinear; else pk_output = pk_linear; class_call(nonlinear_pks_at_kvec_and_zvec( pba, psp->pnl, pk_output, kvec, kvec_size, zvec, zvec_size, pk_tot_out, pk_cb_tot_out), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * This routine computes sigma(R) given P(k) for total matter power * spectrum (does not check that k_max is large enough) * * This function is deprecated since v2.8. Try using nonlinear_sigmas_at_z() instead.
* * @param pba Input: pointer to background structure * @param ppm Input: pointer to primordial structure * @param psp Input: pointer to spectra structure * @param R Input: radius in Mpc * @param z Input: redshift * @param sigma Output: variance in a sphere of radius R (dimensionless) * @return the error status */ int spectra_sigma( struct background *pba, struct primordial *ppm, struct spectra *psp, double R, double z, double *sigma ) { fprintf(stderr, " -> [WARNING:] You are calling the function spectra_sigma() which is deprecated since v2.8. Try using nonlinear_sigmas_at_z() instead.\n"); if (psp->pnl->has_pk_m) { class_call(nonlinear_sigma_at_z(pba, psp->pnl, R, z, psp->pnl->index_pk_m, 80., //hardcoded, yes, but the function is deprecated... sigma), psp->pnl->error_message, psp->error_message); } return _SUCCESS_; } /** * This routine computes sigma(R) given P(k) for baryon+cdm power * spectrum (does not check that k_max is large enough) * * This function is deprecated since v2.8. Try using nonlinear_sigmas_at_z() instead. * * @param pba Input: pointer to background structure * @param ppm Input: pointer to primordial structure * @param psp Input: pointer to spectra structure * @param R Input: radius in Mpc * @param z Input: redshift * @param sigma_cb Output: variance in a sphere of radius R (dimensionless) * @return the error status */ int spectra_sigma_cb( struct background *pba, struct primordial *ppm, struct spectra *psp, double R, double z, double *sigma_cb ) { fprintf(stderr, " -> [WARNING:] You are calling the function spectra_sigma_cb() which is deprecated since v2.8. Try using nonlinear_sigmas_at_z() instead.\n"); if (psp->pnl->has_pk_cb) { class_call(nonlinear_sigma_at_z(pba, psp->pnl, R, z, psp->pnl->index_pk_cb, 80., //hardcoded, yes, but the function is deprecated... 
sigma_cb), psp->pnl->error_message, psp->error_message); } return _SUCCESS_; } /* deprecated functions (since v2.1) */ /** * Obsolete function, superseded by perturb_sources_at_tau() * (at the time of the switch, this function was anyway never used anywhere) * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param z Input: redshift * @param output Output: matter transfer functions * @return the error status */ int spectra_tk_at_z( struct background *pba, struct spectra *psp, double z, double *output /* array with argument * output[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr] * (must be already allocated) */ ) { class_stop(psp->error_message, "The function spectra_tk_at_z() is obsolete, use instead perturb_sources_at_tau(), it does the same"); return _SUCCESS_; } /** * Obsolete function, superseded by perturb_sources_at_tau() * (at the time of the switch, this function was anyway never used anywhere) * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param output Output: matter transfer functions * @return the error status */ int spectra_tk_at_k_and_z( struct background *pba, struct spectra *psp, double k, double z, double *output /* array with argument * output[index_ic*psp->tr_size+index_tr] * (must be already allocated) */ ) { class_stop(psp->error_message, "The function spectra_tk_at_k_and_z() is obsolete, use instead perturb_sources_at_tau(), it does the same provided that you interpolate its output at some wavenumber k"); return _SUCCESS_; } /* end deprecated functions */
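/*
 * A minimal usage sketch of this module's public entry points, following
 * the call order stated in the file header: spectra_init() after
 * transfer_init(), spectra_cl_at_l() at any time afterwards, and
 * spectra_free() at the end. The precision, background, perturbation,
 * primordial, nonlinear and transfer structures (`pr`, `ba`, `pt`, `pm`,
 * `nl`, `tr` below) are hypothetical names for structures already filled
 * by the earlier CLASS modules; error handling is shortened for
 * illustration:
 *
 *   struct spectra sp;
 *   sp.spectra_verbose = 1;
 *
 *   if (spectra_init(&pr, &ba, &pt, &pm, &nl, &tr, &sp) == _FAILURE_)
 *     return _FAILURE_;
 *
 *   double *cl = malloc(sp.ct_size * sizeof(double));
 *   // total C_l at l = 100; the cl_md / cl_md_ic arrays are only needed
 *   // when there are several modes / several initial conditions
 *   spectra_cl_at_l(&sp, 100., cl, NULL, NULL);
 *
 *   free(cl);
 *   spectra_free(&sp);
 */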
/** @file spectra.c Documented spectra module * * Julien Lesgourgues, 1.11.2019 * * This module computes the harmonic power spectra \f$ C_l^{X} \f$'s * given the transfer functions and the primordial spectra. * * The following functions can be called from other modules: * * -# spectra_init() at the beginning (but after transfer_init()) * -# spectra_cl_at_l() at any time for computing individual \f$ C_l \f$'s at any l * -# spectra_free() at the end */ #include "spectra.h" /** * Anisotropy power spectra \f$ C_l\f$'s for all types, modes and initial conditions. * * This routine evaluates all the \f$C_l\f$'s at a given value of l by * interpolating in the pre-computed table. When relevant, it also * sums over all initial conditions for each mode, and over all modes. * * This function can be * called from any module at any time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet. * * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param l Input: multipole number * @param cl_tot Output: total \f$C_l\f$'s for all types (TT, TE, EE, etc..) * @param cl_md Output: \f$C_l\f$'s for all types (TT, TE, EE, etc..) decomposed mode by mode (scalar, tensor, ...) when relevant * @param cl_md_ic Output: \f$C_l\f$'s for all types (TT, TE, EE, etc..) decomposed by pairs of initial conditions (adiabatic, isocurvatures) for each mode (usually, only for the scalar mode) when relevant * @return the error status */ int spectra_cl_at_l( struct spectra *psp, double l, double *cl_tot, /* array with argument cl_tot[index_ct] (must * be already allocated) */ double **cl_md, /* array with argument * cl_md[index_md][index_ct] (must be already * allocated only if several modes) */ double **cl_md_ic /* array with argument * cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] * (must be already allocated for a given mode * only if several ic's) */ ) { /** Summary: */ /** - define local variables */ int last_index; int index_md; int index_ic1, index_ic2, index_ic1_ic2; int index_ct; /** - (a) treat case in which there is only one mode and one initial condition. Then, only cl_tot needs to be filled. */ if ((psp->md_size == 1) && (psp->ic_size[0] == 1)) { index_md = 0; if ((int)l <= psp->l[psp->l_size[index_md] - 1]) { /* interpolate at l */ class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ct_size, l, &last_index, cl_tot, psp->ct_size, psp->error_message), psp->error_message, psp->error_message); /* set to zero for the types such that l>l_max */ for (index_ct = 0; index_ct < psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_tot[index_ct] = 0.; } else { for (index_ct = 0; index_ct < psp->ct_size; index_ct++) cl_tot[index_ct] = 0.; } } /** - (b) treat case in which there is only one mode with several initial conditions. Fill cl_md_ic[index_md=0] and sum it to get cl_tot.
*/ if ((psp->md_size == 1) && (psp->ic_size[0] > 1)) { index_md = 0; for (index_ct = 0; index_ct < psp->ct_size; index_ct++) cl_tot[index_ct] = 0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1, index_ic2, psp->ic_size[index_md]); if (((int)l <= psp->l[psp->l_size[index_md] - 1]) && (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_)) { class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ic_ic_size[index_md] * psp->ct_size, l, &last_index, cl_md_ic[index_md], psp->ic_ic_size[index_md] * psp->ct_size, psp->error_message), psp->error_message, psp->error_message); for (index_ct = 0; index_ct < psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct] = 0.; } else { for (index_ct = 0; index_ct < psp->ct_size; index_ct++) cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct] = 0.; } /* compute cl_tot by summing over cl_md_ic */ for (index_ct = 0; index_ct < psp->ct_size; index_ct++) { if (index_ic1 == index_ic2) cl_tot[index_ct] += cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct]; else cl_tot[index_ct] += 2. * cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct]; } } } } /** - (c) loop over modes */ if (psp->md_size > 1) { for (index_ct = 0; index_ct < psp->ct_size; index_ct++) cl_tot[index_ct] = 0.; for (index_md = 0; index_md < psp->md_size; index_md++) { /** - --> (c.1.) treat case in which the mode under consideration has only one initial condition. Fill cl_md[index_md]. */ if (psp->ic_size[index_md] == 1) { if ((int)l <= psp->l[psp->l_size[index_md] - 1]) { class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ct_size, l, &last_index, cl_md[index_md], psp->ct_size, psp->error_message), psp->error_message, psp->error_message); for (index_ct = 0; index_ct < psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_md[index_md][index_ct] = 0.; } else { for (index_ct = 0; index_ct < psp->ct_size; index_ct++) cl_md[index_md][index_ct] = 0.; } } /** - --> (c.2.) treat case in which the mode under consideration has several initial conditions. 
Fill cl_md_ic[index_md] and sum it to get cl_md[index_md] */ if (psp->ic_size[index_md] > 1) { if ((int)l <= psp->l[psp->l_size[index_md] - 1]) { /* interpolate all ic and ct */ class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ic_ic_size[index_md] * psp->ct_size, l, &last_index, cl_md_ic[index_md], psp->ic_ic_size[index_md] * psp->ct_size, psp->error_message), psp->error_message, psp->error_message); /* set to zero some of the components */ for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1, index_ic2, psp->ic_size[index_md]); for (index_ct = 0; index_ct < psp->ct_size; index_ct++) { if (((int)l > psp->l_max_ct[index_md][index_ct]) || (psp->is_non_zero[index_md][index_ic1_ic2] == _FALSE_)) cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct] = 0.; } } } } /* if l was too big, set anyway all components to zero */ else { for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1, index_ic2, psp->ic_size[index_md]); for (index_ct = 0; index_ct < psp->ct_size; index_ct++) { cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct] = 0.; } } } } /* sum up all ic for each mode */ for (index_ct = 0; index_ct < psp->ct_size; index_ct++) { cl_md[index_md][index_ct] = 0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1, index_ic2, psp->ic_size[index_md]); if (index_ic1 == index_ic2) cl_md[index_md][index_ct] += cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct]; else cl_md[index_md][index_ct] += 2. * cl_md_ic[index_md][index_ic1_ic2 * psp->ct_size + index_ct]; } } } } /** - --> (c.3.) add contribution of cl_md[index_md] to cl_tot */ for (index_ct = 0; index_ct < psp->ct_size; index_ct++) cl_tot[index_ct] += cl_md[index_md][index_ct]; } } return _SUCCESS_; } /** * This routine initializes the spectra structure (in particular, * computes table of anisotropy and Fourier spectra \f$ C_l^{X}, P(k), ... \f$) * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure (will provide H, Omega_m at redshift of interest) * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfer structure * @param ppm Input: pointer to primordial structure * @param pnl Input: pointer to nonlinear structure * @param psp Output: pointer to initialized spectra structure * @return the error status */ int spectra_init( struct precision *ppr, struct background *pba, struct perturbs *ppt, struct primordial *ppm, struct nonlinear *pnl, struct transfers *ptr, struct spectra *psp ) { /** Summary: */ /** - check that we really want to compute at least one spectrum */ if (ppt->has_cls == _FALSE_) { psp->md_size = 0; if (psp->spectra_verbose > 0) printf("No spectra requested. 
Spectra module skipped.\n"); return _SUCCESS_; } else { if (psp->spectra_verbose > 0) printf("Computing unlensed harmonic spectra\n"); } /** - initialize indices and allocate some of the arrays in the spectra structure */ class_call(spectra_indices(pba, ppt, ptr, ppm, psp), psp->error_message, psp->error_message); /** - deal with \f$ C_l\f$'s, if any */ if (ppt->has_cls == _TRUE_) { class_call(spectra_cls(pba, ppt, ptr, ppm, psp), psp->error_message, psp->error_message); } else { psp->ct_size = 0; } /** - a pointer to the nonlinear structure is stored in the spectra structure. This odd, unusual and inelegant feature was introduced in v2.8 in order to keep in use some deprecated functions spectra_pk_...() that now simply forward to the new functions nonlinear_pk_...(). In the future, once the deprecated functions are removed, this pointer can be removed as well. */ psp->pnl = pnl; return _SUCCESS_; } /** * This routine frees all the memory space allocated by spectra_init(). * * To be called at the end of each run, only when no further calls to * spectra_cl_at_l(), spectra_pk_at_z(), spectra_pk_at_k_and_z() are needed. * * @param psp Input: pointer to spectra structure (whose fields must be freed) * @return the error status */ int spectra_free( struct spectra *psp ) { int index_md; if (psp->md_size > 0) { if (psp->ct_size > 0) { for (index_md = 0; index_md < psp->md_size; index_md++) { free(psp->l_max_ct[index_md]); free(psp->cl[index_md]); free(psp->ddcl[index_md]); } free(psp->l); free(psp->l_size); free(psp->l_max_ct); free(psp->l_max); free(psp->cl); free(psp->ddcl); } } for (index_md = 0; index_md < psp->md_size; index_md++) free(psp->is_non_zero[index_md]); free(psp->is_non_zero); free(psp->ic_size); free(psp->ic_ic_size); return _SUCCESS_; } /** * This routine defines indices and allocates tables in the spectra structure * * @param pba Input: pointer to background structure * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfers structure * @param ppm Input: pointer to primordial structure * @param psp Input/output: pointer to spectra structure * @return the error status */ int spectra_indices( struct background *pba, struct perturbs *ppt, struct transfers *ptr, struct primordial *ppm, struct spectra *psp ) { int index_ct; int index_md; int index_ic1_ic2; psp->md_size = ppt->md_size; if (ppt->has_scalars == _TRUE_) psp->index_md_scalars = ppt->index_md_scalars; class_alloc(psp->ic_size, sizeof(int) * psp->md_size, psp->error_message); class_alloc(psp->ic_ic_size, sizeof(int) * psp->md_size, psp->error_message); class_alloc(psp->is_non_zero, sizeof(short *) * psp->md_size, psp->error_message); for (index_md = 0; index_md < psp->md_size; index_md++) { psp->ic_size[index_md] = ppm->ic_size[index_md]; psp->ic_ic_size[index_md] = ppm->ic_ic_size[index_md]; class_alloc(psp->is_non_zero[index_md], sizeof(short) * psp->ic_ic_size[index_md], psp->error_message); for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) psp->is_non_zero[index_md][index_ic1_ic2] = ppm->is_non_zero[index_md][index_ic1_ic2]; } if (ppt->has_cls == _TRUE_) { /* types of C_l's relevant for both scalars and tensors: TT, EE, TE */ index_ct = 0; if (ppt->has_cl_cmb_temperature == _TRUE_) { psp->has_tt = _TRUE_; psp->index_ct_tt = index_ct; index_ct++; } else { psp->has_tt = _FALSE_; } if (ppt->has_cl_cmb_polarization == _TRUE_) { psp->has_ee = _TRUE_; psp->index_ct_ee = index_ct; index_ct++; } else { psp->has_ee = _FALSE_; } if 
((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_cmb_polarization == _TRUE_)) { psp->has_te = _TRUE_; psp->index_ct_te = index_ct; index_ct++; } else { psp->has_te = _FALSE_; } if (ppt->has_cl_cmb_polarization == _TRUE_) { psp->has_bb = _TRUE_; psp->index_ct_bb = index_ct; index_ct++; } else { psp->has_bb = _FALSE_; } /* * types of C_l's relevant only for scalars: phi-phi, T-phi, E-phi, * d-d, T-d */ if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_pp = _TRUE_; psp->index_ct_pp = index_ct; index_ct++; } else { psp->has_pp = _FALSE_; } if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_tp = _TRUE_; psp->index_ct_tp = index_ct; index_ct++; } else { psp->has_tp = _FALSE_; } psp->ct_size = index_ct; if ((ppt->has_cl_cmb_polarization == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_ep = _TRUE_; psp->index_ct_ep = index_ct; index_ct++; } else { psp->has_ep = _FALSE_; } if ((ppt->has_scalars == _TRUE_) && ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_))) psp->d_size = ppt->selection_num; else psp->d_size = 0; if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_dd = _TRUE_; psp->index_ct_dd = index_ct; index_ct += (psp->d_size * (psp->d_size + 1) - (psp->d_size - psp->non_diag) * (psp->d_size - 1 - psp->non_diag)) / 2; } else { psp->has_dd = _FALSE_; } /* * the computation of C_l^Td would require a very good sampling of * transfer functions over a wide range, and a huge computation time. * In the current version, we prefer to switch it off, rather than * either slowing down the code considerably, or producing very * inaccurate spectra. * * if ((ppt->has_cl_cmb_temperature == _TRUE_) && * (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == * _TRUE_)) { psp->has_td = _TRUE_; psp->index_ct_td=index_ct; * index_ct+=psp->d_size; } else { psp->has_td = _FALSE_; } */ psp->has_td = _FALSE_; if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_pd = _TRUE_; psp->index_ct_pd = index_ct; index_ct += psp->d_size; } else { psp->has_pd = _FALSE_; } psp->has_td = _FALSE_; if ((ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_ll = _TRUE_; psp->index_ct_ll = index_ct; index_ct += (psp->d_size * (psp->d_size + 1) - (psp->d_size - psp->non_diag) * (psp->d_size - 1 - psp->non_diag)) / 2; } else { psp->has_ll = _FALSE_; } /* * the computation of C_l^Tl would require a very good sampling of * transfer functions over a wide range, and a huge computation time. * In the current version, we prefer to switch it off, rather than * either slowing down the code considerably, or producing very * inaccurate spectra. 
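The disabled index assignment is kept below, commented out, for reference: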
* * if ((ppt->has_cl_cmb_temperature == _TRUE_) && * (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == * _TRUE_)) { psp->has_tl = _TRUE_; psp->index_ct_tl=index_ct; * index_ct+=psp->d_size; } else { psp->has_tl = _FALSE_; } */ psp->has_tl = _FALSE_; if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_dl = _TRUE_; psp->index_ct_dl = index_ct; index_ct += psp->d_size * psp->d_size - (psp->d_size - psp->non_diag) * (psp->d_size - 1 - psp->non_diag); } else { psp->has_dl = _FALSE_; } psp->ct_size = index_ct; /* * infer from input quantities the l_max for each mode and type, * l_max_ct[index_md][index_type]. Maximize it over index_ct, and * then over index_md. */ class_alloc(psp->l_max, sizeof(int *) * psp->md_size, psp->error_message); class_alloc(psp->l_max_ct, sizeof(int *) * psp->md_size, psp->error_message); for (index_md = 0; index_md < psp->md_size; index_md++) { class_calloc(psp->l_max_ct[index_md], psp->ct_size, sizeof(int), psp->error_message); } if (ppt->has_scalars == _TRUE_) { /* spectra computed up to l_scalar_max */ if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tt] = ppt->l_scalar_max; if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ee] = ppt->l_scalar_max; if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_te] = ppt->l_scalar_max; if (psp->has_pp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_pp] = ppt->l_scalar_max; if (psp->has_tp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tp] = ppt->l_scalar_max; if (psp->has_ep == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ep] = ppt->l_scalar_max; /* spectra computed up to l_lss_max */ if (psp->has_dd == _TRUE_) for (index_ct = psp->index_ct_dd; index_ct < psp->index_ct_dd + (psp->d_size * (psp->d_size + 1) - (psp->d_size - psp->non_diag) * (psp->d_size - 1 - psp->non_diag)) / 2; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; if (psp->has_td == _TRUE_) for (index_ct = psp->index_ct_td; index_ct < psp->index_ct_td + psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max, ppt->l_lss_max); if (psp->has_pd == _TRUE_) for (index_ct = psp->index_ct_pd; index_ct < psp->index_ct_pd + psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max, ppt->l_lss_max); if (psp->has_ll == _TRUE_) for (index_ct = psp->index_ct_ll; index_ct < psp->index_ct_ll + (psp->d_size * (psp->d_size + 1) - (psp->d_size - psp->non_diag) * (psp->d_size - 1 - psp->non_diag)) / 2; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; if (psp->has_tl == _TRUE_) for (index_ct = psp->index_ct_tl; index_ct < psp->index_ct_tl + psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max, ppt->l_lss_max); if (psp->has_dl == _TRUE_) for (index_ct = psp->index_ct_dl; index_ct < psp->index_ct_dl + (psp->d_size * psp->d_size - (psp->d_size - psp->non_diag) * (psp->d_size - 1 - psp->non_diag)); index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; } if (ppt->has_tensors == _TRUE_) { /* spectra computed up to l_tensor_max */ if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_tt] = ppt->l_tensor_max; if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_ee] = ppt->l_tensor_max; if (psp->has_te == _TRUE_) 
psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_te] = ppt->l_tensor_max; if (psp->has_bb == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_bb] = ppt->l_tensor_max; } /* maximizations */ psp->l_max_tot = 0; for (index_md = 0; index_md < psp->md_size; index_md++) { psp->l_max[index_md] = 0; for (index_ct = 0; index_ct < psp->ct_size; index_ct++) psp->l_max[index_md] = MAX(psp->l_max[index_md], psp->l_max_ct[index_md][index_ct]); psp->l_max_tot = MAX(psp->l_max_tot, psp->l_max[index_md]); } } return _SUCCESS_; } /** * This routine computes a table of values for all harmonic spectra \f$ C_l \f$'s, * given the transfer functions and primordial spectra. * * @param pba Input: pointer to background structure * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfers structure * @param ppm Input: pointer to primordial structure * @param psp Input/Output: pointer to spectra structure * @return the error status */ int spectra_cls( struct background *pba, struct perturbs *ppt, struct transfers *ptr, struct primordial *ppm, struct spectra *psp ) { /** Summary: */ /** - define local variables */ int index_md; int index_ic1, index_ic2, index_ic1_ic2; int index_l; int index_ct; int cl_integrand_num_columns; double *cl_integrand; /* array with argument cl_integrand[index_k*cl_integrand_num_columns+1+psp->index_ct] */ double *transfer_ic1; /* array with argument transfer_ic1[index_tt] */ double *transfer_ic2; /* idem */ double *primordial_pk; /* array with argument primordial_pk[index_ic_ic] */ /* * This code can optionally be compiled with the OpenMP option for * parallel computation. Inside parallel regions, the use of the command * "return" is forbidden. For error management, instead of "return * _FAILURE_", we will set the variable below to "abort = _TRUE_". This * will lead to a "return _FAILURE_" just after leaving the parallel * region. */ int abort; #ifdef _OPENMP /* instrumentation times */ double tstart, tstop; #endif /** - allocate pointers to arrays where results will be stored */ class_alloc(psp->l_size, sizeof(int) * psp->md_size, psp->error_message); class_alloc(psp->cl, sizeof(double *) * psp->md_size, psp->error_message); class_alloc(psp->ddcl, sizeof(double *) * psp->md_size, psp->error_message); psp->l_size_max = ptr->l_size_max; class_alloc(psp->l, sizeof(double) * psp->l_size_max, psp->error_message); /** - store values of l */ for (index_l = 0; index_l < psp->l_size_max; index_l++) { psp->l[index_l] = (double)ptr->l[index_l]; } /** - loop over modes (scalar, tensors, etc). 
For each mode: */ for (index_md = 0; index_md < psp->md_size; index_md++) { /** - --> (a) store number of l values for this mode */ psp->l_size[index_md] = ptr->l_size[index_md]; /** - --> (b) allocate arrays where results will be stored */ class_alloc(psp->cl[index_md], sizeof(double) * psp->l_size[index_md] * psp->ct_size * psp->ic_ic_size[index_md], psp->error_message); class_alloc(psp->ddcl[index_md], sizeof(double) * psp->l_size[index_md] * psp->ct_size * psp->ic_ic_size[index_md], psp->error_message); cl_integrand_num_columns = 1 + psp->ct_size * 2; /* one for k, ct_size * for each type, * ct_size for each * second derivative of * each type */ /** - --> (c) loop over initial conditions */ for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1, index_ic2, psp->ic_size[index_md]); /* * non-diagonal coefficients should be computed only if * non-zero correlation */ if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { /* initialize error management flag */ abort = _FALSE_; /* beginning of parallel region */ #pragma omp parallel \ shared(ptr,ppm,index_md,psp,ppt,cl_integrand_num_columns,index_ic1,index_ic2,abort) \ private(tstart,cl_integrand,primordial_pk,transfer_ic1,transfer_ic2,index_l,tstop) { #ifdef _OPENMP tstart = omp_get_wtime(); #endif class_alloc_parallel(cl_integrand, ptr->q_size * cl_integrand_num_columns * sizeof(double), psp->error_message); class_alloc_parallel(primordial_pk, psp->ic_ic_size[index_md] * sizeof(double), psp->error_message); class_alloc_parallel(transfer_ic1, ptr->tt_size[index_md] * sizeof(double), psp->error_message); class_alloc_parallel(transfer_ic2, ptr->tt_size[index_md] * sizeof(double), psp->error_message); #pragma omp for schedule (dynamic) /** - ---> loop over l values defined in the transfer module. For each l, compute the \f$ C_l\f$'s for all types (TT, TE, ...) by convolving primordial spectra with transfer functions. This elementary task is assigned to spectra_compute_cl() */ for (index_l = 0; index_l < ptr->l_size[index_md]; index_l++) { #pragma omp flush(abort) class_call_parallel(spectra_compute_cl(pba, ppt, ptr, ppm, psp, index_md, index_ic1, index_ic2, index_l, cl_integrand_num_columns, cl_integrand, primordial_pk, transfer_ic1, transfer_ic2), psp->error_message, psp->error_message); } /* end of loop over l */ #ifdef _OPENMP tstop = omp_get_wtime(); if (psp->spectra_verbose > 1) printf("In %s: time spent in parallel region (loop over l's) = %e s for thread %d\n", __func__, tstop - tstart, omp_get_thread_num()); #endif free(cl_integrand); free(primordial_pk); free(transfer_ic1); free(transfer_ic2); } /* end of parallel region */ if (abort == _TRUE_) return _FAILURE_; } else { /* * set non-diagonal coefficients to zero if pair of ic's * uncorrelated */ for (index_l = 0; index_l < ptr->l_size[index_md]; index_l++) { for (index_ct = 0; index_ct < psp->ct_size; index_ct++) { psp->cl[index_md] [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = 0.; } } } } } /** - --> (d) now that for a given mode, all possible \f$ C_l\f$'s have been computed, compute second derivative of the array in which they are stored, in view of spline interpolation. 
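*
* A sketch of how this table is consumed later on (this is essentially what
* spectra_cl_at_l() does in the single-mode, single-ic case; the buffer name
* cl_at_l is hypothetical):
*
*   int last_index;
*   double *cl_at_l = malloc(psp->ct_size * sizeof(double));
*   array_interpolate_spline(psp->l, psp->l_size[index_md],
*                            psp->cl[index_md], psp->ddcl[index_md],
*                            psp->ct_size, l, &last_index,
*                            cl_at_l, psp->ct_size, psp->error_message);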
*/ class_call(array_spline_table_lines(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ic_ic_size[index_md] * psp->ct_size, psp->ddcl[index_md], _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, psp->error_message); } return _SUCCESS_; } /** * This routine computes the \f$ C_l\f$'s for a given mode, pair of initial conditions * and multipole, but for all types (TT, TE...), by convolving the * transfer functions with the primordial spectra. * * @param pba Input: pointer to background structure * @param ppt Input: pointer to perturbation structure * @param ptr Input: pointer to transfers structure * @param ppm Input: pointer to primordial structure * @param psp Input/Output: pointer to spectra structure (result stored here) * @param index_md Input: index of mode under consideration * @param index_ic1 Input: index of first initial condition in the correlator * @param index_ic2 Input: index of second initial condition in the correlator * @param index_l Input: index of multipole under consideration * @param cl_integrand_num_columns Input: number of columns in cl_integrand * @param cl_integrand Input: an allocated workspace * @param primordial_pk Input: table of primordial spectrum values * @param transfer_ic1 Input: table of transfer function values for first initial condition * @param transfer_ic2 Input: table of transfer function values for second initial condition * @return the error status */ int spectra_compute_cl( struct background *pba, struct perturbs *ppt, struct transfers *ptr, struct primordial *ppm, struct spectra *psp, int index_md, int index_ic1, int index_ic2, int index_l, int cl_integrand_num_columns, double *cl_integrand, double *primordial_pk, double *transfer_ic1, double *transfer_ic2 ) { int index_q; int index_tt; int index_ct; int index_d1, index_d2; double k; double clvalue; int index_ic1_ic2; double transfer_ic1_temp = 0.; double transfer_ic2_temp = 0.; double *transfer_ic1_nc = NULL; double *transfer_ic2_nc = NULL; double factor; int index_q_spline = 0; index_ic1_ic2 = index_symmetric_matrix(index_ic1, index_ic2, psp->ic_size[index_md]); if (ppt->has_cl_number_count == _TRUE_) { class_alloc(transfer_ic1_nc, psp->d_size * sizeof(double), psp->error_message); class_alloc(transfer_ic2_nc, psp->d_size * sizeof(double), psp->error_message); } for (index_q = 0; index_q < ptr->q_size; index_q++) { //q = ptr->q[index_q]; k = ptr->k[index_md][index_q]; cl_integrand[index_q * cl_integrand_num_columns + 0] = k; class_call(primordial_spectrum_at_k(ppm, index_md, linear, k, primordial_pk), ppm->error_message, psp->error_message); /* above routine checks that k>0: no possible division by zero below */ for (index_tt = 0; index_tt < ptr->tt_size[index_md]; index_tt++) { transfer_ic1[index_tt] = ptr->transfer[index_md] [((index_ic1 * ptr->tt_size[index_md] + index_tt) * ptr->l_size[index_md] + index_l) * ptr->q_size + index_q]; if (index_ic1 == index_ic2) { transfer_ic2[index_tt] = transfer_ic1[index_tt]; } else { transfer_ic2[index_tt] = ptr->transfer[index_md] [((index_ic2 * ptr->tt_size[index_md] + index_tt) * ptr->l_size[index_md] + index_l) * ptr->q_size + index_q]; } } /* define combinations of transfer functions */ if (ppt->has_cl_cmb_temperature == _TRUE_) { if (_scalars_) { transfer_ic1_temp = transfer_ic1[ptr->index_tt_t0] + transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2]; transfer_ic2_temp = transfer_ic2[ptr->index_tt_t0] + transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2]; } if (_vectors_) { transfer_ic1_temp = 
transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2]; transfer_ic2_temp = transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2]; } if (_tensors_) { transfer_ic1_temp = transfer_ic1[ptr->index_tt_t2]; transfer_ic2_temp = transfer_ic2[ptr->index_tt_t2]; } } if (ppt->has_cl_number_count == _TRUE_) { for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { transfer_ic1_nc[index_d1] = 0.; transfer_ic2_nc[index_d1] = 0.; if (ppt->has_nc_density == _TRUE_) { transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_density + index_d1]; transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_density + index_d1]; } if (ppt->has_nc_rsd == _TRUE_) { transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_rsd + index_d1] + transfer_ic1[ptr->index_tt_d0 + index_d1] + transfer_ic1[ptr->index_tt_d1 + index_d1]; transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_rsd + index_d1] + transfer_ic2[ptr->index_tt_d0 + index_d1] + transfer_ic2[ptr->index_tt_d1 + index_d1]; } if (ppt->has_nc_lens == _TRUE_) { transfer_ic1_nc[index_d1] += psp->l[index_l] * (psp->l[index_l] + 1.) * transfer_ic1[ptr->index_tt_nc_lens + index_d1]; transfer_ic2_nc[index_d1] += psp->l[index_l] * (psp->l[index_l] + 1.) * transfer_ic2[ptr->index_tt_nc_lens + index_d1]; } if (ppt->has_nc_gr == _TRUE_) { transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_nc_g1 + index_d1] + transfer_ic1[ptr->index_tt_nc_g2 + index_d1] + transfer_ic1[ptr->index_tt_nc_g3 + index_d1] + transfer_ic1[ptr->index_tt_nc_g4 + index_d1] + transfer_ic1[ptr->index_tt_nc_g5 + index_d1]; transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_nc_g1 + index_d1] + transfer_ic2[ptr->index_tt_nc_g2 + index_d1] + transfer_ic2[ptr->index_tt_nc_g3 + index_d1] + transfer_ic2[ptr->index_tt_nc_g4 + index_d1] + transfer_ic2[ptr->index_tt_nc_g5 + index_d1]; } } } /* integrand of Cl's */ /* * note: we must integrate * * C_l = int [4 pi dk/k calP(k) Delta1_l(q) Delta2_l(q)] * * where calP(k) is the dimensionless power spectrum equal to a constant * in the scale-invariant case, and to P(k) = A_s k^(ns-1) otherwise * and q=sqrt(k2+K) (scalars) or sqrt(k2+2K) (vectors) or sqrt(k2+3K) * (tensors) * * In the literature, people often rewrite the integral in terms of q * and absorb the Jacobian of the change of variables in a * redefinition of the primodial spectrum. Let us illustrate this for * scalars: * * dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-K)] = q2dq * * 1/[q(q2-K)] * * This factor 1/[q(q2-K)] is commonly absorbed in the definition of * calP. Then one would have * * C_l = int [4 pi q2 dq {A_s k^(ns-1)/[q(q2-K)]} Delta1_l(q) * Delta2_l(q)] * * Sometimes in the literature, the factor (k2-3K)=(q2-4K) present in * the initial conditions of scalar transfer functions (if normalized * to curvature R=1) is also absorbed in the definition of the power * spectrum. Then the curvature power spectrum reads * * calP = (q2-4K)/[q(q2-K)] * (k/k)^ns * * In CLASS we prefer to define calP = (k/k)^ns like in the flat case, * to have the factor (q2-4K) in the initialk conditions, and the * factor 1/[q(q2-K)] doesn't need to be there since we integrate * over dk/k. * * For tensors, the change of variable described above gives a slightly * different result: * * dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-3K)] = q2dq * * 1/[q(q2-3K)] * * But for tensors there are extra curvature-related correction factors * to take into account. See the comments in the perturbation module, * related to initial conditions for tensors. * */ factor = 4. 
* _PI_ / k; if (psp->has_tt == _TRUE_) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_tt] = primordial_pk[index_ic1_ic2] * transfer_ic1_temp * transfer_ic2_temp * factor; if (psp->has_ee == _TRUE_) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_ee] = primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_e] * transfer_ic2[ptr->index_tt_e] * factor; if (psp->has_te == _TRUE_) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_te] = primordial_pk[index_ic1_ic2] * 0.5 * (transfer_ic1_temp * transfer_ic2[ptr->index_tt_e] + transfer_ic1[ptr->index_tt_e] * transfer_ic2_temp) * factor; if (_tensors_ && (psp->has_bb == _TRUE_)) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_bb] = primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_b] * transfer_ic2[ptr->index_tt_b] * factor; if (_scalars_ && (psp->has_pp == _TRUE_)) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_pp] = primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2[ptr->index_tt_lcmb] * factor; if (_scalars_ && (psp->has_tp == _TRUE_)) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_tp] = primordial_pk[index_ic1_ic2] * 0.5 * (transfer_ic1_temp * transfer_ic2[ptr->index_tt_lcmb] + transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_temp) * factor; if (_scalars_ && (psp->has_ep == _TRUE_)) cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_ep] = primordial_pk[index_ic1_ic2] * 0.5 * (transfer_ic1[ptr->index_tt_e] * transfer_ic2[ptr->index_tt_lcmb] + transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2[ptr->index_tt_e]) * factor; if (_scalars_ && (psp->has_dd == _TRUE_)) { index_ct = 0; for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { for (index_d2 = index_d1; index_d2 <= MIN(index_d1 + psp->non_diag, psp->d_size - 1); index_d2++) { cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_dd + index_ct] = primordial_pk[index_ic1_ic2] * transfer_ic1_nc[index_d1] * transfer_ic2_nc[index_d2] * factor; index_ct++; } } } if (_scalars_ && (psp->has_td == _TRUE_)) { for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_td + index_d1] = primordial_pk[index_ic1_ic2] * 0.5 * (transfer_ic1_temp * transfer_ic2_nc[index_d1] + transfer_ic1_nc[index_d1] * transfer_ic2_temp) * factor; } } if (_scalars_ && (psp->has_pd == _TRUE_)) { for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_pd + index_d1] = primordial_pk[index_ic1_ic2] * 0.5 * (transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_nc[index_d1] + transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lcmb]) * factor; } } if (_scalars_ && (psp->has_ll == _TRUE_)) { index_ct = 0; for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { for (index_d2 = index_d1; index_d2 <= MIN(index_d1 + psp->non_diag, psp->d_size - 1); index_d2++) { cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_ll + index_ct] = primordial_pk[index_ic1_ic2] * transfer_ic1[ptr->index_tt_lensing + index_d1] * transfer_ic2[ptr->index_tt_lensing + index_d2] * factor; index_ct++; } } } if (_scalars_ && (psp->has_tl == _TRUE_)) { for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_tl + index_d1] = primordial_pk[index_ic1_ic2] * 0.5 * (transfer_ic1_temp * transfer_ic2[ptr->index_tt_lensing + index_d1] + transfer_ic1[ptr->index_tt_lensing 
+ index_d1] * transfer_ic2_temp) * factor; } } if (_scalars_ && (psp->has_dl == _TRUE_)) { index_ct = 0; for (index_d1 = 0; index_d1 < psp->d_size; index_d1++) { for (index_d2 = MAX(index_d1 - psp->non_diag, 0); index_d2 <= MIN(index_d1 + psp->non_diag, psp->d_size - 1); index_d2++) { cl_integrand[index_q * cl_integrand_num_columns + 1 + psp->index_ct_dl + index_ct] = primordial_pk[index_ic1_ic2] * transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lensing + index_d2] * factor; index_ct++; } } } } for (index_ct = 0; index_ct < psp->ct_size; index_ct++) { /* treat null spectra (C_l^BB of scalars, C_l^pp of tensors, etc. */ if ((_scalars_ && (psp->has_bb == _TRUE_) && (index_ct == psp->index_ct_bb)) || (_tensors_ && (psp->has_pp == _TRUE_) && (index_ct == psp->index_ct_pp)) || (_tensors_ && (psp->has_tp == _TRUE_) && (index_ct == psp->index_ct_tp)) || (_tensors_ && (psp->has_ep == _TRUE_) && (index_ct == psp->index_ct_ep)) || (_tensors_ && (psp->has_dd == _TRUE_) && (index_ct == psp->index_ct_dd)) || (_tensors_ && (psp->has_td == _TRUE_) && (index_ct == psp->index_ct_td)) || (_tensors_ && (psp->has_pd == _TRUE_) && (index_ct == psp->index_ct_pd)) || (_tensors_ && (psp->has_ll == _TRUE_) && (index_ct == psp->index_ct_ll)) || (_tensors_ && (psp->has_tl == _TRUE_) && (index_ct == psp->index_ct_tl)) || (_tensors_ && (psp->has_dl == _TRUE_) && (index_ct == psp->index_ct_dl)) ) { psp->cl[index_md] [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = 0.; } /* for non-zero spectra, integrate over q */ else { /* spline the integrand over the whole range of k's */ class_call(array_spline(cl_integrand, cl_integrand_num_columns, ptr->q_size, 0, 1 + index_ct, 1 + psp->ct_size + index_ct, _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, psp->error_message); /* * Technical point: we will now do a spline integral over the * whole range of k's, excepted in the closed (K>0) case. In that * case, it is a bad idea to spline over the values of k * corresponding to nu<nu_flat_approximation. In this region, nu * values are integer values, so the steps dq and dk have some * discrete jumps. This makes the spline routine less accurate * than a trapezoidal integral with finer sampling. So, in the * closed case, we set index_q_spline to * ptr->index_q_flat_approximation, to tell the integration * routine that below this index, it should treat the integral as * a trapezoidal one. For testing, one is free to set * index_q_spline to 0, to enforce spline integration everywhere, * or to (ptr->q_size-1), to enforce trapezoidal integration * everywhere. */ if (pba->sgnK == 1) { index_q_spline = ptr->index_q_flat_approximation; } class_call(array_integrate_all_trapzd_or_spline(cl_integrand, cl_integrand_num_columns, ptr->q_size, index_q_spline, 0, 1 + index_ct, 1 + psp->ct_size + index_ct, &clvalue, psp->error_message), psp->error_message, psp->error_message); /* * in the closed case, instead of an integral, we have a discrete * sum. In practice, this does not matter: the previous routine * does give a correct approximation of the discrete sum, both in * the trapezoidal and spline regions. The only error comes from * the first point: the previous routine assumes a weight for the * first point which is too small compared to what it would be in * the an actual discrete sum. The line below correct this * problem in an exact way. */ if (pba->sgnK == 1) { clvalue += cl_integrand[1 + index_ct] * ptr->q[0] / ptr->k[0][0] * sqrt(pba->K) / 2.; } /* * we have the correct C_l now. 
We can store it in the transfer * structure. */ psp->cl[index_md] [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = clvalue; } } if (ppt->has_cl_number_count == _TRUE_) { free(transfer_ic1_nc); free(transfer_ic2_nc); } return _SUCCESS_; } /* deprecated functions (since v2.8) */ /** * Matter power spectrum for arbitrary redshift and for all initial conditions. * * This function is deprecated since v2.8. Try using nonlinear_pk_at_z() instead. * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param mode Input: linear or logarithmic * @param z Input: redshift * @param output_tot Output: total matter power spectrum P(k) in \f$ Mpc^3 \f$ (linear mode), or its logarithms (logarithmic mode) * @param output_ic Output: for each pair of initial conditions, matter power spectra P(k) in \f$ Mpc^3 \f$ (linear mode), or their logarithms and cross-correlation angles (logarithmic mode) * @param output_cb_tot Output: CDM+baryon power spectrum P_cb(k) in \f$ Mpc^3 \f$ (linear mode), or its logarithms (logarithmic mode) * @param output_cb_ic Output: for each pair of initial conditions, CDM+baryon power spectra P_cb(k) in \f$ Mpc^3 \f$ (linear mode), or their logarithms and cross-correlation angles (logarithmic mode) * @return the error status */ int spectra_pk_at_z( struct background *pba, struct spectra *psp, enum linear_or_logarithmic mode, double z, double *output_tot, /* array with argument * output_tot[index_k] (must be * already allocated) */ double *output_ic, /* array with argument * output_tot[index_k * * psp->ic_ic_size[index_md] + * index_ic1_ic2] (must be already * allocated only if more than one * initial condition) */ double *output_cb_tot, /* same as output_tot for the * baryon+CDM only */ double *output_cb_ic /* same as output_ic for the * baryon+CDM only */ ) { fprintf(stderr, " -> [WARNING:] You are calling the function spectra_pk_at_z() which is deprecated since v2.8. Try using nonlinear_pk_at_z() instead.\n"); class_call(nonlinear_pks_at_z( pba, psp->pnl, mode, pk_linear, z, output_tot, output_ic, output_cb_tot, output_cb_ic ), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * Matter power spectrum for arbitrary wavenumber, redshift and initial condition. * * This function is deprecated since v2.8. Try using nonlinear_pk_linear_at_k_and_z() instead. 
* * @param pba Input: pointer to background structure (used for converting z into tau) * @param ppm Input: pointer to primordial structure (used only in the case 0 < k < kmin) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param pk_tot Output: total matter power spectrum P(k) in \f$ Mpc^3 \f$ * @param pk_ic Output: for each pair of initial conditions, matter power spectra P(k) in \f$ Mpc^3\f$ * @param pk_cb_tot Output: b+CDM power spectrum P(k) in \f$ Mpc^3 \f$ * @param pk_cb_ic Output: for each pair of initial conditions, b+CDM power spectra P(k) in \f$ Mpc^3\f$ * @return the error status */ int spectra_pk_at_k_and_z( struct background *pba, struct primordial *ppm, struct spectra *psp, double k, double z, double *pk_tot, /* pointer to a single number (must * be already allocated) */ double *pk_ic, /* array of argument * pk_ic[index_ic1_ic2] (must be * already allocated only if several * initial conditions) */ double *pk_cb_tot, /* same as pk_tot for * baryon+CDM part only */ double *pk_cb_ic /* same as pk_ic for baryon+CDM part * only */ ) { fprintf(stderr, " -> [WARNING:] You are calling the function spectra_pk_at_k_and_z() which is deprecated since v2.8. Try using nonlinear_pk_linear_at_k_and_z() instead.\n"); class_call(nonlinear_pks_at_k_and_z(pba, ppm, psp->pnl, pk_linear, k, z, pk_tot, pk_ic, pk_cb_tot, pk_cb_ic), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * Non-linear total matter power spectrum for arbitrary redshift. * * This function is deprecated since v2.8. Try using nonlinear_pk_at_z() instead. * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param mode Input: linear or logarithmic * @param z Input: redshift * @param output_tot Output: total matter power spectrum P(k) in \f$ Mpc^3\f$ (linear mode), or its logarithms (logarithmic mode) * @param output_cb_tot Output: b+CDM power spectrum P(k) in \f$ Mpc^3\f$ (linear mode), or its logarithms (logarithmic mode) * @return the error status */ int spectra_pk_nl_at_z( struct background *pba, struct spectra *psp, enum linear_or_logarithmic mode, double z, double *output_tot, /* array with argument * output_tot[index_k] (must be * already allocated) */ double *output_cb_tot ) { fprintf(stderr, " -> [WARNING:] You are calling the function spectra_pk_nl_at_z() which is deprecated since v2.8. Try using nonlinear_pk_at_z() instead.\n"); class_call(nonlinear_pks_at_z(pba, psp->pnl, mode, pk_nonlinear, z, output_tot, NULL, output_cb_tot, NULL ), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * Non-linear total matter power spectrum for arbitrary wavenumber and redshift. * * This function is deprecated since v2.8. Try using nonlinear_pk_at_k_and_z() instead. 
* * @param pba Input: pointer to background structure (used for converting z into tau) * @param ppm Input: pointer to primordial structure (used only in the case 0 < k < kmin) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param pk_tot Output: total matter power spectrum P(k) in \f$ Mpc^3\f$ * @param pk_cb_tot Output: b+CDM power spectrum P(k) in \f$ Mpc^3\f$ * @return the error status */ int spectra_pk_nl_at_k_and_z( struct background *pba, struct primordial *ppm, struct spectra *psp, double k, double z, double *pk_tot, /* pointer to a single number * (must be already * allocated) */ double *pk_cb_tot /* same as pk_tot for * baryon+CDM only */ ) { fprintf(stderr, " -> [WARNING:] You are calling the function spectra_pk_nl_at_k_and_z() which is deprecated since v2.8. Try using nonlinear_pk_at_k_and_z() instead.\n"); class_call(nonlinear_pks_at_k_and_z(pba, ppm, psp->pnl, pk_nonlinear, k, z, pk_tot, NULL, pk_cb_tot, NULL ), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * Return the P(k,z) for a grid of (k_i,z_j) passed in input, * for all available pk types (_m, _cb), * either linear or nonlinear depending on input. * * This function is deprecated since v2.8. Try using nonlinear_pks_at_kvec_and_zvec() instead. * * @param pba Input: pointer to background structure * @param psp Input: pointer to spectra structure * @param kvec Input: array of wavenumbers in ascending order (in 1/Mpc) * @param kvec_size Input: size of array of wavenumbers * @param zvec Input: array of redshifts in arbitrary order * @param zvec_size Input: size of array of redshifts * @param pk_tot_out Output: P(k_i,z_j) for total matter (if available) in Mpc**3 * @param pk_cb_tot_out Output: P_cb(k_i,z_j) for cdm+baryons (if available) in Mpc**3 * @param nonlinear Input: _TRUE_ or _FALSE_ (to output nonlinear or linear P(k,z)) * @return the error status */ int spectra_fast_pk_at_kvec_and_zvec( struct background *pba, struct spectra *psp, double *kvec, int kvec_size, double *zvec, int zvec_size, double *pk_tot_out, //pk_tot_out[index_zvec * kvec_size + index_kvec], //already allocated // (or NULL if user knows there is no _m output) double *pk_cb_tot_out, //idem int nonlinear ) { enum pk_outputs pk_output; fprintf(stderr, " -> [WARNING:] You are calling the function spectra_fast_pks_at_kvec_and_zvec() which is deprecated since v2.8. Try using nonlinear_pk_at_kvec_and_zvec() instead.\n"); if (nonlinear == _TRUE_) pk_output = pk_nonlinear; else pk_output = pk_linear; class_call(nonlinear_pks_at_kvec_and_zvec( pba, psp->pnl, pk_output, kvec, kvec_size, zvec, zvec_size, pk_tot_out, pk_cb_tot_out), psp->pnl->error_message, psp->error_message); return _SUCCESS_; } /** * This routine computes sigma(R) given P(k) for total matter power * spectrum (does not check that k_max is large enough) * * This function is deprecated since v2.8. Try using nonlinear_sigmas_at_z() instead. 
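*
* For reference, the quantity computed is the standard variance of the
* density field smoothed on scale R,
*
*   sigma^2(R) = int (dk/k) [k^3 P(k) / (2 pi^2)] W^2(kR),
*
* with W(x) = 3 (sin x - x cos x) / x^3 the Fourier transform of a
* spherical top-hat window of radius R.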
* * @param pba Input: pointer to background structure * @param ppm Input: pointer to primordial structure * @param psp Input: pointer to spectra structure * @param R Input: radius in Mpc * @param z Input: redshift * @param sigma Output: variance in a sphere of radius R (dimensionless) * @return the error status */ int spectra_sigma( struct background *pba, struct primordial *ppm, struct spectra *psp, double R, double z, double *sigma ) { fprintf(stderr, " -> [WARNING:] You are calling the function spectra_sigma() which is deprecated since v2.8. Try using nonlinear_sigmas_at_z() instead.\n"); if (psp->pnl->has_pk_m) { class_call(nonlinear_sigma_at_z(pba, psp->pnl, R, z, psp->pnl->index_pk_m, 80., //hardcoded, yes, but the function is deprecated... sigma), psp->pnl->error_message, psp->error_message); } return _SUCCESS_; } /** * This routine computes sigma(R) given P(k) for baryon+cdm power * spectrum (does not check that k_max is large enough) * * This function is deprecated since v2.8. Try using nonlinear_sigmas_at_z() instead. * * @param pba Input: pointer to background structure * @param ppm Input: pointer to primordial structure * @param psp Input: pointer to spectra structure * @param R Input: radius in Mpc * @param z Input: redshift * @param sigma_cb Output: variance in a sphere of radius R (dimensionless) * @return the error status */ int spectra_sigma_cb( struct background *pba, struct primordial *ppm, struct spectra *psp, double R, double z, double *sigma_cb ) { fprintf(stderr, " -> [WARNING:] You are calling the function spectra_sigma_cb() which is deprecated since v2.8. Try using nonlinear_sigmas_at_z() instead.\n"); if (psp->pnl->has_pk_cb) { class_call(nonlinear_sigma_at_z(pba, psp->pnl, R, z, psp->pnl->index_pk_cb, 80., //hardcoded, yes, but the function is deprecated... 
sigma_cb), psp->pnl->error_message, psp->error_message); } return _SUCCESS_; } /* deprecated functions (since v2.1) */ /** * Obsolete function, superseded by perturb_sources_at_tau() * (at the time of the switch, this function was anyway never used anywhere) * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param z Input: redshift * @param output Output: matter transfer functions * @return the error status */ int spectra_tk_at_z( struct background *pba, struct spectra *psp, double z, double *output /* array with argument output[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr] (must be already allocated) */ ) { class_stop(psp->error_message, "The function spectra_tk_at_z() is obsolete, use perturb_sources_at_tau() instead, it does the same"); return _SUCCESS_; } /** * Obsolete function, superseded by perturb_sources_at_tau() * (at the time of the switch, this function was anyway never used anywhere) * * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param output Output: matter transfer functions * @return the error status */ int spectra_tk_at_k_and_z( struct background *pba, struct spectra *psp, double k, double z, double *output /* array with argument output[index_ic*psp->tr_size+index_tr] (must be already allocated) */ ) { class_stop(psp->error_message, "The function spectra_tk_at_k_and_z() is obsolete, use perturb_sources_at_tau() instead, it does the same provided that you interpolate its output at some wavenumber k"); return _SUCCESS_; } /* end deprecated functions */
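/* Illustrative usage sketch (not part of CLASS itself; the buffer name and
   error handling are schematic, and the input structures are assumed to have
   been initialized by the earlier modules):

     struct spectra sp;
     sp.spectra_verbose = 1;
     if (spectra_init(&ppr, &pba, &ppt, &ppm, &pnl, &ptr, &sp) == _FAILURE_) {
       fprintf(stderr, "%s\n", sp.error_message);
       return _FAILURE_;
     }
     double *cl_tot = malloc(sp.ct_size * sizeof(double));
     spectra_cl_at_l(&sp, 100., cl_tot, NULL, NULL);  // NULLs are acceptable
                                                      // only for a single mode
                                                      // and initial condition
     free(cl_tot);
     spectra_free(&sp);
*/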
tiny_exr_loader.h
/* Copyright (c) 2014 - 2018, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- #ifndef TINYEXR_H_ #define TINYEXR_H_ // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... // #define TINYEXR_IMPLEMENTATION // #include "tinyexr.h" // // #include <stddef.h> // for size_t #include <stdint.h> // assume stdint.h is available (C99) #ifdef __cplusplus extern "C" { #endif // Use embedded miniz or not to decode ZIP-compressed pixel data. Linking with // zlib is required if this flag is 0. #ifndef TINYEXR_USE_MINIZ #define TINYEXR_USE_MINIZ (1) #endif // Disable PIZ compression when applying cpplint. #ifndef TINYEXR_USE_PIZ #define TINYEXR_USE_PIZ (1) #endif #ifndef TINYEXR_USE_ZFP #define TINYEXR_USE_ZFP (0) // TinyEXR extension. // http://computation.llnl.gov/projects/floating-point-compression #endif #define TINYEXR_SUCCESS (0) #define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1) #define TINYEXR_ERROR_INVALID_EXR_VERSION (-2) #define TINYEXR_ERROR_INVALID_ARGUMENT (-3) #define TINYEXR_ERROR_INVALID_DATA (-4) #define TINYEXR_ERROR_INVALID_FILE (-5) #define TINYEXR_ERROR_INVALID_PARAMETER (-5) #define TINYEXR_ERROR_CANT_OPEN_FILE (-6) #define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7) #define TINYEXR_ERROR_INVALID_HEADER (-8) #define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-9) // @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf } // pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2 #define TINYEXR_PIXELTYPE_UINT (0) #define TINYEXR_PIXELTYPE_HALF (1) #define TINYEXR_PIXELTYPE_FLOAT (2) #define TINYEXR_MAX_HEADER_ATTRIBUTES (1024) #define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128) #define TINYEXR_COMPRESSIONTYPE_NONE (0) #define TINYEXR_COMPRESSIONTYPE_RLE (1) #define TINYEXR_COMPRESSIONTYPE_ZIPS (2) #define TINYEXR_COMPRESSIONTYPE_ZIP (3) #define TINYEXR_COMPRESSIONTYPE_PIZ (4) #define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension #define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0) #define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1) #define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2) #define TINYEXR_TILE_ONE_LEVEL (0) #define TINYEXR_TILE_MIPMAP_LEVELS (1) #define TINYEXR_TILE_RIPMAP_LEVELS (2) #define TINYEXR_TILE_ROUND_DOWN (0) #define TINYEXR_TILE_ROUND_UP (1) typedef struct _EXRVersion { int version; // this must be 2 int tiled; // tile format image int long_name; // long name attribute int non_image; // deep image (EXR 2.0) int multipart; // multi-part (EXR 2.0) } EXRVersion; typedef struct _EXRAttribute { char name[256]; // name and type are up to 255 chars long. char type[256]; unsigned char *value; // uint8_t* int size; int pad0; } EXRAttribute; typedef struct _EXRChannelInfo { char name[256]; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } EXRChannelInfo; typedef struct _EXRTile { int offset_x; int offset_y; int level_x; int level_y; int width; // actual width in a tile. int height; // actual height in a tile. unsigned char **images; // image[channels][pixels] } EXRTile; typedef struct _EXRHeader { float pixel_aspect_ratio; int line_order; int data_window[4]; int display_window[4]; float screen_window_center[2]; float screen_window_width; int chunk_count; // Properties for tiled format (`tiledesc`). 
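// (The tile-related fields below are meaningful only for tiled images, i.e.
// when `tiled` is non-zero.)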
int tiled; int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; int long_name; int non_image; int multipart; unsigned int header_len; // Custom attributes (excludes required attributes, e.g. `channels`, // `compression`, etc.) int num_custom_attributes; EXRAttribute *custom_attributes; // array of EXRAttribute. size = // `num_custom_attributes`. EXRChannelInfo *channels; // [num_channels] int *pixel_types; // Loaded pixel type (TINYEXR_PIXELTYPE_*) of `images` for // each channel. This is overwritten with `requested_pixel_types` when // loading. int num_channels; int compression_type; // compression type (TINYEXR_COMPRESSIONTYPE_*) int *requested_pixel_types; // Filled initially by // ParseEXRHeaderFrom(Memory|File), then users // can edit it (only valid for HALF pixel type // channels) } EXRHeader; typedef struct _EXRMultiPartHeader { int num_headers; EXRHeader *headers; } EXRMultiPartHeader; typedef struct _EXRImage { EXRTile *tiles; // Tiled pixel data. The application must reconstruct image // from tiles manually. NULL if scanline format. unsigned char **images; // image[channels][pixels]. NULL if tiled format. int width; int height; int num_channels; // Properties for tile format. int num_tiles; } EXRImage; typedef struct _EXRMultiPartImage { int num_images; EXRImage *images; } EXRMultiPartImage; typedef struct _DeepImage { const char **channel_names; float ***image; // image[channels][scanlines][samples] int **offset_table; // offset_table[scanline][offsets] int num_channels; int width; int height; int pad0; } DeepImage; // @deprecated { to be removed. } // Loads single-frame OpenEXR image. Assumes EXR image contains A (single- // channel alpha) or RGB(A) channels. // Application must free image data as returned by `out_rgba`. // Result image format is: float x RGBA x width x height // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err); // @deprecated { to be removed. } // Saves single-frame OpenEXR image. Assumes EXR image contains RGB(A) channels. // components must be 1 (Grayscale), 3 (RGB) or 4 (RGBA). // Input image format is: `float x width x height`, or `float x RGB(A) x width x // height` // Saves image as fp16 (HALF) format when `save_as_fp16` is a positive non-zero // value. // Saves image as fp32 (FLOAT) format when `save_as_fp16` is 0. extern int SaveEXR(const float *data, const int width, const int height, const int components, const int save_as_fp16, const char *filename); // Initialize EXRHeader struct extern void InitEXRHeader(EXRHeader *exr_header); // Initialize EXRImage struct extern void InitEXRImage(EXRImage *exr_image); // Frees internal data of EXRHeader struct extern int FreeEXRHeader(EXRHeader *exr_header); // Frees internal data of EXRImage struct extern int FreeEXRImage(EXRImage *exr_image); // Frees error message extern void FreeEXRErrorMessage(const char *msg); // Parse EXR version header of a file. extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename); // Parse EXR version header from memory-mapped EXR data. extern int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size); // Parse single-part OpenEXR header from a file and initialize `EXRHeader`. 
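// A typical single-part, scanline loading sequence looks like this
// (illustrative sketch; the file name and variable names are hypothetical):
//
//   EXRVersion version;
//   EXRHeader header;
//   EXRImage image;
//   const char *err = NULL;
//   ParseEXRVersionFromFile(&version, "input.exr");
//   InitEXRHeader(&header);
//   if (ParseEXRHeaderFromFile(&header, &version, "input.exr", &err) !=
//       TINYEXR_SUCCESS) {
//     fprintf(stderr, "%s\n", err);
//     FreeEXRErrorMessage(err);
//   }
//   InitEXRImage(&image);
//   LoadEXRImageFromFile(&image, &header, "input.exr", &err);
//   // pixel data: image.images[channel][y * image.width + x]
//   FreeEXRImage(&image);
//   FreeEXRHeader(&header);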
// When there was an error message, the application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version, const char *filename, const char **err); // Parse single-part OpenEXR header from memory and initialize `EXRHeader`. // When there was an error message, the application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromMemory(EXRHeader *header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*` // array. // When there was an error message, the application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*` // array. // When there was an error message, the application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // The application must set up `EXRHeader` with `ParseEXRHeaderFromFile` before // calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, the application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from memory. // The application must set up `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, the application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // The application must set up an `EXRHeader*` array with // `ParseEXRMultipartHeaderFromFile` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, the application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from memory. // The application must set up an `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, the application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err); // Saves multi-channel, single-frame OpenEXR image to a file. 
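// A minimal save sketch (illustrative; assumes `header` and `image` have
// already been filled with valid channel, pixel-type and pixel data):
//
//   const char *err = NULL;
//   if (SaveEXRImageToFile(&image, &header, "out.exr", &err) !=
//       TINYEXR_SUCCESS) {
//     fprintf(stderr, "%s\n", err);
//     FreeEXRErrorMessage(err);
//   }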
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int SaveEXRImageToFile(const EXRImage *image,
                              const EXRHeader *exr_header,
                              const char *filename, const char **err);

// Saves multi-channel, single-frame OpenEXR image to memory.
// The image is compressed using the compression type specified in
// `EXRHeader` (`compression_type`).
// Returns the number of bytes written on success, or 0 when there's an
// error; an error string may be set in `err`.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern size_t SaveEXRImageToMemory(const EXRImage *image,
                                   const EXRHeader *exr_header,
                                   unsigned char **memory, const char **err);

// Loads single-frame OpenEXR deep image.
// Application must free the memory of the DeepImage members (`image`,
// `offset_table`).
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
                       const char **err);

// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
//                        const char **err);

// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free the memory of the DeepImage members (`image`,
// `offset_table`).
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts,
//                                 const char *filename, const char **err);

// For emscripten.
// Loads single-frame OpenEXR image from memory. Assumes the EXR image
// contains RGB(A) channels.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is returned, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                             const unsigned char *memory, size_t size,
                             const char **err);

#ifdef __cplusplus
}
#endif

#endif  // TINYEXR_H_

#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <sstream>

#include <limits>
#include <string>
#include <vector>

#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif  // __cplusplus > 199711L

#ifdef _OPENMP
#include <omp.h>
#endif

#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif

#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif

namespace tinyexr {

#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif #if TINYEXR_USE_MINIZ namespace miniz { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #pragma clang diagnostic ignored "-Wundef" #if __has_warning("-Wcomma") #pragma clang diagnostic ignored "-Wcomma" #endif #if __has_warning("-Wmacro-redefined") #pragma clang diagnostic ignored "-Wmacro-redefined" #endif #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #if __has_warning("-Wtautological-constant-compare") #pragma clang diagnostic ignored "-Wtautological-constant-compare" #endif #endif /* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <[email protected]>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks [email protected]) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. 
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From [email protected] and [email protected] - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. - Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <[email protected]> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. 
The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. 
The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). */ #ifndef MINIZ_HEADER_INCLUDED #define MINIZ_HEADER_INCLUDED //#include <stdlib.h> // Defines to completely disable specific portions of miniz.c: // If all macros here are defined the only functionality remaining will be // CRC-32, adler-32, tinfl, and tdefl. // Define MINIZ_NO_STDIO to disable all usage and any functions which rely on // stdio for file I/O. //#define MINIZ_NO_STDIO // If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able // to get the current time, or // get/set file times, and the C run-time funcs that get/set times won't be // called. // The current downside is the times written to your archives will be from 1979. #define MINIZ_NO_TIME // Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. 
#define MINIZ_NO_ARCHIVE_APIS // Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive // API's. //#define MINIZ_NO_ARCHIVE_WRITING_APIS // Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression // API's. //#define MINIZ_NO_ZLIB_APIS // Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent // conflicts against stock zlib. //#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. // Note if MINIZ_NO_MALLOC is defined then the user must always provide custom // user alloc/free/realloc // callbacks to the zlib and archive API's, and a few stand-alone helper API's // which don't provide custom user // functions (such as tdefl_compress_mem_to_heap() and // tinfl_decompress_mem_to_heap()) won't work. //#define MINIZ_NO_MALLOC #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) // TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc // on Linux #define MINIZ_NO_TIME #endif #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) //#include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #endif #if MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient // integer loads and stores from unaligned addresses. //#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \ 0 // disable to suppress compiler warnings #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are // reasonably fast (and don't involve compiler generated calls to helper // functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some // parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! typedef unsigned long mz_ulong; // mz_free() internally uses the MZ_FREE() macro (which by default calls free() // unless you've modified the MZ_MALLOC macro) to release a block allocated from // the heap. void mz_free(void *p); #define MZ_ADLER32_INIT (1) // mz_adler32() returns the initial adler-32 value to use when called with // ptr==NULL. mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len); #define MZ_CRC32_INIT (0) // mz_crc32() returns the initial CRC-32 value to use when called with // ptr==NULL. mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len); // Compression strategies. enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 }; // Method #define MZ_DEFLATED 8 #ifndef MINIZ_NO_ZLIB_APIS // Heap allocation callbacks. // Note that mz_alloc_func parameter types purpsosely differ from zlib's: // items/size is size_t, not unsigned long. 
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size); typedef void(*mz_free_func)(void *opaque, void *address); typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size); #define MZ_VERSION "9.1.15" #define MZ_VERNUM 0x91F0 #define MZ_VER_MAJOR 9 #define MZ_VER_MINOR 1 #define MZ_VER_REVISION 15 #define MZ_VER_SUBREVISION 0 // Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The // other values are for advanced use (refer to the zlib docs). enum { MZ_NO_FLUSH = 0, MZ_PARTIAL_FLUSH = 1, MZ_SYNC_FLUSH = 2, MZ_FULL_FLUSH = 3, MZ_FINISH = 4, MZ_BLOCK = 5 }; // Return status codes. MZ_PARAM_ERROR is non-standard. enum { MZ_OK = 0, MZ_STREAM_END = 1, MZ_NEED_DICT = 2, MZ_ERRNO = -1, MZ_STREAM_ERROR = -2, MZ_DATA_ERROR = -3, MZ_MEM_ERROR = -4, MZ_BUF_ERROR = -5, MZ_VERSION_ERROR = -6, MZ_PARAM_ERROR = -10000 }; // Compression levels: 0-9 are the standard zlib-style levels, 10 is best // possible compression (not zlib compatible, and may be very slow), // MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. is currently only enabled when // MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflate(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything. 
Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. // On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). // MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. 
// MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION 
MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. #ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif // ------------------- ZIP archive reading/writing #ifndef MINIZ_NO_ARCHIVE_APIS enum { MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 }; typedef struct { mz_uint32 m_file_index; mz_uint32 m_central_dir_ofs; mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME time_t m_time; #endif mz_uint32 m_crc32; mz_uint64 m_comp_size; mz_uint64 m_uncomp_size; mz_uint16 m_internal_attr; mz_uint32 m_external_attr; mz_uint64 m_local_header_ofs; mz_uint32 m_comment_size; char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t(*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t(*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef struct mz_zip_archive_tag { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; mz_uint m_total_files; mz_zip_mode m_zip_mode; mz_uint m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 } mz_zip_flags; // ZIP archive reading // Inits a ZIP archive reader. // These functions read and validate the archive's central directory. mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); #endif // Returns the total number of files in the archive. mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); // Returns detailed information about an archive file entry. mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); // Determines if an archive file entry is a directory entry. mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); // Retrieves the filename of an archive file entry. 
// Returns the number of bytes written to pFilename, or if filename_buf_size
// is 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename,
                                   mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags);
// Extracts an archive file using a callback function to output the file's
// data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow
// efficient in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be // growable using the realloc callback (which defaults to realloc unless you've // overridden it). // Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's // user provided m_pWrite function cannot be NULL. // Note: In-place archive modification is not recommended unless you know what // you're doing, because if execution stops or something goes wrong before // the archive is finalized the file's central directory will be hosed. mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename); // Adds the contents of a memory buffer to an archive. These functions record // the current local time into the archive. // To add a directory entry, call this method with an archive name ending in a // forwardslash with empty buffer. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags); mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32); #ifndef MINIZ_NO_STDIO // Adds the contents of a disk file to an archive. This function also records // the disk file's modified time into the archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); #endif // Adds a file to an archive by fully cloning the data from another archive. // This function fully clones the source file's compressed data (no // recompression), along with its full filename, extra data, and comment fields. mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index); // Finalizes the archive by writing the central directory records followed by // the end of central directory record. // After an archive is finalized, the only valid call on the mz_zip_archive // struct is mz_zip_writer_end(). // An archive must be manually finalized by calling this function for it to be // valid. mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize); // Ends archive writing, freeing all allocations, and closing the output file if // mz_zip_writer_init_file() was used. // Note for the archive to be valid, it must have been finalized before ending. mz_bool mz_zip_writer_end(mz_zip_archive *pZip); // Misc. high-level helper functions: // mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) // appends a memory blob to a ZIP archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. 
mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); // Reads a single file from an archive into a heap block. // Returns NULL on failure. void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS // ------------------- Low-level Decompression API Definitions // Decompression flags used by tinfl_decompress(). // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and // ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the // input is a raw deflate stream. // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available // beyond the end of the supplied input buffer. If clear, the input buffer // contains all remaining input. // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large // enough to hold the entire decompressed stream. If clear, the output buffer is // at least the size of the dictionary (typically 32KB). // TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int(*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. 
#define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly // slower, and raw/dynamic blocks will be output more frequently). #define TDEFL_LESS_MEMORY 0 // tdefl_init() compression flags logically OR'd together (low 12 bits contain // the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes // per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap // compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before // the deflate data, and the Adler-32 of the source data at the end. Otherwise, // you'll get raw deflate data. // TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). 
enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against // the above flags. Higher probes are slower but improve compression. // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pOut_len will be set to the compressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must free() the returned block when it's no longer needed. void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tdefl_compress_mem_to_mem() compresses a block in memory to another block in // memory. // Returns 0 on failure. size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // Compresses an image to a compressed PNG file in memory. // On entry: // pImage, w, h, and num_chans describe the image to compress. num_chans may be // 1, 2, 3, or 4. // The image pitch in bytes per scanline will be w*num_chans. The leftmost // pixel on the top scanline is stored first in memory. // level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL // If flip is true, the image will be flipped on the Y axis (useful for OpenGL // apps). // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pLen_out will be set to the size of the PNG image file. // The caller must mz_free() the returned heap block (which will typically be // larger than *pLen_out) when it's no longer needed. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip); void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); // Output stream interface. The compressor uses this interface to write // compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. typedef mz_bool(*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); // tdefl_compress_mem_to_output() compresses a block to an output stream. The // above helpers use this function internally. mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 }; // TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed // output block (using static/fixed Huffman codes). 
#if TDEFL_LESS_MEMORY enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #else enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #endif // The low-level tdefl functions below may be used directly if the above helper // functions aren't flexible enough. The low-level functions don't make any heap // allocations, unlike the above helper functions. typedef enum { TDEFL_STATUS_BAD_PARAM = -2, TDEFL_STATUS_PUT_BUF_FAILED = -1, TDEFL_STATUS_OKAY = 0, TDEFL_STATUS_DONE = 1 } tdefl_status; // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums typedef enum { TDEFL_NO_FLUSH = 0, TDEFL_SYNC_FLUSH = 2, TDEFL_FULL_FLUSH = 3, TDEFL_FINISH = 4 } tdefl_flush; // tdefl's compression state structure. typedef struct { tdefl_put_buf_func_ptr m_pPut_buf_func; void *m_pPut_buf_user; mz_uint m_flags, m_max_probes[2]; int m_greedy_parsing; mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size; mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end; mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, m_bit_buffer; mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, m_wants_to_finish; tdefl_status m_prev_return_status; const void *m_pIn_buf; void *m_pOut_buf; size_t *m_pIn_buf_size, *m_pOut_buf_size; tdefl_flush m_flush; const mz_uint8 *m_pSrc; size_t m_src_buf_left, m_out_buf_ofs; mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1]; mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE]; mz_uint16 m_next[TDEFL_LZ_DICT_SIZE]; mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE]; mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE]; } tdefl_compressor; // Initializes the compressor. // There is no corresponding deinit() function because the tdefl API's do not // dynamically allocate memory. // pBut_buf_func: If NULL, output data will be supplied to the specified // callback. In this case, the user should call the tdefl_compress_buffer() API // for compression. // If pBut_buf_func is NULL the user should always call the tdefl_compress() // API. // flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, // etc.) tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); // Compresses a block of data, consuming as much of the specified input buffer // as possible, and writing as much compressed data to the specified output // buffer as possible. tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush); // tdefl_compress_buffer() is only usable when the tdefl_init() is called with a // non-NULL tdefl_put_buf_func_ptr. // tdefl_compress_buffer() always consumes the entire input buffer. 
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush); tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d); mz_uint32 tdefl_get_adler32(tdefl_compressor *d); // Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't // defined, because it uses some of its macros. #ifndef MINIZ_NO_ZLIB_APIS // Create tdefl_compress() flags given zlib-style compression parameters. // level may range from [0,10] (where 10 is absolute max compression, but may be // much slower on some files) // window_bits may be -15 (raw deflate) or 15 (zlib) // strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, // MZ_RLE, or MZ_FIXED mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy); #endif // #ifndef MINIZ_NO_ZLIB_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_INCLUDED // ------------------- End of Header: Implementation follows. (If you only want // the header, define MINIZ_HEADER_FILE_ONLY.) #ifndef MINIZ_HEADER_FILE_ONLY typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; //#include <assert.h> //#include <string.h> #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) (void)x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b)) #define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj)) #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN #define MZ_READ_LE16(p) *((const mz_uint16 *)(p)) #define MZ_READ_LE32(p) *((const mz_uint32 *)(p)) #else #define MZ_READ_LE16(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U)) #define MZ_READ_LE32(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U)) #endif #ifdef _MSC_VER #define MZ_FORCEINLINE __forceinline #elif defined(__GNUC__) #define MZ_FORCEINLINE inline __attribute__((__always_inline__)) #else #define MZ_FORCEINLINE inline #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API's mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552; if (!ptr) return MZ_ADLER32_INIT; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } return (s2 << 16) + s1; } // Karl Malbrain's compact CRC-32. 
See "A compact CCITT crc16 and crc32 C // implementation that balances processor cache usage against speed": // http://www.geocities.com/malbrain/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c }; mz_uint32 crcu32 = (mz_uint32)crc; if (!ptr) return MZ_CRC32_INIT; crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; } return ~crcu32; } void mz_free(void *p) { MZ_FREE(p); } #ifndef MINIZ_NO_ZLIB_APIS static void *def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); } static void def_free_func(void *opaque, void *address) { (void)opaque, (void)address; MZ_FREE(address); } // static void *def_realloc_func(void *opaque, void *address, size_t items, // size_t size) { // (void)opaque, (void)address, (void)items, (void)size; // return MZ_REALLOC(address, items * size); //} const char *mz_version(void) { return MZ_VERSION; } int mz_deflateInit(mz_streamp pStream, int level) { return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy) { tdefl_compressor *pComp; mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); if (!pStream) return MZ_STREAM_ERROR; if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = MZ_ADLER32_INIT; pStream->msg = NULL; pStream->reserved = 0; pStream->total_in = 0; pStream->total_out = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor)); if (!pComp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pComp; if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { mz_deflateEnd(pStream); return MZ_PARAM_ERROR; } return MZ_OK; } int mz_deflateReset(mz_streamp pStream) { if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR; pStream->total_in = pStream->total_out = 0; tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags); return MZ_OK; } int mz_deflate(mz_streamp pStream, int flush) { size_t in_bytes, out_bytes; mz_ulong orig_total_in, orig_total_out; int mz_status = MZ_OK; if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR; if (!pStream->avail_out) return MZ_BUF_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE) return (flush == MZ_FINISH) ? 
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for (;;) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; // Can't make forward progress without some input. } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; // This is really over conservative. (And lame, but it's actually pretty // tricky to compute a true upper bound given the way tdefl's blocking works.) return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? 
MZ_BUF_ERROR : status; } *pDest_len = stream.total_out; return mz_deflateEnd(&stream); } int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); } mz_ulong mz_compressBound(mz_ulong source_len) { return mz_deflateBound(NULL, source_len); } typedef struct { tinfl_decompressor m_decomp; mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits; mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; tinfl_status m_last_status; } inflate_state; int mz_inflateInit2(mz_streamp pStream, int window_bits) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state)); if (!pDecomp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pDecomp; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; pDecomp->m_window_bits = window_bits; return MZ_OK; } int mz_inflateInit(mz_streamp pStream) { return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); } int mz_inflate(mz_streamp pStream, int flush) { inflate_state *pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state *)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { // MZ_FINISH on the first call implies that the input and output buffers are // large enough to hold the entire compressed/decompressed file. decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } // flush != MZ_FINISH then we must assume there's more input. 
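// (If the caller hasn't committed to MZ_FINISH, tinfl is told that more input
// may follow, so running out of bytes yields TINFL_STATUS_NEEDS_MORE_INPUT
// rather than a hard failure. Decompression below always goes through the
// 32KB circular dictionary, which is then drained into the caller's output
// buffer in MZ_MIN-sized chunks.)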
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT; if (pState->m_dict_avail) { n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } for (;;) { in_bytes = pStream->avail_in; out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs; status = tinfl_decompress( &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pState->m_dict_avail = (mz_uint)out_bytes; n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); if (status < 0) return MZ_DATA_ERROR; // Stream is corrupted (there could be some // uncompressed data left in the output dictionary - // oh well). else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in)) return MZ_BUF_ERROR; // Signal caller that we can't make forward progress // without supplying more input or by setting flush // to MZ_FINISH. else if (flush == MZ_FINISH) { // The output buffer MUST be large to hold the remaining uncompressed data // when flush==MZ_FINISH. if (status == TINFL_STATUS_DONE) return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END; // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's // at least 1 more byte on the way. If there's no more room left in the // output buffer then something is wrong. else if (!pStream->avail_out) return MZ_BUF_ERROR; } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail)) break; } return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } int mz_inflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { mz_stream stream; int status; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_inflateInit(&stream); if (status != MZ_OK) return status; status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? 
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = { { MZ_OK, "" }, { MZ_STREAM_END, "stream end" }, { MZ_NEED_DICT, "need dictionary" }, { MZ_ERRNO, "file error" }, { MZ_STREAM_ERROR, "stream error" }, { MZ_DATA_ERROR, "data error" }, { MZ_MEM_ERROR, "out of memory" }, { MZ_BUF_ERROR, "buf error" }, { MZ_VERSION_ERROR, "version error" }, { MZ_PARAM_ERROR, "parameter error" } }; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif // MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all // compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index:; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do { \ for (;;) { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt // to read beyond the input buf, then something is wrong with the input because // the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of // the stream with 0's in this scenario. #define TINFL_GET_BYTE(state_index, c) \ do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for (;;) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes // remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode // the next Huffman code (and absolutely no more). It works by trying to fully // decode a // Huffman code by using whatever bits are currently present in the bit buffer. // If this fails, it reads another byte, and tries again until it succeeds or // until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
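// A note on the lookup tables used by the macros below: a non-negative
// m_look_up[] entry packs (code_size << 9) | symbol for codes of at most
// TINFL_FAST_LOOKUP_BITS bits, so a single table probe decodes the common
// case; a negative entry is the bitwise-NOT of an index into m_tree[], which
// is then walked one bit at a time for the rarer, longer codes.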
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. #define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 }; static const int s_length_extra[31] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0 }; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0 }; static const int s_dist_extra[32] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 }; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; static const int s_min_table_sizes[3] = { 257, 1, 4 }; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? 
(size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < 
r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while 
((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE); TINFL_CR_FINISH common_exit : r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start; *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next; if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0)) { const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size; mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH; } return status; } // Higher level helper functions. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0; *pOut_len = 0; tinfl_init(&decomp); for (;;) { size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; tinfl_status status = tinfl_decompress( &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } src_buf_ofs += src_buf_size; *pOut_len += dst_buf_size; if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity; } return pBuf; } size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp); status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); return (status != TINFL_STATUS_DONE) ? 
TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; } int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { int result = 0; tinfl_decompressor decomp; mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0; if (!pDict) return TINFL_STATUS_FAILED; tinfl_init(&decomp); for (;;) { size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); in_buf_ofs += in_buf_size; if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) break; if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { result = (status == TINFL_STATUS_DONE); break; } dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); } MZ_FREE(pDict); *pIn_buf_size = in_buf_ofs; return result; } // ------------------- Low-level Compression (independent from all decompression // API's) // Purposely making these tables static for faster init and thread safety. static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285 }; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0 }; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17 }; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 }; static const mz_uint8 
s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 }; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13 }; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted // values. typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // [email protected], Jyrki Katajainen, [email protected], November 1996. static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
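// The fixup below relies on the Kraft sum: a complete prefix code with
// num_codes[i] codes of length i satisfies
//   sum_i num_codes[i] * 2^(max_code_size - i) == 2^max_code_size.
// Clamping overlong codes to max_code_size (first loop) oversubscribes that
// sum; the while loop then trades code slots between adjacent lengths until
// the sum is exact again.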
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) { int i; mz_uint32 total = 0; if (code_list_len <= 1) return; for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i]; for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i)); while (total != (1UL << max_code_size)) { pNum_codes[max_code_size]--; for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; } total--; } } static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) { int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes); if (static_table) { for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++; } else { tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms; int num_used_syms = 0; const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0]; for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; } pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++; tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit); MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]); for (i = 1, j = num_used_syms; i <= code_size_limit; i++) for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i); } next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1); for (i = 0; i < table_len; i++) { mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue; code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1); d->m_huff_codes[table_num][i] = (mz_uint16)rev_code; } } #define TDEFL_PUT_BITS(b, l) \ do { \ mz_uint bits = b; \ mz_uint len = l; \ MZ_ASSERT(bits <= ((1U << len) - 1U)); \ d->m_bit_buffer |= (bits << d->m_bits_in); \ d->m_bits_in += len; \ while (d->m_bits_in >= 8) { \ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \ d->m_bit_buffer >>= 8; \ d->m_bits_in -= 8; \ } \ } \ MZ_MACRO_END #define TDEFL_RLE_PREV_CODE_SIZE() \ { \ if (rle_repeat_count) { \ if (rle_repeat_count < 3) { \ d->m_huff_count[2][prev_code_size] = (mz_uint16)( \ d->m_huff_count[2][prev_code_size] + rle_repeat_count); \ while (rle_repeat_count--) \ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \ } else { \ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 16; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_repeat_count - 3); \ } \ rle_repeat_count = 0; \ } \ } #define TDEFL_RLE_ZERO_CODE_SIZE() \ { \ if (rle_z_count) { \ if (rle_z_count < 3) { \ d->m_huff_count[2][0] = \ (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \ while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \ } else if (rle_z_count <= 10) { \ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 
1); \ packed_code_sizes[num_packed_code_sizes++] = 17; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 3); \ } else { \ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 18; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 11); \ } \ rle_z_count = 0; \ } \ } static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; static void tdefl_start_dynamic_block(tdefl_compressor *d) { int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; d->m_huff_count[0][256] = 1; tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes); memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes); total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0; memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); for (i = 0; i < total_code_sizes_to_pack; i++) { mz_uint8 code_size = code_sizes_to_pack[i]; if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); } } else { TDEFL_RLE_ZERO_CODE_SIZE(); if (code_size != prev_code_size) { TDEFL_RLE_PREV_CODE_SIZE(); d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); } } prev_code_size = code_size; } if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); } tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 5); for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4); for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS( d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) { mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); } } static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); 
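// The code lengths just filled in are the fixed-Huffman lengths from RFC 1951
// section 3.2.6: literals/lengths 0-143 use 8 bits, 144-255 use 9, 256-279
// use 7, 280-287 use 8, and all 32 distance codes use 5 bits.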
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); } static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF }; #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \ MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; mz_uint8 *pOutput_buf = d->m_pOutput_buf; mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; mz_uint64 bit_buffer = d->m_bit_buffer; mz_uint bits_in = d->m_bits_in; #define TDEFL_PUT_BITS_FAST(b, l) \ { \ bit_buffer |= (((mz_uint64)(b)) << bits_in); \ bits_in += (l); \ } flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint s0, s1, n0, n1, sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); // This sequence coaxes MSVC into using cmov's vs. jmp's. s0 = s_tdefl_small_dist_sym[match_dist & 511]; n0 = s_tdefl_small_dist_extra[match_dist & 511]; s1 = s_tdefl_large_dist_sym[match_dist >> 8]; n1 = s_tdefl_large_dist_extra[match_dist >> 8]; sym = (match_dist < 512) ? s0 : s1; num_extra_bits = (match_dist < 512) ? n0 : n1; MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } } if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE; *(mz_uint64 *)pOutput_buf = bit_buffer; pOutput_buf += (bits_in >> 3); bit_buffer >>= (bits_in & ~7); bits_in &= 7; } #undef TDEFL_PUT_BITS_FAST d->m_pOutput_buf = pOutput_buf; d->m_bits_in = 0; d->m_bit_buffer = 0; while (bits_in) { mz_uint32 n = MZ_MIN(bits_in, 16); TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n); bit_buffer >>= n; bits_in -= n; } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #else static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], 
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); if (match_dist < 512) { sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist]; } else { sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; } MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && // MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) { if (static_block) tdefl_start_static_block(d); else tdefl_start_dynamic_block(d); return tdefl_compress_lz_codes(d); } static int tdefl_flush_block(tdefl_compressor *d, int flush) { mz_uint saved_bit_buf, saved_bits_in; mz_uint8 *pSaved_output_buf; mz_bool comp_block_succeeded = MZ_FALSE; int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size; mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf; d->m_pOutput_buf = pOutput_buf_start; d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; MZ_ASSERT(!d->m_output_flush_remaining); d->m_output_flush_ofs = 0; d->m_output_flush_remaining = 0; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left); d->m_pLZ_code_buf -= (d->m_num_flags_left == 8); if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) { TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8); } TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in; if (!use_raw_block) comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48)); // If the block gets expanded, forget the current contents of the output // buffer and send a raw block instead. if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) && ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) { mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; TDEFL_PUT_BITS(0, 2); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) { TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16); } for (i = 0; i < d->m_total_lz_bytes; ++i) { TDEFL_PUT_BITS( d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8); } } // Check for the extremely unlikely (if not impossible) case of the compressed // block not fitting into the output buffer when using dynamic codes. 
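// In that case the encoder falls back to a static-Huffman block (the else-if
// below), whose header is only a few bits and whose size is predictable.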
else if (!comp_block_succeeded) { d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; tdefl_compress_block(d, MZ_TRUE); } if (flush) { if (flush == TDEFL_FINISH) { if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } } } else { mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); } } } MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end); memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++; if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) { if (d->m_pPut_buf_func) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user)) return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED); } else if (pOutput_buf_start == d->m_output_buf) { int bytes_to_copy = (int)MZ_MIN( (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs)); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy); d->m_out_buf_ofs += bytes_to_copy; if ((n -= bytes_to_copy) != 0) { d->m_output_flush_ofs = bytes_to_copy; d->m_output_flush_remaining = n; } } else { d->m_out_buf_ofs += n; } } return d->m_output_flush_remaining; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p) static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q; mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s); MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; q = (const mz_uint16 *)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32; do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); if (!probe_len) { *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break; } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p 
== *(const mz_uint8 *)q)) > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break; c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); } } } #else static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint8 *s = d->m_dict + pos, *p, *q; mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if ((d->m_dict[probe_pos + match_len] == c0) && \ (d->m_dict[probe_pos + match_len - 1] == c1)) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break; if (probe_len > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return; c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1]; } } } #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static mz_bool tdefl_compress_fast(tdefl_compressor *d) { // Faster, minimally featured LZRW1-style match+parse loop with better // register utilization. Intended for applications where raw throughput is // valued more highly than ratio. 
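  // The parse below hashes the 3-byte trigram at the current position into a
  // small level-1 table and makes exactly one probe per position: on a hit it
  // extends the match 16 bits at a time with unaligned loads, on a miss it
  // emits a single literal. Either way one bit is shifted into *pLZ_flags so
  // the block flusher can later tell literals from matches.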
mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while ((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } } d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; return MZ_TRUE; } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit) { d->m_total_lz_bytes++; *d->m_pLZ_code_buf++ = lit; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } d->m_huff_count[0][lit]++; } static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) { mz_uint32 s0, s1; MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE)); d->m_total_lz_bytes += match_len; d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); match_dist -= 1; d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF); d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3; *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127]; d->m_huff_count[1][(match_dist < 512) ? 
s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; // Update dictionary and hash chains. Keeps the lookahead size equal to // TDEFL_MAX_MATCH_LEN. if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; // Simple lazy/greedy parsing state machine. len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? 
d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if (!cur_match_dist) tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } // Move the lookahead forward by len_to_move bytes. d->m_lookahead_pos += len_to_move; MZ_ASSERT(d->m_lookahead_size >= len_to_move); d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE); // Check if it's time to flush the current LZ codes to the internal output // buffer. if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { int n; d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; } } d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; return MZ_TRUE; } static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { if (d->m_pIn_buf_size) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; } if (d->m_pOut_buf_size) { size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n); d->m_output_flush_ofs += (mz_uint)n; d->m_output_flush_remaining -= (mz_uint)n; d->m_out_buf_ofs += n; *d->m_pOut_buf_size = d->m_out_buf_ofs; } return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; } tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) { if (!d) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return TDEFL_STATUS_BAD_PARAM; } d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size; d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size; d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; d->m_out_buf_ofs = 0; d->m_flush = flush; if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); } d->m_wants_to_finish |= (flush == TDEFL_FINISH); if ((d->m_output_flush_remaining) || (d->m_finished)) return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) { if (!tdefl_compress_fast(d)) return d->m_prev_return_status; } else #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN { if (!tdefl_compress_normal(d)) return d->m_prev_return_status; } if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf)) d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf); if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) { if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status; d->m_finished = (flush == TDEFL_FINISH); if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; } } return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); } tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush) { MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush); } tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user; d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash); d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0; d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY; d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1; d->m_pIn_buf = NULL; d->m_pOut_buf = NULL; d->m_pIn_buf_size = NULL; 
d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}

mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }

mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY);
  succeeded =
      succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
                    TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}

typedef struct {
  size_t m_size, m_capacity;
  mz_uint8 *m_pBuf;
  mz_bool m_expandable;
} tdefl_output_buffer;

static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable) return MZ_FALSE;
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf) return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}

void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return NULL;
  else
    *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}

size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf) return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return 0;
  return out_buf.m_size;
}

#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = { 0,   1,   6,   32,  16,  32,
                                                128, 256, 512, 768, 1500 };

// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ?
TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #endif // MINIZ_NO_ZLIB_APIS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif // Simple PNG writer function by Alex Evans, 2011. Released into the public // domain: https://gist.github.com/908299, more context at // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. // This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 }; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl,
                          bpl, TDEFL_NO_FLUSH);
  }
  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE) {
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // write real header
  *pLen_out = out_buf.m_size - 41;
  {
    static const mz_uint8 chans[] = { 0x00, 0x00, 0x04, 0x02, 0x06 };
    mz_uint8 pnghdr[41] = { 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
                            0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52,
                            0,    0,    (mz_uint8)(w >> 8), (mz_uint8)w,
                            0,    0,    (mz_uint8)(h >> 8), (mz_uint8)h,
                            8,    chans[num_chans], 0, 0, 0, 0, 0, 0, 0,
                            (mz_uint8)(*pLen_out >> 24),
                            (mz_uint8)(*pLen_out >> 16),
                            (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out,
                            0x49, 0x44, 0x41, 0x54 };
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
    for (i = 0; i < 4; ++i, c <<= 8)
      ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
    memcpy(out_buf.m_pBuf, pnghdr, 41);
  }
  // write footer (IDAT CRC-32, followed by IEND chunk)
  if (!tdefl_output_buffer_putter(
          "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
    *pLen_out = 0;
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
                          *pLen_out + 4);
  for (i = 0; i < 4; ++i, c <<= 8)
    (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
  // compute final size of file, grab compressed data buffer and return
  *pLen_out += 57;
  MZ_FREE(pComp);
  return out_buf.m_pBuf;
}

void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans,
                                              size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but
  // we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs
  // were #defined out)
  return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
                                                    pLen_out, 6, MZ_FALSE);
}

// ------------------- .ZIP archive reading

#ifndef MINIZ_NO_ARCHIVE_APIS

#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>

#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
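// Note: the __TINYC__ branch above falls back to plain ftell/fseek, which use
// a long file offset, so archives >= 2GB can't be handled reliably on
// platforms where long is 32 bits.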
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) #define MZ_DELETE_FILE remove #else #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler // alignment and platform endian issues, miniz.c doesn't use structs for any of // this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, // End of central directory offsets MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; MZ_FILE *m_pFile; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \ (array_ptr)->m_element_size = element_size #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \ ((element_type *)((array_ptr)->m_p))[index] static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive 
*pZip, mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if (new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef _MSC_VER struct tm tm_struct; struct tm *tm = &tm_struct; errno_t err = localtime_s(tm, &time); if (err) { *pDOS_date = 0; *pDOS_time = 0; return; } #else struct tm *tm = localtime(&time); #endif *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef MINIZ_NO_TIME (void)pFilename; *pDOS_date = *pDOS_time = 0; #else struct MZ_FILE_STAT_STRUCT file_stat; // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000 // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. 
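  // (Compiling with _LARGEFILE64_SOURCE selects the stat64/fopen64 branch of
  // the MZ_FILE_STAT_STRUCT definitions above, which avoids this limit.)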
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date); #endif // #ifdef MINIZ_NO_TIME return MZ_TRUE; } #ifndef MINIZ_NO_TIME static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) { struct utimbuf t; t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); } #endif // #ifndef MINIZ_NO_TIME #endif // #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_READING; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) \ do { \ mz_uint32 t = a; \ a = b; \ b = t; \ } \ MZ_MACRO_END // Heap sort of lowercased filenames, used to help accelerate plain central // directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), // but it could allocate memory.) 
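// The sort permutes m_sorted_central_dir_offsets (an array of indices into
// the offsets array) rather than moving the central dir records themselves,
// so it runs in-place with no scratch memory beyond the index array.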
static void mz_zip_reader_sort_central_dir_offsets_by_filename( mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; int start = (size - 2) >> 1, end; while (start >= 0) { int child, root = start; for (;;) { if ((child = (root << 1) + 1) >= size) break; child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } start--; } end = size - 1; while (end > 0) { int child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for (;;) { if ((child = (root << 1) + 1) >= end) break; child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } end--; } } static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint32 flags) { mz_uint cdir_size, num_this_disk, cdir_disk_index; mz_uint64 cdir_ofs; mz_int64 cur_file_ofs; const mz_uint8 *p; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); // Basic sanity checks - reject files which are too small, and check the first // 4 bytes of the file to make sure a local header is there. if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; // Find the end of central directory record by scanning the file from the end // towards the beginning. cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); for (;;) { int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) return MZ_FALSE; for (i = n - 4; i >= 0; --i) if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break; if (i >= 0) { cur_file_ofs += i; break; } if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE))) return MZ_FALSE; cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); } // Read and verify the end of central directory record. 
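  // The fixed portion of the end-of-central-dir record is 22 bytes
  // (MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE): signature, disk numbers, entry
  // counts, central dir size/offset, and the archive comment length (see the
  // MZ_ZIP_ECDH_* offsets above).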
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) || ((pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) != MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS))) return MZ_FALSE; num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1))) return MZ_FALSE; if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE; pZip->m_central_directory_file_ofs = cdir_ofs; if (pZip->m_total_files) { mz_uint i, n; // Read the entire central directory into a heap block, and allocate another // heap block to hold the unsorted central dir file record offsets, and // another to hold the sorted indices. if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE))) return MZ_FALSE; if (sort_central_dir) { if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE)) return MZ_FALSE; } if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size) return MZ_FALSE; // Now create an index into the central directory file records, do some // basic sanity checking on each record, and check for zip64 entries (which // are not yet supported). 
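    // Each record is a fixed 46-byte header (MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
    // followed by variable-length filename, extra, and comment fields; the
    // loop below walks the records sequentially, bounds-checking each against
    // the bytes remaining in the central dir.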
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p; for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) { mz_uint total_header_size, comp_size, decomp_size, disk_index; if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)) return MZ_FALSE; MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p); if (sort_central_dir) MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) || (comp_size == 0xFFFFFFFF)) return MZ_FALSE; disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS); if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE; if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n) return MZ_FALSE; n -= total_header_size; p += total_header_size; } } if (sort_central_dir) mz_zip_reader_sort_central_dir_offsets_by_filename(pZip); return MZ_TRUE; } mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags) { if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE; if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; size_t s = (file_ofs >= pZip->m_archive_size) ? 
0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) { if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; #ifdef __cplusplus pZip->m_pState->m_pMem = const_cast<void *>(pMem); #else pZip->m_pState->m_pMem = (void *)pMem; #endif pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { mz_uint64 file_size; MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return MZ_FALSE; if (MZ_FSEEK64(pFile, 0, SEEK_END)) { MZ_FCLOSE(pFile); return MZ_FALSE; } file_size = MZ_FTELL64(pFile); if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? pZip->m_total_files : 0; } static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh( mz_zip_archive *pZip, mz_uint file_index) { if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return NULL; return &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); } mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) { mz_uint m_bit_flag; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); return (m_bit_flag & 1); } mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) { mz_uint filename_len, external_attr; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; // First see if the filename ends with a '/' character. filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_len) { if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') return MZ_TRUE; } // Bugfix: This code was also checking if the internal attribute was non-zero, // which wasn't correct. // Most/all zip writers (hopefully) set DOS file/directory attributes in the // low 16-bits, so check for the DOS directory flag and ignore the source OS // ID in the created by field. // FIXME: Remove this check? Is it necessary - we already check the filename. 
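  // 0x10 is the DOS/Windows FILE_ATTRIBUTE_DIRECTORY bit.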
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); if ((external_attr & 0x10) != 0) return MZ_TRUE; return MZ_FALSE; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if ((!p) || (!pStat)) return MZ_FALSE; // Unpack the central directory record. pStat->m_file_index = file_index; pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); #ifndef MINIZ_NO_TIME pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); #endif pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); // Copy as much of the filename and comment as possible. n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1); memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pStat->m_filename[n] = '\0'; n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1); pStat->m_comment_size = n; memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n); pStat->m_comment[n] = '\0'; return MZ_TRUE; } mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) { if (filename_buf_size) pFilename[0] = '\0'; return 0; } n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_buf_size) { n = MZ_MIN(n, filename_buf_size - 1); memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pFilename[n] = '\0'; } return n + 1; } static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags) { mz_uint i; if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len); for (i = 0; i < len; ++i) if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE; return MZ_TRUE; } static MZ_FORCEINLINE int mz_zip_reader_filename_compare( const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? 
(int)(l_len - r_len) : (l - r); } static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; const mz_uint filename_len = (mz_uint)strlen(pFilename); int l = 0, h = size - 1; while (l <= h) { int m = (l + h) >> 1, file_index = pIndices[m], comp = mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len); if (!comp) return file_index; else if (comp < 0) l = m + 1; else h = m - 1; } return -1; } int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags) { mz_uint file_index; size_t name_len, comment_len; if ((!pZip) || (!pZip->m_pState) || (!pName) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return -1; if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size)) return mz_zip_reader_locate_file_binary_search(pZip, pName); name_len = strlen(pName); if (name_len > 0xFFFF) return -1; comment_len = pComment ? strlen(pComment) : 0; if (comment_len > 0xFFFF) return -1; for (file_index = 0; file_index < pZip->m_total_files; file_index++) { const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS); const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; if (filename_len < name_len) continue; if (comment_len) { mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS); const char *pFile_comment = pFilename + filename_len + file_extra_len; if ((file_comment_len != comment_len) || (!mz_zip_reader_string_equal(pComment, pFile_comment, file_comment_len, flags))) continue; } if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) { int ofs = filename_len - 1; do { if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':')) break; } while (--ofs >= 0); ofs++; pFilename += ofs; filename_len -= ofs; } if ((filename_len == name_len) && (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags))) return file_index; } return -1; } mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int status = TINFL_STATUS_DONE; mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail; mz_zip_archive_file_stat file_stat; void *pRead_buf; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; tinfl_decompressor inflator; if ((buf_size) && (!pBuf)) return MZ_FALSE; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old 
zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Ensure supplied output buffer is large enough. needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size; if (buf_size < needed_size) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size) return MZ_FALSE; return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32); } // Decompress the file either directly from memory or from a file input // buffer. tinfl_init(&inflator); if (pZip->m_pState->m_pMem) { // Read directly from the archive in memory. pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else if (pUser_read_buf) { // Use a user provided read buffer. if (!user_read_buf_size) return MZ_FALSE; pRead_buf = (mz_uint8 *)pUser_read_buf; read_buf_size = user_read_buf_size; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } else { // Temporarily allocate a read buffer. read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #endif return MZ_FALSE; if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } do { size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? 
TINFL_FLAG_HAS_MORE_INPUT : 0)); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; out_buf_ofs += out_buf_size; } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); if (status == TINFL_STATUS_DONE) { // Make sure the entire file was decompressed, and check its CRC. if ((out_buf_ofs != file_stat.m_uncomp_size) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size); } mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0); } mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0); } void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) { mz_uint64 comp_size, uncomp_size, alloc_size; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); void *pBuf; if (pSize) *pSize = 0; if (!p) return NULL; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
comp_size : uncomp_size; #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #endif return NULL; if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) return NULL; if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; // Decompress the file either directly from memory or from a file input // buffer. if (pZip->m_pState->m_pMem) { pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else { read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. 
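// Illustrative sketch, not part of miniz: the streaming branches below push
// data through the caller's mz_file_write_func. The contract is that the
// callback consumes exactly `n` bytes and returns `n`; any other return
// value is treated as a write failure. `CollectToVector` is a hypothetical
// helper used only in this sketch.
#if 0
static size_t CollectToVector(void *pOpaque, mz_uint64 ofs, const void *pBuf,
                              size_t n) {
  (void)ofs;  // Offsets arrive in increasing order here, so appending works.
  std::vector<unsigned char> *v =
      static_cast<std::vector<unsigned char> *>(pOpaque);
  const unsigned char *p = static_cast<const unsigned char *>(pBuf);
  v->insert(v->end(), p, p + n);
  return n;
}
// Usage:
//   std::vector<unsigned char> bytes;
//   mz_zip_reader_extract_to_callback(&zip, file_index, CollectToVector,
//                                     &bytes, 0);
#endif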
if (pZip->m_pState->m_pMem) { #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #endif return MZ_FALSE; if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size) status = TINFL_STATUS_FAILED; else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size); cur_file_ofs += file_stat.m_comp_size; out_buf_ofs += file_stat.m_comp_size; comp_remaining = 0; } else { while (comp_remaining) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32( file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail); if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; out_buf_ofs += read_buf_avail; comp_remaining -= read_buf_avail; } } } else { tinfl_decompressor inflator; tinfl_init(&inflator); if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE))) status = TINFL_STATUS_FAILED; else { do { mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; if (out_buf_size) { if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size) { status = TINFL_STATUS_FAILED; break; } file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size); if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) { status = TINFL_STATUS_FAILED; break; } } } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT)); } } if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { // Make sure the entire file was decompressed, and check its CRC. 
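// (file_crc32 was accumulated chunk by chunk above; mz_crc32 is streamable,
// so folding in N chunks in order is equivalent to one call over the whole
// buffer. Illustrative sketch only; `part1`/`part2` are placeholders.)
#if 0
const mz_uint8 part1[] = {1, 2}, part2[] = {3, 4};
mz_uint32 c = MZ_CRC32_INIT;
c = (mz_uint32)mz_crc32(c, part1, sizeof(part1));
c = (mz_uint32)mz_crc32(c, part2, sizeof(part2));
// `c` now equals one mz_crc32 call over the concatenated bytes {1, 2, 3, 4}.
#endif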
if ((out_buf_ofs != file_stat.m_uncomp_size) || (file_crc32 != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags); } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) { (void)ofs; return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque); } mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) { mz_bool status; mz_zip_archive_file_stat file_stat; MZ_FILE *pFile; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; pFile = MZ_FOPEN(pDst_filename, "wb"); if (!pFile) return MZ_FALSE; status = mz_zip_reader_extract_to_callback( pZip, file_index, mz_zip_file_write_callback, pFile, flags); if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE; #ifndef MINIZ_NO_TIME if (status) mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); #endif return status; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_end(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; if (pZip->m_pState) { mz_zip_internal_state *pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO pZip->m_pFree(pZip->m_pAlloc_opaque, pState); } pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags); } #endif // ------------------- .ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static void mz_write_le16(mz_uint8 *p, mz_uint16 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); } static void mz_write_le32(mz_uint8 *p, mz_uint32 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); p[2] = (mz_uint8)(v >> 16); p[3] = (mz_uint8)(v >> 24); } #define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v)) #define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v)) mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) { if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (pZip->m_file_offset_alignment) { // Ensure user specified file offset alignment is a power of 2. 
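// (A power of two has exactly one bit set, so `x & (x - 1)` clears it and
// yields zero; any other nonzero alignment keeps a bit set and is rejected
// below.)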
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)) return MZ_FALSE; } if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_archive_size = existing_size; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_zip_internal_state *pState = pZip->m_pState; mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size); #ifdef _MSC_VER if ((!n) || ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #else if ((!n) || ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #endif return 0; if (new_size > pState->m_mem_capacity) { void *pNew_block; size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity); while (new_capacity < new_size) new_capacity *= 2; if (NULL == (pNew_block = pZip->m_pRealloc( pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity))) return 0; pState->m_pMem = pNew_block; pState->m_mem_capacity = new_capacity; } memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n); pState->m_mem_size = (size_t)new_size; return n; } mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size) { pZip->m_pWrite = mz_zip_heap_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning))) { if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, initial_allocation_size))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_mem_capacity = initial_allocation_size; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning) { MZ_FILE *pFile; pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_pFile = pFile; if (size_to_reserve_at_beginning) { mz_uint64 cur_ofs = 0; char buf[4096]; MZ_CLEAR_OBJ(buf); do { size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) { mz_zip_writer_end(pZip); return MZ_FALSE; } cur_ofs += n; 
size_to_reserve_at_beginning -= n; } while (size_to_reserve_at_beginning); } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; // No sense in trying to write to an archive that's already at the support max // size if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; pState = pZip->m_pState; if (pState->m_pFile) { #ifdef MINIZ_NO_STDIO pFilename; return MZ_FALSE; #else // Archive is being read from stdio - try to reopen as writable. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; if (!pFilename) return MZ_FALSE; pZip->m_pWrite = mz_zip_file_write_func; if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { // The mz_zip_archive is now in a bogus state because pState->m_pFile is // NULL, so just close it. mz_zip_reader_end(pZip); return MZ_FALSE; } #endif // #ifdef MINIZ_NO_STDIO } else if (pState->m_pMem) { // Archive lives in a memory block. Assume it's from the heap that we can // resize using the realloc callback. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; pState->m_mem_capacity = pState->m_mem_size; pZip->m_pWrite = mz_zip_heap_write_func; } // Archive is being read via a user provided read function - make sure the // user has specified a write function too. else if (!pZip->m_pWrite) return MZ_FALSE; // Start writing new files at the archive's current central directory // location. pZip->m_archive_size = pZip->m_central_directory_file_ofs; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_central_directory_file_ofs = 0; return MZ_TRUE; } mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags) { return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0); } typedef struct { mz_zip_archive *m_pZip; mz_uint64 m_cur_archive_file_ofs; mz_uint64 m_comp_size; } mz_zip_writer_add_state; static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, void *pUser) { mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser; if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len) != len) return MZ_FALSE; pState->m_cur_archive_file_ofs += len; pState->m_comp_size += len; return MZ_TRUE; } static mz_bool mz_zip_writer_create_local_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date) { (void)pZip; memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); return MZ_TRUE; } static mz_bool mz_zip_writer_create_central_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir( mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; // No zip64 support yet if ((local_header_ofs > 0xFFFFFFFF) || (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + comment_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_central_dir_header( pZip, central_dir_header, filename_size, extra_size, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return MZ_FALSE; if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1))) { // Try to push the central directory array back into its original state. 
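// (If any of the push_backs above failed, m_central_dir may hold a partial
// entry; shrinking it back to orig_central_dir_size keeps the directory
// consistent. The offsets array is only appended last, so on failure it
// never references the partial entry.)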
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { // Basic ZIP archive filename validity checks: Valid filenames cannot start // with a forward slash, cannot contain a drive letter, and cannot use // DOS-style backward slashes. if (*pArchive_name == '/') return MZ_FALSE; while (*pArchive_name) { if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE; pArchive_name++; } return MZ_TRUE; } static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment( mz_zip_archive *pZip) { mz_uint32 n; if (!pZip->m_file_offset_alignment) return 0; n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); return (pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1); } static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) { mz_uint32 s = MZ_MIN(sizeof(buf), n); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) return MZ_FALSE; cur_file_ofs += s; n -= s; } return MZ_TRUE; } mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32) { mz_uint16 method = 0, dos_time = 0, dos_date = 0; mz_uint level, ext_attributes = 0, num_alignment_padding_bytes; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed; mz_zip_internal_state *pState; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; pState = pZip->m_pState; if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return MZ_FALSE; // No zip64 support yet if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; #ifndef MINIZ_NO_TIME { time_t cur_time; time(&cur_time); mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date); } #endif // #ifndef MINIZ_NO_TIME archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { // Set DOS Subdirectory attribute bit. ext_attributes |= 0x10; // Subdirectories cannot contain data. if ((buf_size) || (uncomp_size)) return MZ_FALSE; } // Try to do any allocations before writing to the archive, so if an // allocation fails the file remains unmodified. 
(A good idea if we're doing // an in-place modification.) if ((!mz_zip_array_ensure_room( pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = 
pZip->m_archive_size, uncomp_size = 0, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, 
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer( pComp, pRead_buf, in_buf_size, uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if (NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, 
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back( pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if 
(pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. 
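// Caller-side sketch (illustrative only; "out.zip"/"hello.txt" are
// hypothetical names): this helper either creates a fresh archive or, as in
// the branch below, reopens an existing one read-write and appends to it.
#if 0
const char *msg = "hello";
mz_bool ok = mz_zip_add_mem_to_archive_file_in_place(
    "out.zip", "hello.txt", msg, strlen(msg),
    /*pComment=*/NULL, /*comment_size=*/0, MZ_BEST_COMPRESSION);
#endif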
if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ // ---------------------- end of miniz ---------------------------------------- #ifdef __clang__ #pragma clang diagnostic pop #endif #ifdef _MSC_VER #pragma warning(pop) #endif } // namespace miniz #else // Reuse MINIZ_LITTE_ENDIAN macro #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. 
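// (This fallback is only reached when TINYEXR_USE_MINIZ is 0; the embedded
// miniz above normally provides MINIZ_LITTLE_ENDIAN, so for zlib builds the
// flag is re-derived here from compiler-provided byte-order macros.)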
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static void SetErrorMessage(const std::string &msg, const char **err) { if (err) { #ifdef _WIN32 (*err) = _strdup(msg.c_str()); #else (*err) = strdup(msg.c_str()); #endif } } static const int kEXRVersionSize = 8; static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; } static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } static void cpy4(int *dst_val, const int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(unsigned int *dst_val, const unsigned int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(float *dst_val, const float *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } #if 0 static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } #endif static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. 
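// Round-trip sketch for the FP16/FP32 helpers defined below (illustrative
// only). 0x3C00 is 1.0 in IEEE 754 half precision, and every finite half is
// exactly representable as a float, so this conversion pair is lossless.
#if 0
FP16 h;
h.u = 0x3C00;
FP32 f = half_to_float(h);        // f.f == 1.0f
FP16 h2 = float_to_half_full(f);  // h2.u == 0x3C00 again
#endif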
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic pop
#endif

static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00 << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}

static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else  // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;  // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RANDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7

#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NULL ('\0').
  const char *p = ptr;
  const char *q = ptr;
  while ((size_t(q - ptr) < len) && (*q) != 0) {
    q++;
  }

  if (size_t(q - ptr) >= len) {
    (*s) = std::string();
    return NULL;
  }

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
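    // (strnlen scanned all `size` bytes without finding '\0', so the
    // attribute name is unterminated and the header data is malformed.)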
return false; } *name = std::string(marker, name_len); marker += name_len + 1; size -= name_len + 1; size_t type_len = strnlen(marker, size); if (type_len == size) { return false; } *type = std::string(marker, type_len); marker += type_len + 1; size -= type_len + 1; if (size < sizeof(uint32_t)) { return false; } uint32_t data_len; memcpy(&data_len, marker, sizeof(uint32_t)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len == 0) { if ((*type).compare("string") == 0) { // Accept empty string attribute. marker += sizeof(uint32_t); size -= sizeof(uint32_t); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t); data->resize(1); (*data)[0] = '\0'; return true; } else { return false; } } marker += sizeof(uint32_t); size -= sizeof(uint32_t); if (size < data_len) { return false; } data->resize(static_cast<size_t>(data_len)); memcpy(&data->at(0), marker, static_cast<size_t>(data_len)); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len; return true; } static void WriteAttributeToMemory(std::vector<unsigned char> *out, const char *name, const char *type, const unsigned char *data, int len) { out->insert(out->end(), name, name + strlen(name) + 1); out->insert(out->end(), type, type + strlen(type) + 1); int outLen = len; tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen)); out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen), reinterpret_cast<unsigned char *>(&outLen) + sizeof(int)); out->insert(out->end(), data, data + len); } typedef struct { std::string name; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } ChannelInfo; typedef struct { std::vector<tinyexr::ChannelInfo> channels; std::vector<EXRAttribute> attributes; int data_window[4]; int line_order; int display_window[4]; float screen_window_center[2]; float screen_window_width; float pixel_aspect_ratio; int chunk_count; // Tiled format int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; unsigned int header_len; int compression_type; void clear() { channels.clear(); attributes.clear(); data_window[0] = 0; data_window[1] = 0; data_window[2] = 0; data_window[3] = 0; line_order = 0; display_window[0] = 0; display_window[1] = 0; display_window[2] = 0; display_window[3] = 0; screen_window_center[0] = 0.0f; screen_window_center[1] = 0.0f; screen_window_width = 0.0f; pixel_aspect_ratio = 0.0f; chunk_count = 0; // Tiled format tile_size_x = 0; tile_size_y = 0; tile_level_mode = 0; tile_rounding_mode = 0; header_len = 0; compression_type = 0; } } HeaderInfo; static bool ReadChannelInfo(std::vector<ChannelInfo> &channels, const std::vector<unsigned char> &data) { const char *p = reinterpret_cast<const char *>(&data.at(0)); for (;;) { if ((*p) == 0) { break; } ChannelInfo info; tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) - (p - reinterpret_cast<const char *>(data.data())); if (data_len < 0) { return false; } p = ReadString(&info.name, p, size_t(data_len)); if ((p == NULL) && (info.name.empty())) { // Buffer overrun. Issue #51. 
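// (ReadString returns NULL with an empty name when it runs off the end of
// the channel-list buffer before finding a terminator, i.e. the attribute
// data is truncated.)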
return false; } const unsigned char *data_end = reinterpret_cast<const unsigned char *>(p) + 16; if (data_end >= (data.data() + data.size())) { return false; } memcpy(&info.pixel_type, p, sizeof(int)); p += 4; info.p_linear = static_cast<unsigned char>(p[0]); // uchar p += 1 + 3; // reserved: uchar[3] memcpy(&info.x_sampling, p, sizeof(int)); // int p += 4; memcpy(&info.y_sampling, p, sizeof(int)); // int p += 4; tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling)); channels.push_back(info); } return true; } static void WriteChannelInfo(std::vector<unsigned char> &data, const std::vector<ChannelInfo> &channels) { size_t sz = 0; // Calculate total size. for (size_t c = 0; c < channels.size(); c++) { sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0 sz += 16; // 4 * int } data.resize(sz + 1); unsigned char *p = &data.at(0); for (size_t c = 0; c < channels.size(); c++) { memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str())); p += strlen(channels[c].name.c_str()); (*p) = '\0'; p++; int pixel_type = channels[c].pixel_type; int x_sampling = channels[c].x_sampling; int y_sampling = channels[c].y_sampling; tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling)); memcpy(p, &pixel_type, sizeof(int)); p += sizeof(int); (*p) = channels[c].p_linear; p += 4; memcpy(p, &x_sampling, sizeof(int)); p += sizeof(int); memcpy(p, &y_sampling, sizeof(int)); p += sizeof(int); } (*p) = '\0'; } static void CompressZip(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } #if TINYEXR_USE_MINIZ // // Compress the data using miniz // miniz::mz_ulong outSize = miniz::mz_compressBound(src_size); int ret = miniz::mz_compress( dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)), src_size); assert(ret == miniz::MZ_OK); (void)ret; compressedSize = outSize; #else uLong outSize = compressBound(static_cast<uLong>(src_size)); int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)), src_size); assert(ret == Z_OK); compressedSize = outSize; #endif // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static bool DecompressZip(unsigned char *dst, unsigned long *uncompressed_size /* inout */, const unsigned char *src, unsigned long src_size) { if ((*uncompressed_size) == src_size) { // Data is not compressed(Issue 40). 
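// (CompressZip above stores a block raw whenever deflate would not shrink
// it, signalled by compressed size == uncompressed size, so such a block is
// copied through unchanged here.)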
memcpy(dst, src, src_size); return true; } std::vector<unsigned char> tmpBuf(*uncompressed_size); #if TINYEXR_USE_MINIZ int ret = miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (miniz::MZ_OK != ret) { return false; } #else int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (Z_OK != ret) { return false; } #endif // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size); while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. { const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (*uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + (*uncompressed_size); for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } return true; } // RLE code from OpenEXR -------------------------------------- #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wsign-conversion" #endif #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif const int MIN_RUN_LENGTH = 3; const int MAX_RUN_LENGTH = 127; // // Compress an array of bytes, using run-length encoding, // and return the length of the compressed data. // static int rleCompress(int inLength, const char in[], signed char out[]) { const char *inEnd = in + inLength; const char *runStart = in; const char *runEnd = in + 1; signed char *outWrite = out; while (runStart < inEnd) { while (runEnd < inEnd && *runStart == *runEnd && runEnd - runStart - 1 < MAX_RUN_LENGTH) { ++runEnd; } if (runEnd - runStart >= MIN_RUN_LENGTH) { // // Compressible run // *outWrite++ = static_cast<char>(runEnd - runStart) - 1; *outWrite++ = *(reinterpret_cast<const signed char *>(runStart)); runStart = runEnd; } else { // // Incompressible run // while (runEnd < inEnd && ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) || (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) && runEnd - runStart < MAX_RUN_LENGTH) { ++runEnd; } *outWrite++ = static_cast<char>(runStart - runEnd); while (runStart < runEnd) { *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++)); } } ++runEnd; } return static_cast<int>(outWrite - out); } // // Uncompress an array of bytes compressed with rleCompress(). // Returns the length of the uncompressed data, or 0 if the // length of the uncompressed data would be more than maxLength.
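// Example (illustrative): the compressed stream {-3, 'a', 'b', 'c', 2, 'x'} decodes to "abcxxx" -- a negative count -n is followed by n literal bytes, while a non-negative count n repeats the next byte n + 1 times. rleCompress likewise encodes a run such as "aaaa" as {3, 'a'}.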
// static int rleUncompress(int inLength, int maxLength, const signed char in[], char out[]) { char *outStart = out; while (inLength > 0) { if (*in < 0) { int count = -(static_cast<int>(*in++)); inLength -= count + 1; if (0 >(maxLength -= count)) return 0; memcpy(out, in, count); out += count; in += count; } else { int count = *in++; inLength -= 2; if (0 > (maxLength -= count + 1)) return 0; memset(out, *reinterpret_cast<const char *>(in), count + 1); out += count + 1; in++; } } return static_cast<int>(out - outStart); } #ifdef __clang__ #pragma clang diagnostic pop #endif // End of RLE code from OpenEXR ----------------------------------- static void CompressRle(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } // outSize will be (srcSiz * 3) / 2 at max. int outSize = rleCompress(static_cast<int>(src_size), reinterpret_cast<const char *>(&tmpBuf.at(0)), reinterpret_cast<signed char *>(dst)); assert(outSize > 0); compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static void DecompressRle(unsigned char *dst, const unsigned long uncompressed_size, const unsigned char *src, unsigned long src_size) { if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); return; } std::vector<unsigned char> tmpBuf(uncompressed_size); int ret = rleUncompress(static_cast<int>(src_size), static_cast<int>(uncompressed_size), reinterpret_cast<const signed char *>(src), reinterpret_cast<char *>(&tmpBuf.at(0))); assert(ret == static_cast<int>(uncompressed_size)); (void)ret; // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + uncompressed_size; while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. 
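// (The compressor placed the even-indexed bytes in the first half of the buffer and the odd-indexed bytes in the second half, so e.g. {a, c, e, b, d, f} interleaves back to {a, b, c, d, e, f} -- illustrative.)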
{ const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + uncompressed_size; for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } } #if TINYEXR_USE_PIZ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #endif // // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp // // ----------------------------------------------------------------- // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC) // (3 clause BSD license) // struct PIZChannelData { unsigned short *start; unsigned short *end; int nx; int ny; int ys; int size; }; //----------------------------------------------------------------------------- // // 16-bit Haar Wavelet encoding and decoding // // The source code in this file is derived from the encoding // and decoding routines written by Christian Rouet for his // PIZ image file format. // //----------------------------------------------------------------------------- // // Wavelet basis functions without modulo arithmetic; they produce // the best compression ratios when the wavelet-transformed data are // Huffman-encoded, but the wavelet transform works only for 14-bit // data (untransformed data values must be less than (1 << 14)). // inline void wenc14(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { short as = static_cast<short>(a); short bs = static_cast<short>(b); short ms = (as + bs) >> 1; short ds = as - bs; l = static_cast<unsigned short>(ms); h = static_cast<unsigned short>(ds); } inline void wdec14(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { short ls = static_cast<short>(l); short hs = static_cast<short>(h); int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); short as = static_cast<short>(ai); short bs = static_cast<short>(ai - hi); a = static_cast<unsigned short>(as); b = static_cast<unsigned short>(bs); } // // Wavelet basis functions with modulo arithmetic; they work with full // 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't // compress the data quite as well. 
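// Example (illustrative): wenc14(10, 4) stores the rounded average l = 7 and the difference h = 6, and wdec14(7, 6) restores a = 10, b = 4 exactly; wenc16/wdec16 below apply the same average/difference transform with modulo-2^16 wraparound so that full 16-bit values survive.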
// const int NBITS = 16; const int A_OFFSET = 1 << (NBITS - 1); const int M_OFFSET = 1 << (NBITS - 1); const int MOD_MASK = (1 << NBITS) - 1; inline void wenc16(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { int ao = (a + A_OFFSET) & MOD_MASK; int m = ((ao + b) >> 1); int d = ao - b; if (d < 0) m = (m + M_OFFSET) & MOD_MASK; d &= MOD_MASK; l = static_cast<unsigned short>(m); h = static_cast<unsigned short>(d); } inline void wdec16(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { int m = l; int d = h; int bb = (m - (d >> 1)) & MOD_MASK; int aa = (d + bb - A_OFFSET) & MOD_MASK; b = static_cast<unsigned short>(bb); a = static_cast<unsigned short>(aa); } // // 2D Wavelet encoding: // static void wav2Encode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ? ny : nx; int p = 1; // == 1 << level int p2 = 2; // == 1 << (level+1) // // Hierachical loop on smaller dimension n // while (p2 <= n) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet encoding // if (w14) { wenc14(*px, *p01, i00, i01); wenc14(*p10, *p11, i10, i11); wenc14(i00, i10, *px, *p10); wenc14(i01, i11, *p01, *p11); } else { wenc16(*px, *p01, i00, i01); wenc16(*p10, *p11, i10, i11); wenc16(i00, i10, *px, *p10); wenc16(i01, i11, *p01, *p11); } } // // Encode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wenc14(*px, *p10, i00, *p10); else wenc16(*px, *p10, i00, *p10); *px = i00; } } // // Encode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wenc14(*px, *p01, i00, *p01); else wenc16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p = p2; p2 <<= 1; } } // // 2D Wavelet decoding: // static void wav2Decode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ? 
ny : nx; int p = 1; int p2; // // Search max level // while (p <= n) p <<= 1; p >>= 1; p2 = p; p >>= 1; // // Hierarchical loop on smaller dimension n // while (p >= 1) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet decoding // if (w14) { wdec14(*px, *p10, i00, i10); wdec14(*p01, *p11, i01, i11); wdec14(i00, i01, *px, *p01); wdec14(i10, i11, *p10, *p11); } else { wdec16(*px, *p10, i00, i10); wdec16(*p01, *p11, i01, i11); wdec16(i00, i01, *px, *p01); wdec16(i10, i11, *p10, *p11); } } // // Decode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wdec14(*px, *p10, i00, *p10); else wdec16(*px, *p10, i00, *p10); *px = i00; } } // // Decode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wdec14(*px, *p01, i00, *p01); else wdec16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p2 = p; p >>= 1; } } //----------------------------------------------------------------------------- // // 16-bit Huffman compression and decompression. // // The source code in this file is derived from the 8-bit // Huffman compression and decompression routines written // by Christian Rouet for his PIZ image file format. // //----------------------------------------------------------------------------- // Adds some modification for tinyexr. 
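// Example (illustrative) of the canonical-code rules implemented by hufCanonicalCodeTable() below: for code lengths A = 1, B = 2, C = 2, the assigned codes are A = '1', B = '00', C = '01'; padding the short code with zeroes ('10') yields a value above every longer code, and codes of equal length increase with the symbol value.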
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- int len : 8; // code length 0 int lit : 24; // lit p size int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. // for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) 
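// (For input data {7, 7, 7, 9}, say, frq[7] == 3, frq[9] == 1, and every other entry is zero -- illustrative.)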
// // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. // std::vector<int> hlink(HUF_ENCSIZE); std::vector<long long *> fHeap(HUF_ENCSIZE); *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. // std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); std::vector<long long> scode(HUF_ENCSIZE); memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. 
// hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. // hufCanonicalCodeTable(scode.data()); memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode > ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } else if (l >= (long long)SHORT_ZEROCODE_RUN) { int zerun = l - SHORT_ZEROCODE_RUN + 2; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } } *pcode = const_cast<char *>(p); hufCanonicalCodeTable(hcode); return true; } // // DECODING TABLE BUILDING // // // Clear a newly allocated decoding table so that it contains only zeroes. 
// static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller) // decoding table [HUF_DECSIZE] { for (int i = 0; i < HUF_DECSIZE; i++) { hdecod[i].len = 0; hdecod[i].lit = 0; hdecod[i].p = NULL; } // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE); } // // Build a decoding hash table based on the encoding table hcode: // - short codes (<= HUF_DECBITS) are resolved with a single table access; // - long code entry allocations are not optimized, because long codes are // unfrequent; // - decoding tables are used by hufDecode(); // static bool hufBuildDecTable(const long long *hcode, // i : encoding table int im, // i : min index in hcode int iM, // i : max index in hcode HufDec *hdecod) // o: (allocated by caller) // decoding table [HUF_DECSIZE] { // // Init hashtable & loop on all codes. // Assumes that hufClearDecTable(hdecod) has already been called. // for (; im <= iM; im++) { long long c = hufCode(hcode[im]); int l = hufLength(hcode[im]); if (c >> l) { // // Error: c is supposed to be an l-bit code, // but c contains a value that is greater // than the largest l-bit number. // // invalidTableEntry(); return false; } if (l > HUF_DECBITS) { // // Long code: add a secondary entry // HufDec *pl = hdecod + (c >> (l - HUF_DECBITS)); if (pl->len) { // // Error: a short code has already // been stored in table entry *pl. // // invalidTableEntry(); return false; } pl->lit++; if (pl->p) { int *p = pl->p; pl->p = new int[pl->lit]; for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i]; delete[] p; } else { pl->p = new int[1]; } pl->p[pl->lit - 1] = im; } else if (l) { // // Short code: init all primary entries // HufDec *pl = hdecod + (c << (HUF_DECBITS - l)); for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) { if (pl->len || pl->p) { // // Error: a short code or a long code has // already been stored in table entry *pl. // // invalidTableEntry(); return false; } pl->len = l; pl->lit = im; } } } return true; } // // Free the long code entries of a decoding table built by hufBuildDecTable() // static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table { for (int i = 0; i < HUF_DECSIZE; i++) { if (hdecod[i].p) { delete[] hdecod[i].p; hdecod[i].p = 0; } } } // // ENCODING // inline void outputCode(long long code, long long &c, int &lc, char *&out) { outputBits(hufLength(code), hufCode(code), c, lc, out); } inline void sendCode(long long sCode, int runCount, long long runCode, long long &c, int &lc, char *&out) { // // Output a run of runCount instances of the symbol sCount. // Output the symbols explicitly, or if that is shorter, output // the sCode symbol once followed by a runCode symbol and runCount // expressed as an 8-bit number. 
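// (Illustrative cost check: with a 5-bit symbol code and a 6-bit run code the run form costs 5 + 6 + 8 = 19 bits, so the test 19 < 5 * runCount picks it once runCount >= 4; runCount counts repeats after the first instance, and the else branch emits the symbol runCount + 1 times.)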
// if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) { outputCode(sCode, c, lc, out); outputCode(runCode, c, lc, out); outputBits(8, runCount, c, lc, out); } else { while (runCount-- >= 0) outputCode(sCode, c, lc, out); } } // // Encode (compress) ni values based on the Huffman encoding table hcode: // static int hufEncode // return: output size (in bits) (const long long *hcode, // i : encoding table const unsigned short *in, // i : uncompressed input buffer const int ni, // i : input buffer size (in bytes) int rlc, // i : rl code char *out) // o: compressed output buffer { char *outStart = out; long long c = 0; // bits not yet written to out int lc = 0; // number of valid bits in c (LSB) int s = in[0]; int cs = 0; // // Loop on input values // for (int i = 1; i < ni; i++) { // // Count same values or send code // if (s == in[i] && cs < 255) { cs++; } else { sendCode(hcode[s], cs, hcode[rlc], c, lc, out); cs = 0; } s = in[i]; } // // Send remaining code // sendCode(hcode[s], cs, hcode[rlc], c, lc, out); if (lc) *out = (c << (8 - lc)) & 0xff; return (out - outStart) * 8 + lc; } // // DECODING // // // In order to force the compiler to inline them, // getChar() and getCode() are implemented as macros // instead of "inline" functions. // #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #if 0 #define getCode(po, rlc, c, lc, in, out, ob, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ /* TinyEXR issue 78 */ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } #else static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in, const char *in_end, unsigned short *&out, const unsigned short *ob, const unsigned short *oe) { (void)ob; if (po == rlc) { if (lc < 8) { /* TinyEXR issue 78 */ if ((in + 1) >= in_end) { return false; } getChar(c, lc, in); } lc -= 8; unsigned char cs = (c >> lc); if (out + cs > oe) return false; // Bounds check for safety if ((out - 1) <= ob) return false; unsigned short s = out[-1]; while (cs-- > 0) *out++ = s; } else if (out < oe) { *out++ = po; } else { return false; } return true; } #endif // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; // begin unsigned short *oe = out + no; // end const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; // std::cout << "lit = " << pl.lit << std::endl; // std::cout << "rlc = " << rlc << std::endl; // std::cout << "c = " << c << std::endl; // std::cout << "lc = " << lc << std::endl; // std::cout << "in = " << in << std::endl; // std::cout << "out = " << out << std::endl; // std::cout << "oe = " << oe << std::endl; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { if (!pl.p) { return false; } // 
// invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) { return false; } break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(std::vector<long long> &freq, const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; std::vector<long long> freq(HUF_ENCSIZE); countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, std::vector<unsigned short> *raw) { if (nCompressed == 0) { if (raw->size() != 0) return false; return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be runnable on this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(), raw->data()); } // catch (...)
//{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. 
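// (The range compression applied below -- bitmapFromData plus forwardLutFromBitmap -- remaps the sparse set of 16-bit values that actually occur onto a dense range before the wavelet pass; e.g. if only {0, 5, 9} occur, the forward LUT maps 0 -> 0, 5 -> 1, 9 -> 2 with maxValue = 2, and the reverse LUT restores the originals on decode -- illustrative.)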
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. 
} assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; int precision; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0f; } }; bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) && (attributes[i].size == 1)) { param->type = static_cast<int>(attributes[i].value[0]); foundType = true; } } if (!foundType) { return false; } if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) && (attributes[i].size == 8)) { param->rate = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { for (int i = 0; i < num_attributes; i++) { if 
((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) && (attributes[i].size == 4)) { param->precision = *(reinterpret_cast<int *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) && (attributes[i].size == 8)) { param->tolerance = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else { assert(0); } return false; } // Assume pixel format is FLOAT for all channels. static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines, int num_channels, const unsigned char *src, unsigned long src_size, const ZFPCompressionParam &param) { size_t uncompressed_size = dst_width * dst_num_lines * num_channels; if (uncompressed_size == src_size) { // Data is not compressed (Issue 40). memcpy(dst, src, src_size); return true; } zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((dst_width % 4) == 0); assert((dst_num_lines % 4) == 0); if ((dst_width & 3U) || (dst_num_lines & 3U)) { return false; } field = zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)), zfp_type_float, dst_width, dst_num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2, /* write random access */ 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); std::vector<unsigned char> buf(buf_size); memcpy(&buf.at(0), src, src_size); bitstream *stream = stream_open(&buf.at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_stream_rewind(zfp); size_t image_size = dst_width * dst_num_lines; for (int c = 0; c < num_channels; c++) { // decompress 4x4 pixel block. for (int y = 0; y < dst_num_lines; y += 4) { for (int x = 0; x < dst_width; x += 4) { float fblock[16]; zfp_decode_block_float_2(zfp, fblock); for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { dst[c * image_size + ((y + j) * dst_width + (x + i))] = fblock[j * 4 + i]; } } } } } zfp_field_free(field); zfp_stream_close(zfp); stream_close(stream); return true; } // Assume pixel format is FLOAT for all channels. bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize, const float *inPtr, int width, int num_lines, int num_channels, const ZFPCompressionParam &param) { zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((width % 4) == 0); assert((num_lines % 4) == 0); if ((width & 3U) || (num_lines & 3U)) { return false; } // create input array.
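// (The field describes all channels as a single 2-D float array of num_lines * num_channels rows by width columns, the same stacked layout DecompressZfp assumes above.)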
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, width, num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = width * num_lines; for (int c = 0; c < num_channels; c++) { // compress 4x4 pixel block. for (int y = 0; y < num_lines; y += 4) { for (int x = 0; x < width; x += 4) { float fblock[16]; for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * width + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = zfp_stream_compressed_size(zfp); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // // TODO(syoyo): Refactor function arguments. static bool DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) { // Invalid input #90 return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); assert(ret); (void)ret; // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. 
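// (cpy2 performs a byte-wise two-byte copy; dereferencing line_ptr + u directly could compile into an unaligned 16-bit load, which is undefined behavior and can fault on strict-alignment targets.)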
tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * static_cast<size_t>(x_stride) + channel_offset_list[c] * static_cast<size_t>(x_stride))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... 
for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. 
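// (width * num_lines * pixel_data_size bytes: every channel of every scanline in this chunk, matching the per-scanline channel layout documented in the other branches.)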
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len)); // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if 
(line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * 
                       size_t(x_stride);
            }

            if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
                (data_ptr + data_len)) {
              // Insufficient data size
              return false;
            }

            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;

              // address may not be aligned. use byte-wise copy for safety. #76
              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

              tinyexr::FP32 f32 = half_to_float(hf);

              outLine[u] = f32.f;
            }
          } else {
            assert(0);
            return false;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
          const float *line_ptr = reinterpret_cast<const float *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          float *outLine = reinterpret_cast<float *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine += (size_t(height) - 1 - (size_t(y) + v)) *
                       size_t(x_stride);
          }

          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }

          for (int u = 0; u < width; u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
          const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine += (size_t(height) - 1 - (size_t(y) + v)) *
                       size_t(x_stride);
          }

          for (int u = 0; u < width; u++) {
            if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
                (data_ptr + data_len)) {
              // Corrupted data?
              return false;
            }

            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        }
      }
    }
  }

  return true;
}

static void DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  assert(tile_offset_x * tile_size_x < data_width);
  assert(tile_offset_y * tile_size_y < data_height);

  // Compute actual image size in a tile.
  if ((tile_offset_x + 1) * tile_size_x >= data_width) {
    (*width) = data_width - (tile_offset_x * tile_size_x);
  } else {
    (*width) = tile_size_x;
  }

  if ((tile_offset_y + 1) * tile_size_y >= data_height) {
    (*height) = data_height - (tile_offset_y * tile_size_y);
  } else {
    (*height) = tile_size_y;
  }

  // Image size = tile size.
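  // Illustrative arithmetic (not from the original source): with
  // data_width = 100 and tile_size_x = 32, tiles 0..2 are 32 pixels wide and
  // the last tile (tile_offset_x = 3) is clipped to 100 - 3 * 32 = 4 pixels.
  // Vertical clipping works the same way.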
DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { // ??? return false; } } return true; } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
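      // (Per the OpenEXR 2.x multipart layout: the sequence of part headers
      // is terminated by an empty header, i.e. a single '\0' byte, which is
      // what the check above detects.)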
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window[0] = 0; info->data_window[1] = 0; info->data_window[2] = 0; info->data_window[3] = 0; info->line_order = 0; // @fixme info->display_window[0] = 0; info->display_window[1] = 0; info->display_window[2] = 0; info->display_window[3] = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if (err) { (*err) += "# of channels is zero.\n"; } return 
TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window[0], &data.at(0), sizeof(int)); memcpy(&info->data_window[1], &data.at(4), sizeof(int)); memcpy(&info->data_window[2], &data.at(8), sizeof(int)); memcpy(&info->data_window[3], &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3])); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window[0], &data.at(0), sizeof(int)); memcpy(&info->display_window[1], &data.at(4), sizeof(int)); memcpy(&info->display_window[2], &data.at(8), sizeof(int)); memcpy(&info->display_window[3], &data.at(12), sizeof(int)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[3])); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio)); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[1])); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_width)); has_screen_window_width = true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count)); } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." 
<< std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window[0] = info.display_window[0]; exr_header->display_window[1] = info.display_window[1]; exr_header->display_window[2] = info.display_window[2]; exr_header->display_window[3] = info.display_window[3]; exr_header->data_window[0] = info.data_window[0]; exr_header->data_window[1] = info.data_window[1]; exr_header->data_window[2] = info.data_window[2]; exr_header->data_window[3] = info.data_window[3]; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
    exr_header->channels[c].name[255] = '\0';
    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  }

  exr_header->pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->pixel_types[c] = info.channels[c].pixel_type;
  }

  // Initially fill with values of `pixel_types`
  exr_header->requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }

  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());

  if (exr_header->num_custom_attributes > 0) {
    // TODO(syoyo): Report warning when # of attributes exceeds
    // `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
    if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
      exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
    }

    exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
        sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));

    for (size_t i = 0; i < info.attributes.size(); i++) {
      memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
             256);
      memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
             256);
      exr_header->custom_attributes[i].size = info.attributes[i].size;
      // Just copy pointer
      exr_header->custom_attributes[i].value = info.attributes[i].value;
    }

  } else {
    exr_header->custom_attributes = NULL;
  }

  exr_header->header_len = info.header_len;
}

static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const std::vector<tinyexr::tinyexr_uint64> &offsets,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;

  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
  int data_height =
      exr_header->data_window[3] - exr_header->data_window[1] + 1;

  size_t num_blocks = offsets.size();

  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                     &channel_offset, num_channels,
                                     exr_header->channels)) {
    if (err) {
      (*err) += "Failed to compute channel layout.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  bool invalid_data = false;  // TODO(LTE): Use atomic lock for MT safety.

  if (exr_header->tiled) {
    size_t num_tiles = offsets.size();  // = # of blocks

    exr_image->tiles = static_cast<EXRTile *>(
        calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));

    for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
      // Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, exr_header->tile_size_x, exr_header->tile_size_y); // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) if (offsets[tile_idx] + sizeof(int) * 5 > size) { if (err) { (*err) += "Insufficient data size.\n"; } return TINYEXR_ERROR_INVALID_DATA; } size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3])); // @todo{ LoD } if (tile_coordinates[2] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } if (tile_coordinates[3] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len < 4 || size_t(data_len) > data_size) { if (err) { (*err) += "Insufficient data length.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Move to data addr: 20 = 16 + 4; data_ptr += 20; tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = tile_coordinates[3]; exr_image->num_tiles = static_cast<int>(num_tiles); } } else { // scanline format exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { size_t y_idx = static_cast<size_t>(y); if (offsets[y_idx] + sizeof(int) * 2 > size) { invalid_data = true; } else { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (size_t(data_len) > data_size) { invalid_data = true; } else { int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window[3] + 1)); int num_lines = end_line_no - line_no; // assert(num_lines > 0); if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; 
data_ptr += 8; // Adjust line_no with data_window.bmin.y line_no -= exr_header->data_window[1]; if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } } // omp parallel } if (invalid_data) { return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. { for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; } static bool ReconstructLineOffsets( std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { assert(head < marker); assert(offsets->size() == n); for (size_t i = 0; i < n; i++) { size_t offset = static_cast<size_t>(marker - head); // Offset should not exceed whole EXR file/data size. if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) { return false; } int y; unsigned int data_len; memcpy(&y, marker, sizeof(int)); memcpy(&data_len, marker + 4, sizeof(unsigned int)); if (data_len >= size) { return false; } tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); (*offsets)[i] = offset; marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len) } return true; } static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *head, const unsigned char *marker, const size_t size, const char **err) { if (exr_image == NULL || exr_header == NULL || head == NULL || marker == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0]; if (data_width >= std::numeric_limits<int>::max()) { // Issue 63 tinyexr::SetErrorMessage("Invalid data window value", err); return TINYEXR_ERROR_INVALID_DATA; } data_width++; int data_height = exr_header->data_window[3] - exr_header->data_window[1]; if (data_height >= std::numeric_limits<int>::max()) { tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } data_height++; if ((data_width < 0) || (data_height < 0)) { tinyexr::SetErrorMessage("data window or data height is negative.", err); return TINYEXR_ERROR_INVALID_DATA; } // Read offset tables. size_t num_blocks = 0; if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute. 
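    // Illustrative arithmetic (not from the original source): when
    // `chunkCount` is absent, the scanline branch below computes
    // num_blocks = ceil(data_height / num_scanline_blocks); e.g. 100
    // scanlines with ZIP compression (16 scanlines per chunk) yields
    // 7 blocks.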
num_blocks = static_cast<size_t>(exr_header->chunk_count); } else if (exr_header->tiled) { // @todo { LoD } size_t num_x_tiles = static_cast<size_t>(data_width) / static_cast<size_t>(exr_header->tile_size_x); if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) < static_cast<size_t>(data_width)) { num_x_tiles++; } size_t num_y_tiles = static_cast<size_t>(data_height) / static_cast<size_t>(exr_header->tile_size_y); if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) < static_cast<size_t>(data_height)) { num_y_tiles++; } num_blocks = num_x_tiles * num_y_tiles; } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } } std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks); for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; // Issue #81 if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", err); return TINYEXR_ERROR_INVALID_DATA; } memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offsets[y] = offset; } // If line offsets are invalid, we try to reconstruct it. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details. for (size_t y = 0; y < num_blocks; y++) { if (offsets[y] <= 0) { // TODO(syoyo) Report as warning? // if (err) { // stringstream ss; // ss << "Incomplete lineOffsets." << std::endl; // (*err) += ss.str(); //} bool ret = ReconstructLineOffsets(&offsets, num_blocks, head, marker, size); if (ret) { // OK break; } else { tinyexr::SetErrorMessage( "Cannot reconstruct lineOffset table in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } } } { std::string e; int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } // release memory(if exists) if ((exr_header->num_channels > 0) && exr_image && exr_image->images) { for (size_t c = 0; c < size_t(exr_header->num_channels); c++) { if (exr_image->images[c]) { free(exr_image->images[c]); exr_image->images[c] = NULL; } } free(exr_image->images); exr_image->images = NULL; } } return ret; } } } // namespace tinyexr int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { if (out_rgba == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Invalid EXR header.", err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. 
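  // (LoadEXR always promotes HALF channels by setting
  // `requested_pixel_types[i]` to FLOAT before decoding, so the RGBA buffer
  // returned to the caller is uniformly 32-bit float.)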
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if ((idxA == 0) && (idxR == -1) && (idxG == -1) && (idxB == -1)) { // Alpha channel only. if (exr_header.tiled) { // todo.implement this } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
          if (ii >= exr_image.width) {
            continue;
          }
          if (jj >= exr_image.height) {
            continue;
          }

          const int srcIdx = i + j * exr_header.tile_size_x;
          unsigned char **src = exr_image.tiles[it].images;
          (*out_rgba)[4 * idx + 0] =
              reinterpret_cast<float **>(src)[idxR][srcIdx];
          (*out_rgba)[4 * idx + 1] =
              reinterpret_cast<float **>(src)[idxG][srcIdx];
          (*out_rgba)[4 * idx + 2] =
              reinterpret_cast<float **>(src)[idxB][srcIdx];
          if (idxA != -1) {
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[idxA][srcIdx];
          } else {
            (*out_rgba)[4 * idx + 3] = 1.0;
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}

int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  if (memory == NULL || exr_header == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument. `memory` or `exr_header` argument is null in "
        "ParseEXRHeaderFromMemory()",
        err);

    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  tinyexr::HeaderInfo info;
  info.clear();

  std::string err_str;
  int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);

  if (ret != TINYEXR_SUCCESS) {
    if (err && !err_str.empty()) {
      tinyexr::SetErrorMessage(err_str, err);
    }
  }

  ConvertHeader(exr_header, info);

  // transfer `tiled` from version.
  exr_header->tiled = version->tiled;

  return ret;
}

int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);

  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    tinyexr::SetErrorMessage("Failed to parse EXR version", err);
    return ret;
  }

  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  // Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return 
TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err); return 0; // @fixme } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = { 0x76, 0x2f, 0x31, 0x01 }; memory.insert(memory.end(), header, header + 4); } // Version, scanline. { char marker[] = { 2, 0, 0, 0 }; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. 
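  // An attribute is serialized as (layout per the OpenEXR spec, noted here
  // for reference):
  //
  //   name  : zero-terminated string
  //   type  : zero-terminated string
  //   size  : 4-byte little-endian int (byte count of the value)
  //   value : `size` raw bytes
  //
  // WriteAttributeToMemory() below emits exactly this sequence for each
  // attribute, e.g. "channels\0" "chlist\0" <size> <channel list data>.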
std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = { 0, 0, exr_image->width - 1, exr_image->height - 1 }; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = { 0.0f, 0.0f }; tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<unsigned char> data; std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += 
sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( 
static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. 
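      // (Rationale, stated as an assumption: EXR RLE spends one count byte
      // per run, so even incompressible data grows by only about one byte
      // per 127 literals; 1.5x the input size is therefore a comfortable
      // worst-case bound for the scratch buffer.)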
std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 8192 + static_cast<unsigned int>( 2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. } std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { data.insert(data.end(), data_list[i].begin(), data_list[i].end()); offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } { memory.insert( memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } { memory.insert(memory.end(), data.begin(), data.end()); } assert(memory.size() > 0); (*memory_out) = static_cast<unsigned char *>(malloc(memory.size())); memcpy((*memory_out), &memory.at(0), memory.size()); return memory.size(); // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char 
**err) {
  if (exr_image == NULL || filename == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  }
#endif

#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  }
#endif

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "wb");
#else
  FILE *fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);

  if ((mem_size > 0) && mem) {
    fwrite(mem, 1, mem_size, fp);
  }

  free(mem);

  fclose(fp);

  return TINYEXR_SUCCESS;
}

int LoadDeepEXR(DeepImage *deep_image, const char *filename,
                const char **err) {
  if (deep_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _MSC_VER
  FILE *fp = NULL;
  errno_t errcode = fopen_s(&fp, filename, "rb");
  if ((0 != errcode) || (!fp)) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#endif

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (filesize == 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    (void)ret;
  }
  fclose(fp);

  const char *head = &buf[0];
  const char *marker = &buf[0];

  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      tinyexr::SetErrorMessage("Invalid magic number", err);
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  // Version, scanline.
  {
    // ver 2.0, scanline, deep bit on(0x800)
    // must be [2, 8, 0, 0] (version 2, deep bit set in the second byte)
    if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
      tinyexr::SetErrorMessage("Unsupported version or scanline", err);
      return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
    }

    marker += 4;
  }

  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int num_scanline_blocks = 1;  // 16 for ZIP compression.
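  // Per the OpenEXR file layout, a chunk holds a single scanline for
  // NONE/RLE/ZIPS compression and 16 scanlines for ZIP (PIZ would hold 32);
  // only the ZIP case is adjusted below.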
int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh)); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&h)); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. 
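  // One little-endian int64 per chunk, measured from the start of the file;
  // a scanline part has ceil(data_height / num_scanline_blocks) chunks.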
  int num_blocks = data_height / num_scanline_blocks;
  if (num_blocks * num_scanline_blocks < data_height) {
    num_blocks++;
  }

  std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));

  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    tinyexr::tinyexr_int64 offset;
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
    marker += sizeof(tinyexr::tinyexr_int64);  // = 8
    offsets[y] = offset;
  }

#if TINYEXR_USE_PIZ
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
    // OK
  } else {
    tinyexr::SetErrorMessage("Unsupported compression format", err);
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  }

  deep_image->image = static_cast<float ***>(
      malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
  for (int c = 0; c < num_channels; c++) {
    deep_image->image[c] = static_cast<float **>(
        malloc(sizeof(float *) * static_cast<size_t>(data_height)));
  }

  deep_image->offset_table = static_cast<int **>(
      malloc(sizeof(int *) * static_cast<size_t>(data_height)));
  for (int y = 0; y < data_height; y++) {
    deep_image->offset_table[y] = static_cast<int *>(
        malloc(sizeof(int) * static_cast<size_t>(data_width)));
  }

  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    const unsigned char *data_ptr =
        reinterpret_cast<const unsigned char *>(head + offsets[y]);

    // int: y coordinate
    // int64: packed size of pixel offset table
    // int64: packed size of sample data
    // int64: unpacked size of sample data
    // compressed pixel offset table
    // compressed sample data
    int line_no;
    tinyexr::tinyexr_int64 packedOffsetTableSize;
    tinyexr::tinyexr_int64 packedSampleDataSize;
    tinyexr::tinyexr_int64 unpackedSampleDataSize;
    memcpy(&line_no, data_ptr, sizeof(int));
    memcpy(&packedOffsetTableSize, data_ptr + 4,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&packedSampleDataSize, data_ptr + 12,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&unpackedSampleDataSize, data_ptr + 20,
           sizeof(tinyexr::tinyexr_int64));

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));

    std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));

    // decode pixel offset table.
    {
      unsigned long dstLen =
          static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
      if (!tinyexr::DecompressZip(
              reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
              &dstLen, data_ptr + 28,
              static_cast<unsigned long>(packedOffsetTableSize))) {
        return TINYEXR_ERROR_INVALID_DATA;
      }

      assert(dstLen == pixelOffsetTable.size() * sizeof(int));
      for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
        deep_image->offset_table[y][i] = pixelOffsetTable[i];
      }
    }

    std::vector<unsigned char> sample_data(
        static_cast<size_t>(unpackedSampleDataSize));

    // decode sample data.
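    // The sample data is ZIP-decompressed below; an unpacked size of zero
    // means the scanline block holds no samples, so the step is skipped.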
    {
      unsigned long dstLen =
          static_cast<unsigned long>(unpackedSampleDataSize);
      if (dstLen) {
        if (!tinyexr::DecompressZip(
                reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
                data_ptr + 28 + packedOffsetTableSize,
                static_cast<unsigned long>(packedSampleDataSize))) {
          return TINYEXR_ERROR_INVALID_DATA;
        }
        assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
      }
    }

    // decode sample
    int sampleSize = -1;
    std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
    {
      int channel_offset = 0;
      for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
        channel_offset_list[i] = channel_offset;
        if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) {  // UINT
          channel_offset += 4;
        } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) {  // half
          channel_offset += 2;
        } else if (channels[i].pixel_type ==
                   TINYEXR_PIXELTYPE_FLOAT) {  // float
          channel_offset += 4;
        } else {
          assert(0);
        }
      }
      sampleSize = channel_offset;
    }
    assert(sampleSize >= 2);

    assert(static_cast<size_t>(
               pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
               sampleSize) == sample_data.size());

    int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;

    //
    // Alloc memory
    //

    //
    // pixel data is stored as image[channels][pixel_samples]
    //
    {
      tinyexr::tinyexr_uint64 data_offset = 0;
      for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
        deep_image->image[c][y] = static_cast<float *>(
            malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));

        if (channels[c].pixel_type == 0) {  // UINT
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            unsigned int ui;
            unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(int)));
            tinyexr::cpy4(&ui, src_ptr);
            deep_image->image[c][y][x] = static_cast<float>(ui);  // @fixme
          }
          data_offset +=
              sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
        } else if (channels[c].pixel_type == 1) {  // half
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            tinyexr::FP16 f16;
            const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(short)));
            tinyexr::cpy2(&(f16.u), src_ptr);
            tinyexr::FP32 f32 = half_to_float(f16);
            deep_image->image[c][y][x] = f32.f;
          }
          data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
        } else {  // float
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            float f;
            const float *src_ptr = reinterpret_cast<float *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(float)));
            tinyexr::cpy4(&f, src_ptr);
            deep_image->image[c][y][x] = f;
          }
          data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
        }
      }
    }
  }  // y

  deep_image->width = data_width;
  deep_image->height = data_height;

  deep_image->channel_names = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
    deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
    deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
  }
  deep_image->num_channels = num_channels;

  return TINYEXR_SUCCESS;
}

void InitEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return;
  }

  exr_image->width = 0;
  exr_image->height = 0;
  exr_image->num_channels = 0;

  exr_image->images = NULL;
  exr_image->tiles = NULL;

  exr_image->num_tiles = 0;
}

void FreeEXRErrorMessage(const char *msg) {
  if (msg) {
    free(reinterpret_cast<void *>(const_cast<char *>(msg)));
  }
  return;
}

void InitEXRHeader(EXRHeader *exr_header) {
  if
(exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. 
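    // The OpenEXR spec requires every part header of a multi-part file to
    // carry an explicit `chunkCount' attribute; a zero value here means the
    // attribute was absent.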
    if (info.chunk_count == 0) {
      tinyexr::SetErrorMessage(
          "`chunkCount' attribute is not found in the header.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    infos.push_back(info);

    // move to next header.
    marker += info.header_len;
    size -= info.header_len;
  }

  // allocate memory for EXRHeader and create array of EXRHeader pointers.
  (*exr_headers) =
      static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));

  for (size_t i = 0; i < infos.size(); i++) {
    EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));

    ConvertHeader(exr_header, infos[i]);

    // transfer `tiled` from version.
    exr_header->tiled = exr_version->tiled;

    (*exr_headers)[i] = exr_header;
  }

  (*num_headers) = static_cast<int>(infos.size());

  return TINYEXR_SUCCESS;
}

int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
                                    const EXRVersion *exr_version,
                                    const char *filename, const char **err) {
  if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);

    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. File may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRMultipartHeaderFromMemory(
      exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}

int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
                              size_t size) {
  if (version == NULL || memory == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory;

  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  version->tiled = false;
  version->long_name = false;
  version->non_image = false;
  version->multipart = false;

  // Parse version header.
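  // Byte 0 of the 4-byte version field holds the version number (must be 2);
  // the feature flag bits tested below live in byte 1 (tiled, long names,
  // non-image/deep, multipart).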
  {
    // must be 2
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }

    version->version = 2;

    if (marker[1] & 0x2) {  // 9th bit
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // 10th bit
      version->long_name = true;
    }
    if (marker[1] & 0x8) {  // 11th bit
      version->non_image = true;  // (deep image)
    }
    if (marker[1] & 0x10) {  // 12th bit
      version->multipart = true;
    }
  }

  return TINYEXR_SUCCESS;
}

int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t file_size;
  // Compute size
  fseek(fp, 0, SEEK_END);
  file_size = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (file_size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  unsigned char buf[tinyexr::kEXRVersionSize];
  size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  fclose(fp);

  if (ret != tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}

int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
                                    const EXRHeader **exr_headers,
                                    unsigned int num_parts,
                                    const unsigned char *memory,
                                    const size_t size, const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromMemory()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // compute total header size.
  size_t total_header_size = 0;
  for (unsigned int i = 0; i < num_parts; i++) {
    if (exr_headers[i]->header_len == 0) {
      tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
      return TINYEXR_ERROR_INVALID_ARGUMENT;
    }

    total_header_size += exr_headers[i]->header_len;
  }

  const char *marker =
      reinterpret_cast<const char *>(memory + total_header_size + 4 +
                                     4);  // +8 for magic number and version header.

  marker += 1;  // Skip empty header.

  // NOTE 1:
  // In a multipart image, there is a 'part number' before the chunk data.
  // 4 byte : part number
  // 4+     : chunk
  //
  // NOTE 2:
  // EXR spec says 'part number' is 'unsigned long' but actually this is
  // 'unsigned int(4 bytes)' in OpenEXR implementation...
  // http://www.openexr.com/openexrfilelayout.pdf

  // Load chunk offset table.
  std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> offset_table(
        static_cast<size_t>(exr_headers[i]->chunk_count));

    for (size_t c = 0; c < offset_table.size(); c++) {
      tinyexr::tinyexr_uint64 offset;
      memcpy(&offset, marker, 8);
      tinyexr::swap8(&offset);

      if (offset >= size) {
        tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                 err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      offset_table[c] = offset + 4;  // +4 to skip 'part number'
      marker += 8;
    }

    chunk_offset_table_list.push_back(offset_table);
  }

  // Decode image.
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> &offset_table =
        chunk_offset_table_list[i];

    // First check 'part number' is identical to 'i'
    for (size_t c = 0; c < offset_table.size(); c++) {
      const unsigned char *part_number_addr =
          memory + offset_table[c] - 4;  // -4 to move to 'part number' field.
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename) { if ((components == 1) || components == 3 || components == 4) { // OK } else { return TINYEXR_ERROR_INVALID_ARGUMENT; } // Assume at least 16x16 pixels. if (width < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; if (height < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; EXRHeader header; InitEXRHeader(&header); EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... 
into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = { 0, 0, 0, 0 }; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. // no precision reduction) } } const char *err; int ret = SaveEXRImageToFile(&image, &header, outfilename, &err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEIFNED #endif // TINYEXR_IMPLEMENTATION
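// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the library): decoding a file
// with the one-shot LoadEXR() API declared below. "input.exr" is a
// placeholder filename. The returned buffer is always width x height RGBA
// floats and must be released with free(); error strings must be released
// with FreeEXRErrorMessage().
//
//   float *rgba = NULL;
//   int width = 0, height = 0;
//   const char *err = NULL;
//   int ret = LoadEXR(&rgba, &width, &height, "input.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) {
//       fprintf(stderr, "EXR load error: %s\n", err);
//       FreeEXRErrorMessage(err);
//     }
//   } else {
//     // Pixel (x, y), channel c (0 = R, 1 = G, 2 = B, 3 = A) lives at
//     // rgba[4 * (y * width + x) + c].
//     free(rgba);
//   }
// ---------------------------------------------------------------------------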
// TinyEXR contains some OpenEXR code, which is licensed under ------------

///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////

// End of OpenEXR license -------------------------------------------------

#ifndef TINYEXR_H_
#define TINYEXR_H_

//
//
// Do this:
//    #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
//
// i.e. it should look like this:
//    #include ...
//    #include ...
//    #include ...
//    #define TINYEXR_IMPLEMENTATION
//    #include "tinyexr.h"
//
//

#include <stddef.h>  // for size_t
#include <stdint.h>  // guess stdint.h is available(C99)

#ifdef __cplusplus
extern "C" {
#endif

// Whether to use the embedded miniz to decode ZIP-compressed pixel data.
// Linking with zlib is required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif

// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif

#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0)  // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-5)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-6)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7)
#define TINYEXR_ERROR_INVALID_HEADER (-8)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-9)

// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }

// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)

#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)

#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128)  // TinyEXR extension

#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)

#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)

#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)

typedef struct _EXRVersion {
  int version;    // this must be 2
  int tiled;      // tile format image
  int long_name;  // long name attribute
  int non_image;  // deep image(EXR 2.0)
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;

typedef struct _EXRAttribute {
  char name[256];  // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*
  int size;
  int pad0;
} EXRAttribute;

typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} EXRChannelInfo;

typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;

  int width;   // actual width in a tile.
  int height;  // actual height in a tile.

  unsigned char **images;  // image[channels][pixels]
} EXRTile;

typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  int data_window[4];
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;

  int chunk_count;

  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  int long_name;
  int non_image;
  int multipart;
  unsigned int header_len;

  // Custom attributes (excludes required attributes, e.g. `channels`,
  // `compression`, etc.)
  int num_custom_attributes;
  EXRAttribute *custom_attributes;  // array of EXRAttribute. size =
                                    // `num_custom_attributes`.

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;

  int compression_type;        // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it(only valid for HALF pixel type
                               // channel)
} EXRHeader;

typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;
} EXRMultiPartHeader;

typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.

  int width;
  int height;
  int num_channels;

  // Properties for tile format.
  int num_tiles;
} EXRImage;

typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;
} EXRMultiPartImage;

typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;
} DeepImage;

// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
                   const char *filename, const char **err);

// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is a non-zero value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
extern int SaveEXR(const float *data, const int width, const int height,
                   const int components, const int save_as_fp16,
                   const char *filename);

// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);

// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);

// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);

// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);

// Frees error message
extern void FreeEXRErrorMessage(const char *msg);

// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);

// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
                                     const unsigned char *memory, size_t size);

// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
                                  const char *filename, const char **err);

// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
                                    const EXRVersion *version,
                                    const unsigned char *memory, size_t size,
                                    const char **err);

// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
                                           int *num_headers,
                                           const EXRVersion *version,
                                           const char *filename,
                                           const char **err);

// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
                                             int *num_headers,
                                             const EXRVersion *version,
                                             const unsigned char *memory,
                                             size_t size, const char **err);

// Loads single-part OpenEXR image from a file.
// Application must set up `EXRHeader` with `ParseEXRHeaderFromFile` before
// calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
                                const char *filename, const char **err);

// Loads single-part OpenEXR image from memory.
// Application must set up `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
                                  const unsigned char *memory,
                                  const size_t size, const char **err);

// Loads multi-part OpenEXR image from a file.
// Application must set up `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
                                         const EXRHeader **headers,
                                         unsigned int num_parts,
                                         const char *filename,
                                         const char **err);

// Loads multi-part OpenEXR image from memory.
// Application must set up `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
                                           const EXRHeader **headers,
                                           unsigned int num_parts,
                                           const unsigned char *memory,
                                           const size_t size,
                                           const char **err);

// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
                              const EXRHeader *exr_header,
                              const char *filename, const char **err);

// Saves multi-channel, single-frame OpenEXR image to memory.
// Image is compressed using EXRHeader.compression_type value.
// Returns the number of bytes on success.
// Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern size_t SaveEXRImageToMemory(const EXRImage *image, const EXRHeader *exr_header, unsigned char **memory, const char **err); // Loads single-frame OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadDeepEXR(DeepImage *out_image, const char *filename, const char **err); // NOT YET IMPLEMENTED: // Saves single-frame OpenEXR deep image. // Returns negative value and may set error string in `err` when there's an // error // extern int SaveDeepEXR(const DeepImage *in_image, const char *filename, // const char **err); // NOT YET IMPLEMENTED: // Loads multi-part OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const // char *filename, // const char **err); // For emscripten. // Loads single-frame OpenEXR image from memory. Assume EXR image contains // RGB(A) channels. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err); #ifdef __cplusplus } #endif #endif // TINYEXR_H_ #ifdef TINYEXR_IMPLEMENTATION #ifndef TINYEXR_IMPLEMENTATION_DEIFNED #define TINYEXR_IMPLEMENTATION_DEIFNED #include <algorithm> #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <iostream> #include <sstream> #include <limits> #include <string> #include <vector> #if __cplusplus > 199711L // C++11 #include <cstdint> #endif // __cplusplus > 199711L #if TINYEXR_USE_MINIZ #else // Issue #46. Please include your own zlib-compatible API header before // including `tinyexr.h` //#include "zlib.h" #endif #if TINYEXR_USE_ZFP #include "zfp.h" #endif namespace tinyexr { #if __cplusplus > 199711L // C++11 typedef uint64_t tinyexr_uint64; typedef int64_t tinyexr_int64; #else // Although `long long` is not a standard type pre C++11, assume it is defined // as a compiler's extension. 
#ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif #if TINYEXR_USE_MINIZ namespace miniz { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #pragma clang diagnostic ignored "-Wundef" #if __has_warning("-Wcomma") #pragma clang diagnostic ignored "-Wcomma" #endif #if __has_warning("-Wmacro-redefined") #pragma clang diagnostic ignored "-Wmacro-redefined" #endif #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #if __has_warning("-Wtautological-constant-compare") #pragma clang diagnostic ignored "-Wtautological-constant-compare" #endif #endif /* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <[email protected]>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks [email protected]) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. 
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From [email protected] and [email protected] - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. - Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <[email protected]> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. 
The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. 
The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). */ #ifndef MINIZ_HEADER_INCLUDED #define MINIZ_HEADER_INCLUDED //#include <stdlib.h> // Defines to completely disable specific portions of miniz.c: // If all macros here are defined the only functionality remaining will be // CRC-32, adler-32, tinfl, and tdefl. // Define MINIZ_NO_STDIO to disable all usage and any functions which rely on // stdio for file I/O. //#define MINIZ_NO_STDIO // If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able // to get the current time, or // get/set file times, and the C run-time funcs that get/set times won't be // called. // The current downside is the times written to your archives will be from 1979. #define MINIZ_NO_TIME // Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. 
#define MINIZ_NO_ARCHIVE_APIS // Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive // API's. //#define MINIZ_NO_ARCHIVE_WRITING_APIS // Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression // API's. //#define MINIZ_NO_ZLIB_APIS // Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent // conflicts against stock zlib. //#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. // Note if MINIZ_NO_MALLOC is defined then the user must always provide custom // user alloc/free/realloc // callbacks to the zlib and archive API's, and a few stand-alone helper API's // which don't provide custom user // functions (such as tdefl_compress_mem_to_heap() and // tinfl_decompress_mem_to_heap()) won't work. //#define MINIZ_NO_MALLOC #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) // TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc // on Linux #define MINIZ_NO_TIME #endif #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) //#include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #endif #if MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient // integer loads and stores from unaligned addresses. //#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \ 0 // disable to suppress compiler warnings #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are // reasonably fast (and don't involve compiler generated calls to helper // functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some // parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! typedef unsigned long mz_ulong; // mz_free() internally uses the MZ_FREE() macro (which by default calls free() // unless you've modified the MZ_MALLOC macro) to release a block allocated from // the heap. void mz_free(void *p); #define MZ_ADLER32_INIT (1) // mz_adler32() returns the initial adler-32 value to use when called with // ptr==NULL. mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len); #define MZ_CRC32_INIT (0) // mz_crc32() returns the initial CRC-32 value to use when called with // ptr==NULL. mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len); // Compression strategies. enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 }; // Method #define MZ_DEFLATED 8 #ifndef MINIZ_NO_ZLIB_APIS // Heap allocation callbacks. // Note that mz_alloc_func parameter types purpsosely differ from zlib's: // items/size is size_t, not unsigned long. 
#ifndef MINIZ_NO_ZLIB_APIS // Heap allocation callbacks. // Note that mz_alloc_func parameter types purposely differ from zlib's: // items/size is size_t, not unsigned long. typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size); typedef void(*mz_free_func)(void *opaque, void *address); typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size); #define MZ_VERSION "9.1.15" #define MZ_VERNUM 0x91F0 #define MZ_VER_MAJOR 9 #define MZ_VER_MINOR 1 #define MZ_VER_REVISION 15 #define MZ_VER_SUBREVISION 0 // Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The // other values are for advanced use (refer to the zlib docs). enum { MZ_NO_FLUSH = 0, MZ_PARTIAL_FLUSH = 1, MZ_SYNC_FLUSH = 2, MZ_FULL_FLUSH = 3, MZ_FINISH = 4, MZ_BLOCK = 5 }; // Return status codes. MZ_PARAM_ERROR is non-standard. enum { MZ_OK = 0, MZ_STREAM_END = 1, MZ_NEED_DICT = 2, MZ_ERRNO = -1, MZ_STREAM_ERROR = -2, MZ_DATA_ERROR = -3, MZ_MEM_ERROR = -4, MZ_BUF_ERROR = -5, MZ_VERSION_ERROR = -6, MZ_PARAM_ERROR = -10000 }; // Compression levels: 0-9 are the standard zlib-style levels, 10 is best // possible compression (not zlib compatible, and may be very slow), // MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. is currently only enabled when // MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflateInit(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything.
Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. // On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). // MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. 
// MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION 
MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. #ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif // ------------------- ZIP archive reading/writing #ifndef MINIZ_NO_ARCHIVE_APIS enum { MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 }; typedef struct { mz_uint32 m_file_index; mz_uint32 m_central_dir_ofs; mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME time_t m_time; #endif mz_uint32 m_crc32; mz_uint64 m_comp_size; mz_uint64 m_uncomp_size; mz_uint16 m_internal_attr; mz_uint32 m_external_attr; mz_uint64 m_local_header_ofs; mz_uint32 m_comment_size; char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t(*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t(*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef struct mz_zip_archive_tag { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; mz_uint m_total_files; mz_zip_mode m_zip_mode; mz_uint m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 } mz_zip_flags; // ZIP archive reading // Inits a ZIP archive reader. // These functions read and validate the archive's central directory. mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); #endif // Returns the total number of files in the archive. mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); // Returns detailed information about an archive file entry. mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); // Determines if an archive file entry is a directory entry. mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); // Retrieves the filename of an archive file entry. 
// Returns the number of bytes written to pFilename, or if filename_buf_size is // 0 this function returns the number of bytes needed to fully store the // filename. mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size); // Attempts to locate a file in the archive's central directory. // Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH // Returns -1 if the file cannot be found. int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); // Extracts an archive file to a memory buffer using no memory allocation. mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); // Extracts an archive file to a memory buffer. mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags); // Extracts an archive file to a dynamically allocated heap buffer. void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags); void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags); // Extracts an archive file using a callback function to output the file's data. mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); #ifndef MINIZ_NO_STDIO // Extracts an archive file to a disk file and sets its last accessed and // modified times. // This function only extracts files, not archive directory records. mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags); #endif // Ends archive reading, freeing all allocations, and closing the input archive // file if mz_zip_reader_init_file() was used. mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
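// Example (editor's sketch; filenames are placeholders, and a build with the
// archive APIs enabled is assumed): the typical read/extract flow using the
// reader API declared above — init, locate, extract, end:
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));                  // struct must start zeroed
//   if (mz_zip_reader_init_file(&zip, "archive.zip", 0)) {
//     int idx = mz_zip_reader_locate_file(&zip, "greeting.txt", NULL, 0);
//     if (idx >= 0) {
//       size_t size = 0;
//       void *p = mz_zip_reader_extract_to_heap(&zip, (mz_uint)idx, &size, 0);
//       if (p) { /* ... use size bytes at p ... */ mz_free(p); }
//     }
//     mz_zip_reader_end(&zip);                     // frees reader allocations
//   }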
// ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS // Inits a ZIP archive writer. mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size); mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning); #endif // Converts a ZIP archive reader object into a writer object, to allow efficient // in-place file appends to occur on an existing archive. // For archives opened using mz_zip_reader_init_file, pFilename must be the // archive's filename so it can be reopened for writing. If the file can't be // reopened, mz_zip_reader_end() will be called. // For archives opened using mz_zip_reader_init_mem, the memory block must be // growable using the realloc callback (which defaults to realloc unless you've // overridden it). // Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's // user provided m_pWrite function cannot be NULL. // Note: In-place archive modification is not recommended unless you know what // you're doing, because if execution stops or something goes wrong before // the archive is finalized the file's central directory will be hosed. mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename); // Adds the contents of a memory buffer to an archive. These functions record // the current local time into the archive. // To add a directory entry, call this method with an archive name ending in a // forward slash and an empty buffer. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags); mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32); #ifndef MINIZ_NO_STDIO // Adds the contents of a disk file to an archive. This function also records // the disk file's modified time into the archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); #endif // Adds a file to an archive by fully cloning the data from another archive. // This function fully clones the source file's compressed data (no // recompression), along with its full filename, extra data, and comment fields. mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index); // Finalizes the archive by writing the central directory records followed by // the end of central directory record. // After an archive is finalized, the only valid call on the mz_zip_archive // struct is mz_zip_writer_end(). // An archive must be manually finalized by calling this function for it to be // valid. mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize); // Ends archive writing, freeing all allocations, and closing the output file if // mz_zip_writer_init_file() was used. // Note for the archive to be valid, it must have been finalized before ending. mz_bool mz_zip_writer_end(mz_zip_archive *pZip); // Misc. high-level helper functions: // mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) // appends a memory blob to a ZIP archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION.
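// Example (editor's sketch; filenames are placeholders): the safe "write a
// fresh archive" flow using the writer API above — note the archive is only
// valid once mz_zip_writer_finalize_archive() has been called:
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (mz_zip_writer_init_file(&zip, "new.zip", 0)) {
//     mz_zip_writer_add_mem(&zip, "notes/readme.txt", "hi", 2,
//                           (mz_uint)MZ_DEFAULT_LEVEL);
//     mz_zip_writer_finalize_archive(&zip);  // writes the central directory
//     mz_zip_writer_end(&zip);               // closes the file, frees memory
//   }
//
// The single-call helper declared next wraps a similar sequence (plus the
// reader-to-writer conversion) for simple append use cases.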
mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); // Reads a single file from an archive into a heap block. // Returns NULL on failure. void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS // ------------------- Low-level Decompression API Definitions // Decompression flags used by tinfl_decompress(). // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and // ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the // input is a raw deflate stream. // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available // beyond the end of the supplied input buffer. If clear, the input buffer // contains all remaining input. // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large // enough to hold the entire decompressed stream. If clear, the output buffer is // at least the size of the dictionary (typically 32KB). // TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int(*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. 
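// Example (editor's sketch): single-call decompression with the high-level
// tinfl helper documented above, for a buffer assumed to hold a complete zlib
// stream (pComp_data/comp_len are caller-provided placeholders):
//
//   size_t out_len = 0;
//   void *pOut = tinfl_decompress_mem_to_heap(pComp_data, comp_len, &out_len,
//                                             TINFL_FLAG_PARSE_ZLIB_HEADER);
//   if (pOut) { /* out_len bytes of decompressed data */ mz_free(pOut); }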
#define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly // slower, and raw/dynamic blocks will be output more frequently). #define TDEFL_LESS_MEMORY 0 // tdefl_init() compression flags logically OR'd together (low 12 bits contain // the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes // per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap // compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before // the deflate data, and the Adler-32 of the source data at the end. Otherwise, // you'll get raw deflate data. // TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). 
enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against // the above flags. Higher probes are slower but improve compression. // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pOut_len will be set to the compressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must mz_free() the returned block when it's no longer needed. void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tdefl_compress_mem_to_mem() compresses a block in memory to another block in // memory. // Returns 0 on failure. size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // Compresses an image to a compressed PNG file in memory. // On entry: // pImage, w, h, and num_chans describe the image to compress. num_chans may be // 1, 2, 3, or 4. // The image pitch in bytes per scanline will be w*num_chans. The leftmost // pixel on the top scanline is stored first in memory. // level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL as a decent default. // If flip is true, the image will be flipped on the Y axis (useful for OpenGL // apps). // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pLen_out will be set to the size of the PNG image file. // The caller must mz_free() the returned heap block (which will typically be // larger than *pLen_out) when it's no longer needed. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip); void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); // Output stream interface. The compressor uses this interface to write // compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. typedef mz_bool(*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); // tdefl_compress_mem_to_output() compresses a block to an output stream. The // above helpers use this function internally. mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 };
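// Example (editor's sketch): writing an RGB image as an in-memory PNG file
// with the helper documented above. pPixels is an assumed caller-provided
// buffer of w*h*3 tightly packed bytes:
//
//   size_t png_len = 0;
//   void *pPng = tdefl_write_image_to_png_file_in_memory(pPixels, w, h, 3,
//                                                        &png_len);
//   if (pPng) {
//     // ... write png_len bytes at pPng to disk ...
//     mz_free(pPng);  // returned block must be released with mz_free()
//   }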
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed // output block (using static/fixed Huffman codes). #if TDEFL_LESS_MEMORY enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #else enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #endif // The low-level tdefl functions below may be used directly if the above helper // functions aren't flexible enough. The low-level functions don't make any heap // allocations, unlike the above helper functions. typedef enum { TDEFL_STATUS_BAD_PARAM = -2, TDEFL_STATUS_PUT_BUF_FAILED = -1, TDEFL_STATUS_OKAY = 0, TDEFL_STATUS_DONE = 1 } tdefl_status; // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums typedef enum { TDEFL_NO_FLUSH = 0, TDEFL_SYNC_FLUSH = 2, TDEFL_FULL_FLUSH = 3, TDEFL_FINISH = 4 } tdefl_flush; // tdefl's compression state structure. typedef struct { tdefl_put_buf_func_ptr m_pPut_buf_func; void *m_pPut_buf_user; mz_uint m_flags, m_max_probes[2]; int m_greedy_parsing; mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size; mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end; mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, m_bit_buffer; mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, m_wants_to_finish; tdefl_status m_prev_return_status; const void *m_pIn_buf; void *m_pOut_buf; size_t *m_pIn_buf_size, *m_pOut_buf_size; tdefl_flush m_flush; const mz_uint8 *m_pSrc; size_t m_src_buf_left, m_out_buf_ofs; mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1]; mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE]; mz_uint16 m_next[TDEFL_LZ_DICT_SIZE]; mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE]; mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE]; } tdefl_compressor; // Initializes the compressor. // There is no corresponding deinit() function because the tdefl API's do not // dynamically allocate memory. // pPut_buf_func: If non-NULL, output data will be supplied to the specified // callback. In this case, the user should call the tdefl_compress_buffer() API // for compression. // If pPut_buf_func is NULL the user should always call the tdefl_compress() // API. // flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, // etc.) tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); // Compresses a block of data, consuming as much of the specified input buffer // as possible, and writing as much compressed data to the specified output // buffer as possible. tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush);
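// Example (editor's sketch): one-shot use of the low-level API above with no
// output callback — init with pPut_buf_func == NULL, then drive
// tdefl_compress(). pSrc/pDst/src_len/cap are assumed caller-provided buffers
// and sizes:
//
//   static tdefl_compressor s_comp;  // ~300 KB; usually too large for the stack
//   size_t in_len = src_len, out_len = cap;
//   tdefl_init(&s_comp, NULL, NULL,
//              TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
//   tdefl_status st = tdefl_compress(&s_comp, pSrc, &in_len,
//                                    pDst, &out_len, TDEFL_FINISH);
//   // st == TDEFL_STATUS_DONE on success; in_len/out_len now hold the number
//   // of input bytes consumed and output bytes produced, respectively.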
// tdefl_compress_buffer() is only usable when tdefl_init() is called with a // non-NULL tdefl_put_buf_func_ptr. // tdefl_compress_buffer() always consumes the entire input buffer. tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush); tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d); mz_uint32 tdefl_get_adler32(tdefl_compressor *d); // Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS is // defined, because it uses some of its macros. #ifndef MINIZ_NO_ZLIB_APIS // Create tdefl_compress() flags given zlib-style compression parameters. // level may range from [0,10] (where 10 is absolute max compression, but may be // much slower on some files) // window_bits may be -15 (raw deflate) or 15 (zlib) // strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, // MZ_RLE, or MZ_FIXED mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy); #endif // #ifndef MINIZ_NO_ZLIB_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_INCLUDED // ------------------- End of Header: Implementation follows. (If you only want // the header, define MINIZ_HEADER_FILE_ONLY.) #ifndef MINIZ_HEADER_FILE_ONLY typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; //#include <assert.h> //#include <string.h> #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) (void)x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b)) #define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj)) #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN #define MZ_READ_LE16(p) *((const mz_uint16 *)(p)) #define MZ_READ_LE32(p) *((const mz_uint32 *)(p)) #else #define MZ_READ_LE16(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U)) #define MZ_READ_LE32(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U)) #endif #ifdef _MSC_VER #define MZ_FORCEINLINE __forceinline #elif defined(__GNUC__) #define MZ_FORCEINLINE inline __attribute__((__always_inline__)) #else #define MZ_FORCEINLINE inline #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API's mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552; if (!ptr) return MZ_ADLER32_INIT; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } return (s2 << 16) + s1; } // Karl Malbrain's compact CRC-32.
See "A compact CCITT crc16 and crc32 C // implementation that balances processor cache usage against speed": // http://www.geocities.com/malbrain/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c }; mz_uint32 crcu32 = (mz_uint32)crc; if (!ptr) return MZ_CRC32_INIT; crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; } return ~crcu32; } void mz_free(void *p) { MZ_FREE(p); } #ifndef MINIZ_NO_ZLIB_APIS static void *def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); } static void def_free_func(void *opaque, void *address) { (void)opaque, (void)address; MZ_FREE(address); } // static void *def_realloc_func(void *opaque, void *address, size_t items, // size_t size) { // (void)opaque, (void)address, (void)items, (void)size; // return MZ_REALLOC(address, items * size); //} const char *mz_version(void) { return MZ_VERSION; } int mz_deflateInit(mz_streamp pStream, int level) { return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy) { tdefl_compressor *pComp; mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); if (!pStream) return MZ_STREAM_ERROR; if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = MZ_ADLER32_INIT; pStream->msg = NULL; pStream->reserved = 0; pStream->total_in = 0; pStream->total_out = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor)); if (!pComp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pComp; if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { mz_deflateEnd(pStream); return MZ_PARAM_ERROR; } return MZ_OK; } int mz_deflateReset(mz_streamp pStream) { if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR; pStream->total_in = pStream->total_out = 0; tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags); return MZ_OK; } int mz_deflate(mz_streamp pStream, int flush) { size_t in_bytes, out_bytes; mz_ulong orig_total_in, orig_total_out; int mz_status = MZ_OK; if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR; if (!pStream->avail_out) return MZ_BUF_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE) return (flush == MZ_FINISH) ? 
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for (;;) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; // Can't make forward progress without some input. } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; // This is really over conservative. (And lame, but it's actually pretty // tricky to compute a true upper bound given the way tdefl's blocking works.) return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? 
MZ_BUF_ERROR : status; } *pDest_len = stream.total_out; return mz_deflateEnd(&stream); } int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); } mz_ulong mz_compressBound(mz_ulong source_len) { return mz_deflateBound(NULL, source_len); } typedef struct { tinfl_decompressor m_decomp; mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits; mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; tinfl_status m_last_status; } inflate_state; int mz_inflateInit2(mz_streamp pStream, int window_bits) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state)); if (!pDecomp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pDecomp; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; pDecomp->m_window_bits = window_bits; return MZ_OK; } int mz_inflateInit(mz_streamp pStream) { return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); } int mz_inflate(mz_streamp pStream, int flush) { inflate_state *pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state *)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { // MZ_FINISH on the first call implies that the input and output buffers are // large enough to hold the entire compressed/decompressed file. decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } // flush != MZ_FINISH then we must assume there's more input. 
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT; if (pState->m_dict_avail) { n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } for (;;) { in_bytes = pStream->avail_in; out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs; status = tinfl_decompress( &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pState->m_dict_avail = (mz_uint)out_bytes; n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); if (status < 0) return MZ_DATA_ERROR; // Stream is corrupted (there could be some // uncompressed data left in the output dictionary - // oh well). else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in)) return MZ_BUF_ERROR; // Signal caller that we can't make forward progress // without supplying more input or by setting flush // to MZ_FINISH. else if (flush == MZ_FINISH) { // The output buffer MUST be large to hold the remaining uncompressed data // when flush==MZ_FINISH. if (status == TINFL_STATUS_DONE) return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END; // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's // at least 1 more byte on the way. If there's no more room left in the // output buffer then something is wrong. else if (!pStream->avail_out) return MZ_BUF_ERROR; } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail)) break; } return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } int mz_inflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { mz_stream stream; int status; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_inflateInit(&stream); if (status != MZ_OK) return status; status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? 
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = { { MZ_OK, "" }, { MZ_STREAM_END, "stream end" }, { MZ_NEED_DICT, "need dictionary" }, { MZ_ERRNO, "file error" }, { MZ_STREAM_ERROR, "stream error" }, { MZ_DATA_ERROR, "data error" }, { MZ_MEM_ERROR, "out of memory" }, { MZ_BUF_ERROR, "buf error" }, { MZ_VERSION_ERROR, "version error" }, { MZ_PARAM_ERROR, "parameter error" } }; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif // MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all // compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index:; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do { \ for (;;) { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt // to read beyond the input buf, then something is wrong with the input because // the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of // the stream with 0's in this scenario. #define TINFL_GET_BYTE(state_index, c) \ do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for (;;) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes // remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode // the next Huffman code (and absolutely no more). It works by trying to fully // decode a // Huffman code by using whatever bits are currently present in the bit buffer. // If this fails, it reads another byte, and tries again until it succeeds or // until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. #define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 }; static const int s_length_extra[31] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0 }; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0 }; static const int s_dist_extra[32] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 }; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; static const int s_min_table_sizes[3] = { 257, 1, 4 }; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? 
(size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < 
r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while 
((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE); TINFL_CR_FINISH common_exit : r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start; *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next; if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0)) { const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size; mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH; } return status; } // Higher level helper functions. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0; *pOut_len = 0; tinfl_init(&decomp); for (;;) { size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; tinfl_status status = tinfl_decompress( &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } src_buf_ofs += src_buf_size; *pOut_len += dst_buf_size; if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity; } return pBuf; } size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp); status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); return (status != TINFL_STATUS_DONE) ? 
TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; } int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { int result = 0; tinfl_decompressor decomp; mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0; if (!pDict) return TINFL_STATUS_FAILED; tinfl_init(&decomp); for (;;) { size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); in_buf_ofs += in_buf_size; if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) break; if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { result = (status == TINFL_STATUS_DONE); break; } dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); } MZ_FREE(pDict); *pIn_buf_size = in_buf_ofs; return result; } // ------------------- Low-level Compression (independent from all decompression // API's) // Purposely making these tables static for faster init and thread safety. static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285 }; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0 }; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17 }; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 }; static const mz_uint8 
s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 }; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13 }; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted // values. typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // [email protected], Jyrki Katajainen, [email protected], November 1996. static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
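// How the limiter below works: a set of canonical code sizes is complete
// exactly when the Kraft total, sum_i(num_codes[i] << (max_code_size - i)),
// equals 1 << max_code_size. tdefl_huffman_enforce_max_code_size() first
// folds every count above the limit into num_codes[max_code_size], which can
// overshoot that total; each pass of its while loop then drops one max-length
// code and splits the longest remaining shorter code into two codes one bit
// deeper, reducing the total by exactly one until it balances. Illustrative
// example (not taken from any particular stream): with a limit of 3, the
// code sizes {2,2,2,4,4,4} fold to {2,2,2,3,3,3} (total 9 vs. target 8) and
// settle at {2,2,3,3,3,3} (total 8), which again satisfies the Kraft equality.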
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) { int i; mz_uint32 total = 0; if (code_list_len <= 1) return; for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i]; for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i)); while (total != (1UL << max_code_size)) { pNum_codes[max_code_size]--; for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; } total--; } } static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) { int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes); if (static_table) { for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++; } else { tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms; int num_used_syms = 0; const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0]; for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; } pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++; tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit); MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]); for (i = 1, j = num_used_syms; i <= code_size_limit; i++) for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i); } next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1); for (i = 0; i < table_len; i++) { mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue; code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1); d->m_huff_codes[table_num][i] = (mz_uint16)rev_code; } } #define TDEFL_PUT_BITS(b, l) \ do { \ mz_uint bits = b; \ mz_uint len = l; \ MZ_ASSERT(bits <= ((1U << len) - 1U)); \ d->m_bit_buffer |= (bits << d->m_bits_in); \ d->m_bits_in += len; \ while (d->m_bits_in >= 8) { \ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \ d->m_bit_buffer >>= 8; \ d->m_bits_in -= 8; \ } \ } \ MZ_MACRO_END #define TDEFL_RLE_PREV_CODE_SIZE() \ { \ if (rle_repeat_count) { \ if (rle_repeat_count < 3) { \ d->m_huff_count[2][prev_code_size] = (mz_uint16)( \ d->m_huff_count[2][prev_code_size] + rle_repeat_count); \ while (rle_repeat_count--) \ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \ } else { \ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 16; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_repeat_count - 3); \ } \ rle_repeat_count = 0; \ } \ } #define TDEFL_RLE_ZERO_CODE_SIZE() \ { \ if (rle_z_count) { \ if (rle_z_count < 3) { \ d->m_huff_count[2][0] = \ (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \ while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \ } else if (rle_z_count <= 10) { \ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 
1); \ packed_code_sizes[num_packed_code_sizes++] = 17; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 3); \ } else { \ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 18; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 11); \ } \ rle_z_count = 0; \ } \ } static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; static void tdefl_start_dynamic_block(tdefl_compressor *d) { int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; d->m_huff_count[0][256] = 1; tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes); memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes); total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0; memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); for (i = 0; i < total_code_sizes_to_pack; i++) { mz_uint8 code_size = code_sizes_to_pack[i]; if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); } } else { TDEFL_RLE_ZERO_CODE_SIZE(); if (code_size != prev_code_size) { TDEFL_RLE_PREV_CODE_SIZE(); d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); } } prev_code_size = code_size; } if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); } tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 5); for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4); for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS( d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) { mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); } } static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); 
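  // The code sizes filled in above are the fixed Huffman lengths from RFC
  // 1951 section 3.2.6: literal/length symbols 0-143 get 8 bits, 144-255 get
  // 9, 256-279 get 7, 280-287 get 8, and all 32 distance codes get 5 bits.
  // The two tdefl_optimize_huffman_table() calls below run with static_table
  // set to MZ_TRUE, so they only derive the canonical bit patterns from these
  // fixed lengths (no frequency analysis is done for a static block), and
  // TDEFL_PUT_BITS(1, 2) emits the two-bit block type 01 ("compressed with
  // fixed Huffman codes").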
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); } static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF }; #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \ MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; mz_uint8 *pOutput_buf = d->m_pOutput_buf; mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; mz_uint64 bit_buffer = d->m_bit_buffer; mz_uint bits_in = d->m_bits_in; #define TDEFL_PUT_BITS_FAST(b, l) \ { \ bit_buffer |= (((mz_uint64)(b)) << bits_in); \ bits_in += (l); \ } flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint s0, s1, n0, n1, sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); // This sequence coaxes MSVC into using cmov's vs. jmp's. s0 = s_tdefl_small_dist_sym[match_dist & 511]; n0 = s_tdefl_small_dist_extra[match_dist & 511]; s1 = s_tdefl_large_dist_sym[match_dist >> 8]; n1 = s_tdefl_large_dist_extra[match_dist >> 8]; sym = (match_dist < 512) ? s0 : s1; num_extra_bits = (match_dist < 512) ? n0 : n1; MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } } if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE; *(mz_uint64 *)pOutput_buf = bit_buffer; pOutput_buf += (bits_in >> 3); bit_buffer >>= (bits_in & ~7); bits_in &= 7; } #undef TDEFL_PUT_BITS_FAST d->m_pOutput_buf = pOutput_buf; d->m_bits_in = 0; d->m_bit_buffer = 0; while (bits_in) { mz_uint32 n = MZ_MIN(bits_in, 16); TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n); bit_buffer >>= n; bits_in -= n; } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #else static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], 
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); if (match_dist < 512) { sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist]; } else { sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; } MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && // MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) { if (static_block) tdefl_start_static_block(d); else tdefl_start_dynamic_block(d); return tdefl_compress_lz_codes(d); } static int tdefl_flush_block(tdefl_compressor *d, int flush) { mz_uint saved_bit_buf, saved_bits_in; mz_uint8 *pSaved_output_buf; mz_bool comp_block_succeeded = MZ_FALSE; int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size; mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf; d->m_pOutput_buf = pOutput_buf_start; d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; MZ_ASSERT(!d->m_output_flush_remaining); d->m_output_flush_ofs = 0; d->m_output_flush_remaining = 0; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left); d->m_pLZ_code_buf -= (d->m_num_flags_left == 8); if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) { TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8); } TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in; if (!use_raw_block) comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48)); // If the block gets expanded, forget the current contents of the output // buffer and send a raw block instead. if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) && ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) { mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; TDEFL_PUT_BITS(0, 2); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) { TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16); } for (i = 0; i < d->m_total_lz_bytes; ++i) { TDEFL_PUT_BITS( d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8); } } // Check for the extremely unlikely (if not impossible) case of the compressed // block not fitting into the output buffer when using dynamic codes. 
else if (!comp_block_succeeded) { d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; tdefl_compress_block(d, MZ_TRUE); } if (flush) { if (flush == TDEFL_FINISH) { if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } } } else { mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); } } } MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end); memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++; if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) { if (d->m_pPut_buf_func) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user)) return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED); } else if (pOutput_buf_start == d->m_output_buf) { int bytes_to_copy = (int)MZ_MIN( (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs)); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy); d->m_out_buf_ofs += bytes_to_copy; if ((n -= bytes_to_copy) != 0) { d->m_output_flush_ofs = bytes_to_copy; d->m_output_flush_remaining = n; } } else { d->m_out_buf_ofs += n; } } return d->m_output_flush_remaining; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p) static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q; mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s); MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; q = (const mz_uint16 *)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32; do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); if (!probe_len) { *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break; } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p 
== *(const mz_uint8 *)q)) > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break; c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); } } } #else static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint8 *s = d->m_dict + pos, *p, *q; mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if ((d->m_dict[probe_pos + match_len] == c0) && \ (d->m_dict[probe_pos + match_len - 1] == c1)) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break; if (probe_len > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return; c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1]; } } } #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static mz_bool tdefl_compress_fast(tdefl_compressor *d) { // Faster, minimally featured LZRW1-style match+parse loop with better // register utilization. Intended for applications where raw throughput is // valued more highly than ratio. 
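  // In outline: each iteration hashes the three bytes at the current position
  // into a single-probe level-1 hash table; when the probed dictionary
  // position starts with the same trigram, the match is extended sixteen bits
  // at a time up to TDEFL_MAX_MATCH_LEN, otherwise the byte is emitted as a
  // raw literal. Length-3 matches at distances of 8KB or more are demoted to
  // literals, since their coded size usually exceeds that of three literals.
  // Input is consumed in TDEFL_COMP_FAST_LOOKAHEAD_SIZE (4KB) chunks.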
mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while ((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } } d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; return MZ_TRUE; } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit) { d->m_total_lz_bytes++; *d->m_pLZ_code_buf++ = lit; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } d->m_huff_count[0][lit]++; } static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) { mz_uint32 s0, s1; MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE)); d->m_total_lz_bytes += match_len; d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); match_dist -= 1; d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF); d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3; *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127]; d->m_huff_count[1][(match_dist < 512) ? 
s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; // Update dictionary and hash chains. Keeps the lookahead size equal to // TDEFL_MAX_MATCH_LEN. if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; // Simple lazy/greedy parsing state machine. len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? 
d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if (!cur_match_dist) tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } // Move the lookahead forward by len_to_move bytes. d->m_lookahead_pos += len_to_move; MZ_ASSERT(d->m_lookahead_size >= len_to_move); d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE); // Check if it's time to flush the current LZ codes to the internal output // buffer. if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { int n; d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; } } d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; return MZ_TRUE; } static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { if (d->m_pIn_buf_size) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; } if (d->m_pOut_buf_size) { size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n); d->m_output_flush_ofs += (mz_uint)n; d->m_output_flush_remaining -= (mz_uint)n; d->m_out_buf_ofs += n; *d->m_pOut_buf_size = d->m_out_buf_ofs; } return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; } tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) { if (!d) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return TDEFL_STATUS_BAD_PARAM; } d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size; d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size; d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; d->m_out_buf_ofs = 0; d->m_flush = flush; if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); } d->m_wants_to_finish |= (flush == TDEFL_FINISH); if ((d->m_output_flush_remaining) || (d->m_finished)) return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) { if (!tdefl_compress_fast(d)) return d->m_prev_return_status; } else #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN { if (!tdefl_compress_normal(d)) return d->m_prev_return_status; } if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf)) d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf); if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) { if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status; d->m_finished = (flush == TDEFL_FINISH); if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; } } return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); } tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush) { MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush); } tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user; d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash); d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0; d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY; d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1; d->m_pIn_buf = NULL; d->m_pOut_buf = NULL; d->m_pIn_buf_size = NULL; 
d->m_pOut_buf_size = NULL; d->m_flush = TDEFL_NO_FLUSH; d->m_pSrc = NULL; d->m_src_buf_left = 0; d->m_out_buf_ofs = 0; memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); return TDEFL_STATUS_OKAY; } tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) { return d->m_prev_return_status; } mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; } mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { tdefl_compressor *pComp; mz_bool succeeded; if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE; pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); if (!pComp) return MZ_FALSE; succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY); succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE); MZ_FREE(pComp); return succeeded; } typedef struct { size_t m_size, m_capacity; mz_uint8 *m_pBuf; mz_bool m_expandable; } tdefl_output_buffer; static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser) { tdefl_output_buffer *p = (tdefl_output_buffer *)pUser; size_t new_size = p->m_size + len; if (new_size > p->m_capacity) { size_t new_capacity = p->m_capacity; mz_uint8 *pNew_buf; if (!p->m_expandable) return MZ_FALSE; do { new_capacity = MZ_MAX(128U, new_capacity << 1U); } while (new_size > new_capacity); pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity); if (!pNew_buf) return MZ_FALSE; p->m_pBuf = pNew_buf; p->m_capacity = new_capacity; } memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len); p->m_size = new_size; return MZ_TRUE; } void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf); if (!pOut_len) return MZ_FALSE; else *pOut_len = 0; out_buf.m_expandable = MZ_TRUE; if (!tdefl_compress_mem_to_output( pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return NULL; *pOut_len = out_buf.m_size; return out_buf.m_pBuf; } size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf); if (!pOut_buf) return 0; out_buf.m_pBuf = (mz_uint8 *)pOut_buf; out_buf.m_capacity = out_buf_len; if (!tdefl_compress_mem_to_output( pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return 0; return out_buf.m_size; } #ifndef MINIZ_NO_ZLIB_APIS static const mz_uint s_tdefl_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 }; // level may actually range from [0,10] (10 is a "hidden" max level, where we // want a bit more compression and it's fine if throughput to fall off a cliff // on some files). mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy) { mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] | ((level <= 3) ? 
TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #endif // MINIZ_NO_ZLIB_APIS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif // Simple PNG writer function by Alex Evans, 2011. Released into the public // domain: https://gist.github.com/908299, more context at // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. // This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 }; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } // write real header *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = { 0x00, 0x00, 0x04, 0x02, 0x06 }; mz_uint8 pnghdr[41] = { 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0, (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0, 0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16), (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41, 0x54 }; c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); for (i = 0; i < 4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } // write footer (IDAT CRC-32, followed by IEND chunk) if (!tdefl_output_buffer_putter( "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); for (i = 0; i < 4; ++i, c <<= 8) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); // compute final size of file, grab compressed data buffer and return *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs // were #defined out) return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } // ------------------- .ZIP archive reading #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #include <sys/stat.h> #if defined(_MSC_VER) || defined(__MINGW64__) static FILE *mz_fopen(const char *pFilename, const char *pMode) { FILE *pFile = NULL; fopen_s(&pFile, pFilename, pMode); return pFile; } static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) { FILE *pFile = NULL; if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL; return pFile; } #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN mz_fopen #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 _ftelli64 #define MZ_FSEEK64 _fseeki64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN mz_freopen #define MZ_DELETE_FILE remove #elif defined(__MINGW32__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__TINYC__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftell #define MZ_FSEEK64 fseek #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove
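// Each branch of this #if chain maps the MZ_FILE/MZ_FTELL64/MZ_FSEEK64 macros
// onto whatever 64-bit-capable stdio variants the toolchain provides, so
// archives larger than 2GB can be seeked where the C runtime allows it; the
// glibc branch below only applies when _LARGEFILE64_SOURCE was defined at
// compile time.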
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) #define MZ_DELETE_FILE remove #else #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler // alignment and platform endian issues, miniz.c doesn't use structs for any of // this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, // End of central directory offsets MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; MZ_FILE *m_pFile; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \ (array_ptr)->m_element_size = element_size #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \ ((element_type *)((array_ptr)->m_p))[index] static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive 
*pZip, mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if (new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef _MSC_VER struct tm tm_struct; struct tm *tm = &tm_struct; errno_t err = localtime_s(tm, &time); if (err) { *pDOS_date = 0; *pDOS_time = 0; return; } #else struct tm *tm = localtime(&time); #endif *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef MINIZ_NO_TIME (void)pFilename; *pDOS_date = *pDOS_time = 0; #else struct MZ_FILE_STAT_STRUCT file_stat; // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000 // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. 
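// Note: DOS timestamps are in local time with 2-second resolution (the
// seconds field is stored divided by two), so round-tripping st_mtime through
// mz_zip_time_to_dos_time() can lose up to a second.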
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date); #endif // #ifdef MINIZ_NO_TIME return MZ_TRUE; } #ifndef MINIZ_NO_TIME static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) { struct utimbuf t; t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); } #endif // #ifndef MINIZ_NO_TIME #endif // #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_READING; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) \ do { \ mz_uint32 t = a; \ a = b; \ b = t; \ } \ MZ_MACRO_END // Heap sort of lowercased filenames, used to help accelerate plain central // directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), // but it could allocate memory.) 
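// The comparator lowercases both names byte-by-byte and breaks ties by
// length, so e.g. "DIR/A.TXT" and "dir/a.txt" compare equal while "a" sorts
// before "ab". The sorted index array built here is what
// mz_zip_reader_locate_file_binary_search() probes later.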
static void mz_zip_reader_sort_central_dir_offsets_by_filename( mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; int start = (size - 2) >> 1, end; while (start >= 0) { int child, root = start; for (;;) { if ((child = (root << 1) + 1) >= size) break; child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } start--; } end = size - 1; while (end > 0) { int child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for (;;) { if ((child = (root << 1) + 1) >= end) break; child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } end--; } } static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint32 flags) { mz_uint cdir_size, num_this_disk, cdir_disk_index; mz_uint64 cdir_ofs; mz_int64 cur_file_ofs; const mz_uint8 *p; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); // Basic sanity checks - reject files which are too small, and check the first // 4 bytes of the file to make sure a local header is there. if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; // Find the end of central directory record by scanning the file from the end // towards the beginning. cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); for (;;) { int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) return MZ_FALSE; for (i = n - 4; i >= 0; --i) if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break; if (i >= 0) { cur_file_ofs += i; break; } if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE))) return MZ_FALSE; cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); } // Read and verify the end of central directory record. 
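// The fixed part of the end-of-central-directory record is 22 bytes: the
// 'PK\x05\x06' signature, two 16-bit disk numbers, two 16-bit entry counts,
// the 32-bit central directory size and offset, and a 16-bit comment length
// (see the MZ_ZIP_ECDH_* offsets above).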
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) || ((pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) != MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS))) return MZ_FALSE; num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1))) return MZ_FALSE; if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE; pZip->m_central_directory_file_ofs = cdir_ofs; if (pZip->m_total_files) { mz_uint i, n; // Read the entire central directory into a heap block, and allocate another // heap block to hold the unsorted central dir file record offsets, and // another to hold the sorted indices. if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE))) return MZ_FALSE; if (sort_central_dir) { if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE)) return MZ_FALSE; } if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size) return MZ_FALSE; // Now create an index into the central directory file records, do some // basic sanity checking on each record, and check for zip64 entries (which // are not yet supported). 
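// zip64 entries are detected via the 0xFFFFFFFF sentinel sizes rejected
// below: in a zip64 archive that value means the real size lives in the
// extra field, which this reader does not parse.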
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p; for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) { mz_uint total_header_size, comp_size, decomp_size, disk_index; if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)) return MZ_FALSE; MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p); if (sort_central_dir) MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) || (comp_size == 0xFFFFFFFF)) return MZ_FALSE; disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS); if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE; if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n) return MZ_FALSE; n -= total_header_size; p += total_header_size; } } if (sort_central_dir) mz_zip_reader_sort_central_dir_offsets_by_filename(pZip); return MZ_TRUE; } mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags) { if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE; if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; size_t s = (file_ofs >= pZip->m_archive_size) ? 
0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) { if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; #ifdef __cplusplus pZip->m_pState->m_pMem = const_cast<void *>(pMem); #else pZip->m_pState->m_pMem = (void *)pMem; #endif pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { mz_uint64 file_size; MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return MZ_FALSE; if (MZ_FSEEK64(pFile, 0, SEEK_END)) { MZ_FCLOSE(pFile); return MZ_FALSE; } file_size = MZ_FTELL64(pFile); if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? pZip->m_total_files : 0; } static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh( mz_zip_archive *pZip, mz_uint file_index) { if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return NULL; return &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); } mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) { mz_uint m_bit_flag; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); return (m_bit_flag & 1); } mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) { mz_uint filename_len, external_attr; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; // First see if the filename ends with a '/' character. filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_len) { if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') return MZ_TRUE; } // Bugfix: This code was also checking if the internal attribute was non-zero, // which wasn't correct. // Most/all zip writers (hopefully) set DOS file/directory attributes in the // low 16-bits, so check for the DOS directory flag and ignore the source OS // ID in the created by field. // FIXME: Remove this check? Is it necessary - we already check the filename. 
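// (0x10 is FILE_ATTRIBUTE_DIRECTORY in the DOS/Windows attribute encoding.)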
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); if ((external_attr & 0x10) != 0) return MZ_TRUE; return MZ_FALSE; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if ((!p) || (!pStat)) return MZ_FALSE; // Unpack the central directory record. pStat->m_file_index = file_index; pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); #ifndef MINIZ_NO_TIME pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); #endif pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); // Copy as much of the filename and comment as possible. n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1); memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pStat->m_filename[n] = '\0'; n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1); pStat->m_comment_size = n; memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n); pStat->m_comment[n] = '\0'; return MZ_TRUE; } mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) { if (filename_buf_size) pFilename[0] = '\0'; return 0; } n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_buf_size) { n = MZ_MIN(n, filename_buf_size - 1); memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pFilename[n] = '\0'; } return n + 1; } static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags) { mz_uint i; if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len); for (i = 0; i < len; ++i) if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE; return MZ_TRUE; } static MZ_FORCEINLINE int mz_zip_reader_filename_compare( const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? 
(int)(l_len - r_len) : (l - r); } static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; const mz_uint filename_len = (mz_uint)strlen(pFilename); int l = 0, h = size - 1; while (l <= h) { int m = (l + h) >> 1, file_index = pIndices[m], comp = mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len); if (!comp) return file_index; else if (comp < 0) l = m + 1; else h = m - 1; } return -1; } int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags) { mz_uint file_index; size_t name_len, comment_len; if ((!pZip) || (!pZip->m_pState) || (!pName) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return -1; if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size)) return mz_zip_reader_locate_file_binary_search(pZip, pName); name_len = strlen(pName); if (name_len > 0xFFFF) return -1; comment_len = pComment ? strlen(pComment) : 0; if (comment_len > 0xFFFF) return -1; for (file_index = 0; file_index < pZip->m_total_files; file_index++) { const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS); const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; if (filename_len < name_len) continue; if (comment_len) { mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS); const char *pFile_comment = pFilename + filename_len + file_extra_len; if ((file_comment_len != comment_len) || (!mz_zip_reader_string_equal(pComment, pFile_comment, file_comment_len, flags))) continue; } if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) { int ofs = filename_len - 1; do { if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':')) break; } while (--ofs >= 0); ofs++; pFilename += ofs; filename_len -= ofs; } if ((filename_len == name_len) && (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags))) return file_index; } return -1; } mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int status = TINFL_STATUS_DONE; mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail; mz_zip_archive_file_stat file_stat; void *pRead_buf; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; tinfl_decompressor inflator; if ((buf_size) && (!pBuf)) return MZ_FALSE; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old 
zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Ensure supplied output buffer is large enough. needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size; if (buf_size < needed_size) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size) return MZ_FALSE; return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32); } // Decompress the file either directly from memory or from a file input // buffer. tinfl_init(&inflator); if (pZip->m_pState->m_pMem) { // Read directly from the archive in memory. pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else if (pUser_read_buf) { // Use a user provided read buffer. if (!user_read_buf_size) return MZ_FALSE; pRead_buf = (mz_uint8 *)pUser_read_buf; read_buf_size = user_read_buf_size; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } else { // Temporarily allocate a read buffer. read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #endif return MZ_FALSE; if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } do { size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? 
TINFL_FLAG_HAS_MORE_INPUT : 0)); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; out_buf_ofs += out_buf_size; } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); if (status == TINFL_STATUS_DONE) { // Make sure the entire file was decompressed, and check its CRC. if ((out_buf_ofs != file_stat.m_uncomp_size) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size); } mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0); } mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0); } void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) { mz_uint64 comp_size, uncomp_size, alloc_size; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); void *pBuf; if (pSize) *pSize = 0; if (!p) return NULL; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
comp_size : uncomp_size; #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #endif return NULL; if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) return NULL; if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; // Decompress the file either directly from memory or from a file input // buffer. if (pZip->m_pState->m_pMem) { pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else { read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. 
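// Two cases: a memory-backed archive can hand the callback the whole stored
// blob in one call, while a file-backed archive streams it through pRead_buf
// in chunks of at most MZ_ZIP_MAX_IO_BUF_SIZE bytes.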
if (pZip->m_pState->m_pMem) { #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #endif return MZ_FALSE; if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size) status = TINFL_STATUS_FAILED; else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size); cur_file_ofs += file_stat.m_comp_size; out_buf_ofs += file_stat.m_comp_size; comp_remaining = 0; } else { while (comp_remaining) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32( file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail); if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; out_buf_ofs += read_buf_avail; comp_remaining -= read_buf_avail; } } } else { tinfl_decompressor inflator; tinfl_init(&inflator); if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE))) status = TINFL_STATUS_FAILED; else { do { mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; if (out_buf_size) { if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size) { status = TINFL_STATUS_FAILED; break; } file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size); if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) { status = TINFL_STATUS_FAILED; break; } } } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT)); } } if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { // Make sure the entire file was decompressed, and check its CRC. 
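// (file_crc32 was accumulated chunk-by-chunk as data was emitted above, so
// unlike mz_zip_reader_extract_to_mem_no_alloc() no second pass over the
// output is needed here.)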
if ((out_buf_ofs != file_stat.m_uncomp_size) || (file_crc32 != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags); } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) { (void)ofs; return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque); } mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) { mz_bool status; mz_zip_archive_file_stat file_stat; MZ_FILE *pFile; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; pFile = MZ_FOPEN(pDst_filename, "wb"); if (!pFile) return MZ_FALSE; status = mz_zip_reader_extract_to_callback( pZip, file_index, mz_zip_file_write_callback, pFile, flags); if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE; #ifndef MINIZ_NO_TIME if (status) mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); #endif return status; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_end(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; if (pZip->m_pState) { mz_zip_internal_state *pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO pZip->m_pFree(pZip->m_pAlloc_opaque, pState); } pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags); } #endif // ------------------- .ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static void mz_write_le16(mz_uint8 *p, mz_uint16 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); } static void mz_write_le32(mz_uint8 *p, mz_uint32 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); p[2] = (mz_uint8)(v >> 16); p[3] = (mz_uint8)(v >> 24); } #define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v)) #define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v)) mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) { if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (pZip->m_file_offset_alignment) { // Ensure user specified file offset alignment is a power of 2. 
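// (x & (x - 1)) clears the lowest set bit, so the test below is zero exactly
// when the alignment is a power of two - e.g. 8 & 7 == 0, but 12 & 11 == 8.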
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)) return MZ_FALSE; } if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_archive_size = existing_size; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_zip_internal_state *pState = pZip->m_pState; mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size); #ifdef _MSC_VER if ((!n) || ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #else if ((!n) || ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #endif return 0; if (new_size > pState->m_mem_capacity) { void *pNew_block; size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity); while (new_capacity < new_size) new_capacity *= 2; if (NULL == (pNew_block = pZip->m_pRealloc( pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity))) return 0; pState->m_pMem = pNew_block; pState->m_mem_capacity = new_capacity; } memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n); pState->m_mem_size = (size_t)new_size; return n; } mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size) { pZip->m_pWrite = mz_zip_heap_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning))) { if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, initial_allocation_size))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_mem_capacity = initial_allocation_size; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning) { MZ_FILE *pFile; pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_pFile = pFile; if (size_to_reserve_at_beginning) { mz_uint64 cur_ofs = 0; char buf[4096]; MZ_CLEAR_OBJ(buf); do { size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) { mz_zip_writer_end(pZip); return MZ_FALSE; } cur_ofs += n; 
size_to_reserve_at_beginning -= n; } while (size_to_reserve_at_beginning); } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; // No sense in trying to write to an archive that's already at the supported // max size if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; pState = pZip->m_pState; if (pState->m_pFile) { #ifdef MINIZ_NO_STDIO (void)pFilename; return MZ_FALSE; #else // Archive is being read from stdio - try to reopen as writable. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; if (!pFilename) return MZ_FALSE; pZip->m_pWrite = mz_zip_file_write_func; if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { // The mz_zip_archive is now in a bogus state because pState->m_pFile is // NULL, so just close it. mz_zip_reader_end(pZip); return MZ_FALSE; } #endif // #ifdef MINIZ_NO_STDIO } else if (pState->m_pMem) { // Archive lives in a memory block. Assume it's from the heap that we can // resize using the realloc callback. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; pState->m_mem_capacity = pState->m_mem_size; pZip->m_pWrite = mz_zip_heap_write_func; } // Archive is being read via a user provided read function - make sure the // user has specified a write function too. else if (!pZip->m_pWrite) return MZ_FALSE; // Start writing new files at the archive's current central directory // location. pZip->m_archive_size = pZip->m_central_directory_file_ofs; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_central_directory_file_ofs = 0; return MZ_TRUE; } mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags) { return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0); } typedef struct { mz_zip_archive *m_pZip; mz_uint64 m_cur_archive_file_ofs; mz_uint64 m_comp_size; } mz_zip_writer_add_state; static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, void *pUser) { mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser; if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len) != len) return MZ_FALSE; pState->m_cur_archive_file_ofs += len; pState->m_comp_size += len; return MZ_TRUE; } static mz_bool mz_zip_writer_create_local_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date) { (void)pZip; memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ?
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); return MZ_TRUE; } static mz_bool mz_zip_writer_create_central_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir( mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; // No zip64 support yet if ((local_header_ofs > 0xFFFFFFFF) || (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + comment_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_central_dir_header( pZip, central_dir_header, filename_size, extra_size, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return MZ_FALSE; if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1))) { // Try to push the central directory array back into its original state. 
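// Only m_central_dir needs an explicit rollback: a failed push_back leaves
// the target array's size untouched, so m_central_dir_offsets cannot have
// been partially extended.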
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { // Basic ZIP archive filename validity checks: Valid filenames cannot start // with a forward slash, cannot contain a drive letter, and cannot use // DOS-style backward slashes. if (*pArchive_name == '/') return MZ_FALSE; while (*pArchive_name) { if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE; pArchive_name++; } return MZ_TRUE; } static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment( mz_zip_archive *pZip) { mz_uint32 n; if (!pZip->m_file_offset_alignment) return 0; n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); return (pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1); } static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) { mz_uint32 s = MZ_MIN(sizeof(buf), n); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) return MZ_FALSE; cur_file_ofs += s; n -= s; } return MZ_TRUE; } mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32) { mz_uint16 method = 0, dos_time = 0, dos_date = 0; mz_uint level, ext_attributes = 0, num_alignment_padding_bytes; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed; mz_zip_internal_state *pState; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; pState = pZip->m_pState; if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return MZ_FALSE; // No zip64 support yet if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; #ifndef MINIZ_NO_TIME { time_t cur_time; time(&cur_time); mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date); } #endif // #ifndef MINIZ_NO_TIME archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { // Set DOS Subdirectory attribute bit. ext_attributes |= 0x10; // Subdirectories cannot contain data. if ((buf_size) || (uncomp_size)) return MZ_FALSE; } // Try to do any allocations before writing to the archive, so if an // allocation fails the file remains unmodified. 
(A good idea if we're doing // an in-place modification.) if ((!mz_zip_array_ensure_room( pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = 
pZip->m_archive_size, uncomp_size = 0, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, 
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer( pComp, pRead_buf, in_buf_size, uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if (NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, 
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back( pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if 
(pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. 
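// A hedged usage sketch of the append path implemented just below
// (illustrative only; "archive.zip", "new_file.txt", and the payload
// are made-up names, not part of tinyexr/miniz):
#if 0
  mz_zip_archive zip;
  memset(&zip, 0, sizeof(zip));
  // Open for reading, then reopen the same FILE* writable ("r+b").
  if (mz_zip_reader_init_file(&zip, "archive.zip", 0) &&
      mz_zip_writer_init_from_reader(&zip, "archive.zip")) {
    const char payload[] = "hello";
    // New data overwrites the old central directory, which is then
    // regenerated by mz_zip_writer_finalize_archive().
    mz_zip_writer_add_mem(&zip, "new_file.txt", payload,
                          sizeof(payload) - 1, MZ_DEFAULT_LEVEL);
    mz_zip_writer_finalize_archive(&zip);
    mz_zip_writer_end(&zip);
  }
#endif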
if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ // ---------------------- end of miniz ---------------------------------------- #ifdef __clang__ #pragma clang diagnostic pop #endif #ifdef _MSC_VER #pragma warning(pop) #endif } // namespace miniz #else // Reuse MINIZ_LITTE_ENDIAN macro #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. 
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static void SetErrorMessage(const std::string &msg, const char **err) { if (err) { #ifdef _WIN32 (*err) = _strdup(msg.c_str()); #else (*err) = strdup(msg.c_str()); #endif } } static const int kEXRVersionSize = 8; static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; } static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } static void cpy4(int *dst_val, const int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(unsigned int *dst_val, const unsigned int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(float *dst_val, const float *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } #if 0 static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } #endif static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. 
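// The unions below give bit-level access to the IEEE-754 layouts so
// the half<->float converters that follow can operate directly on the
// sign/exponent/mantissa fields. A minimal round-trip sketch using
// those helpers (illustrative values only):
#if 0
  FP32 in;
  in.f = 0.5f;                      // exactly representable in half
  FP16 h = float_to_half_full(in);  // defined below
  FP32 out = half_to_float(h);      // out.f == 0.5f again
#endif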
union FP32 { unsigned int u; float f; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 23; unsigned int Exponent : 8; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 8; unsigned int Mantissa : 23; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpadded" #endif union FP16 { unsigned short u; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 10; unsigned int Exponent : 5; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 5; unsigned int Mantissa : 10; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic pop #endif static FP32 half_to_float(FP16 h) { static const FP32 magic = { 113 << 23 }; static const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift FP32 o; o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits unsigned int exp_ = shifted_exp & o.u; // just the exponent o.u += (127 - 15) << 23; // exponent adjust // handle exponent special cases if (exp_ == shifted_exp) // Inf/NaN? o.u += (128 - 16) << 23; // extra exp adjust else if (exp_ == 0) // Zero/Denormal? { o.u += 1 << 23; // extra exp adjust o.f -= magic.f; // renormalize } o.u |= (h.u & 0x8000U) << 16U; // sign bit return o; } static FP16 float_to_half_full(FP32 f) { FP16 o = { 0 }; // Based on ISPC reference code (with minor modifications) if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow) o.s.Exponent = 0; else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set) { o.s.Exponent = 31; o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf } else // Normalized number { // Exponent unbias the single, then bias the halfp int newexp = f.s.Exponent - 127 + 15; if (newexp >= 31) // Overflow, return signed infinity o.s.Exponent = 31; else if (newexp <= 0) // Underflow { if ((14 - newexp) <= 24) // Mantissa might be non-zero { unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit o.s.Mantissa = mant >> (14 - newexp); if ((mant >> (13 - newexp)) & 1) // Check for rounding o.u++; // Round, might overflow into exp bit, but this is OK } } else { o.s.Exponent = static_cast<unsigned int>(newexp); o.s.Mantissa = f.s.Mantissa >> 13; if (f.s.Mantissa & 0x1000) // Check for rounding o.u++; // Round, might overflow to inf, this is OK } } o.s.Sign = f.s.Sign; return o; } // NOTE: From OpenEXR code // #define IMF_INCREASING_Y 0 // #define IMF_DECREASING_Y 1 // #define IMF_RANDOM_Y 2 // // #define IMF_NO_COMPRESSION 0 // #define IMF_RLE_COMPRESSION 1 // #define IMF_ZIPS_COMPRESSION 2 // #define IMF_ZIP_COMPRESSION 3 // #define IMF_PIZ_COMPRESSION 4 // #define IMF_PXR24_COMPRESSION 5 // #define IMF_B44_COMPRESSION 6 // #define IMF_B44A_COMPRESSION 7 #ifdef __clang__ #pragma clang diagnostic push #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #endif static const char *ReadString(std::string *s, const char *ptr, size_t len) { // Read until NULL(\0). const char *p = ptr; const char *q = ptr; while ((size_t(q - ptr) < len) && (*q) != 0) { q++; } if (size_t(q - ptr) >= len) { (*s) = std::string(); return NULL; } (*s) = std::string(p, q); return q + 1; // skip '\0' } static bool ReadAttribute(std::string *name, std::string *type, std::vector<unsigned char> *data, size_t *marker_size, const char *marker, size_t size) { size_t name_len = strnlen(marker, size); if (name_len == size) { // String does not have a terminating character.
return false; } *name = std::string(marker, name_len); marker += name_len + 1; size -= name_len + 1; size_t type_len = strnlen(marker, size); if (type_len == size) { return false; } *type = std::string(marker, type_len); marker += type_len + 1; size -= type_len + 1; if (size < sizeof(uint32_t)) { return false; } uint32_t data_len; memcpy(&data_len, marker, sizeof(uint32_t)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len == 0) { if ((*type).compare("string") == 0) { // Accept empty string attribute. marker += sizeof(uint32_t); size -= sizeof(uint32_t); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t); data->resize(1); (*data)[0] = '\0'; return true; } else { return false; } } marker += sizeof(uint32_t); size -= sizeof(uint32_t); if (size < data_len) { return false; } data->resize(static_cast<size_t>(data_len)); memcpy(&data->at(0), marker, static_cast<size_t>(data_len)); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len; return true; } static void WriteAttributeToMemory(std::vector<unsigned char> *out, const char *name, const char *type, const unsigned char *data, int len) { out->insert(out->end(), name, name + strlen(name) + 1); out->insert(out->end(), type, type + strlen(type) + 1); int outLen = len; tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen)); out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen), reinterpret_cast<unsigned char *>(&outLen) + sizeof(int)); out->insert(out->end(), data, data + len); } typedef struct { std::string name; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } ChannelInfo; typedef struct { std::vector<tinyexr::ChannelInfo> channels; std::vector<EXRAttribute> attributes; int data_window[4]; int line_order; int display_window[4]; float screen_window_center[2]; float screen_window_width; float pixel_aspect_ratio; int chunk_count; // Tiled format int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; unsigned int header_len; int compression_type; void clear() { channels.clear(); attributes.clear(); data_window[0] = 0; data_window[1] = 0; data_window[2] = 0; data_window[3] = 0; line_order = 0; display_window[0] = 0; display_window[1] = 0; display_window[2] = 0; display_window[3] = 0; screen_window_center[0] = 0.0f; screen_window_center[1] = 0.0f; screen_window_width = 0.0f; pixel_aspect_ratio = 0.0f; chunk_count = 0; // Tiled format tile_size_x = 0; tile_size_y = 0; tile_level_mode = 0; tile_rounding_mode = 0; header_len = 0; compression_type = 0; } } HeaderInfo; static bool ReadChannelInfo(std::vector<ChannelInfo> &channels, const std::vector<unsigned char> &data) { const char *p = reinterpret_cast<const char *>(&data.at(0)); for (;;) { if ((*p) == 0) { break; } ChannelInfo info; tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) - (p - reinterpret_cast<const char *>(data.data())); if (data_len < 0) { return false; } p = ReadString(&info.name, p, size_t(data_len)); if ((p == NULL) && (info.name.empty())) { // Buffer overrun. Issue #51. 
return false; } const unsigned char *data_end = reinterpret_cast<const unsigned char *>(p) + 16; if (data_end >= (data.data() + data.size())) { return false; } memcpy(&info.pixel_type, p, sizeof(int)); p += 4; info.p_linear = static_cast<unsigned char>(p[0]); // uchar p += 1 + 3; // reserved: uchar[3] memcpy(&info.x_sampling, p, sizeof(int)); // int p += 4; memcpy(&info.y_sampling, p, sizeof(int)); // int p += 4; tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling)); channels.push_back(info); } return true; } static void WriteChannelInfo(std::vector<unsigned char> &data, const std::vector<ChannelInfo> &channels) { size_t sz = 0; // Calculate total size. for (size_t c = 0; c < channels.size(); c++) { sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0 sz += 16; // 4 * int } data.resize(sz + 1); unsigned char *p = &data.at(0); for (size_t c = 0; c < channels.size(); c++) { memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str())); p += strlen(channels[c].name.c_str()); (*p) = '\0'; p++; int pixel_type = channels[c].pixel_type; int x_sampling = channels[c].x_sampling; int y_sampling = channels[c].y_sampling; tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling)); memcpy(p, &pixel_type, sizeof(int)); p += sizeof(int); (*p) = channels[c].p_linear; p += 4; memcpy(p, &x_sampling, sizeof(int)); p += sizeof(int); memcpy(p, &y_sampling, sizeof(int)); p += sizeof(int); } (*p) = '\0'; } static void CompressZip(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } #if TINYEXR_USE_MINIZ // // Compress the data using miniz // miniz::mz_ulong outSize = miniz::mz_compressBound(src_size); int ret = miniz::mz_compress( dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)), src_size); assert(ret == miniz::MZ_OK); (void)ret; compressedSize = outSize; #else uLong outSize = compressBound(static_cast<uLong>(src_size)); int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)), src_size); assert(ret == Z_OK); compressedSize = outSize; #endif // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static bool DecompressZip(unsigned char *dst, unsigned long *uncompressed_size /* inout */, const unsigned char *src, unsigned long src_size) { if ((*uncompressed_size) == src_size) { // Data is not compressed(Issue 40). 
memcpy(dst, src, src_size); return true; } std::vector<unsigned char> tmpBuf(*uncompressed_size); #if TINYEXR_USE_MINIZ int ret = miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (miniz::MZ_OK != ret) { return false; } #else int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (Z_OK != ret) { return false; } #endif // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size); while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. { const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (*uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + (*uncompressed_size); for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } return true; } // RLE code from OpenEXR -------------------------------------- #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wsign-conversion" #endif #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif const int MIN_RUN_LENGTH = 3; const int MAX_RUN_LENGTH = 127; // // Compress an array of bytes, using run-length encoding, // and return the length of the compressed data. // static int rleCompress(int inLength, const char in[], signed char out[]) { const char *inEnd = in + inLength; const char *runStart = in; const char *runEnd = in + 1; signed char *outWrite = out; while (runStart < inEnd) { while (runEnd < inEnd && *runStart == *runEnd && runEnd - runStart - 1 < MAX_RUN_LENGTH) { ++runEnd; } if (runEnd - runStart >= MIN_RUN_LENGTH) { // // Compressable run // *outWrite++ = static_cast<char>(runEnd - runStart) - 1; *outWrite++ = *(reinterpret_cast<const signed char *>(runStart)); runStart = runEnd; } else { // // Uncompressable run // while (runEnd < inEnd && ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) || (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) && runEnd - runStart < MAX_RUN_LENGTH) { ++runEnd; } *outWrite++ = static_cast<char>(runStart - runEnd); while (runStart < runEnd) { *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++)); } } ++runEnd; } return static_cast<int>(outWrite - out); } // // Uncompress an array of bytes compressed with rleCompress(). // Returns the length of the uncompressed data, or 0 if the // length of the uncompressed data would be more than maxLength.
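// A hedged worked example of the token format (made-up bytes, not
// data from any file): rleCompress() turns "aaaab" into [3]['a'] -- a
// non-negative count meaning "repeat the next byte 3 + 1 times" --
// followed by [-1]['b'] -- a negative count meaning "copy 1 literal
// byte". rleUncompress() below reverses exactly that.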
// static int rleUncompress(int inLength, int maxLength, const signed char in[], char out[]) { char *outStart = out; while (inLength > 0) { if (*in < 0) { int count = -(static_cast<int>(*in++)); inLength -= count + 1; if (0 >(maxLength -= count)) return 0; memcpy(out, in, count); out += count; in += count; } else { int count = *in++; inLength -= 2; if (0 > (maxLength -= count + 1)) return 0; memset(out, *reinterpret_cast<const char *>(in), count + 1); out += count + 1; in++; } } return static_cast<int>(out - outStart); } #ifdef __clang__ #pragma clang diagnostic pop #endif // End of RLE code from OpenEXR ----------------------------------- static void CompressRle(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } // outSize will be (srcSiz * 3) / 2 at max. int outSize = rleCompress(static_cast<int>(src_size), reinterpret_cast<const char *>(&tmpBuf.at(0)), reinterpret_cast<signed char *>(dst)); assert(outSize > 0); compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static void DecompressRle(unsigned char *dst, const unsigned long uncompressed_size, const unsigned char *src, unsigned long src_size) { if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); return; } std::vector<unsigned char> tmpBuf(uncompressed_size); int ret = rleUncompress(static_cast<int>(src_size), static_cast<int>(uncompressed_size), reinterpret_cast<const signed char *>(src), reinterpret_cast<char *>(&tmpBuf.at(0))); assert(ret == static_cast<int>(uncompressed_size)); (void)ret; // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + uncompressed_size; while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. 
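// (The matching compressor stored the payload as two planar halves;
// the block below re-interleaves them byte by byte. For a 4-byte
// payload, the halves [b0 b2][b1 b3] become [b0 b1 b2 b3] again.)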
{ const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + uncompressed_size; for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } } #if TINYEXR_USE_PIZ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #endif // // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp // // ----------------------------------------------------------------- // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC) // (3 clause BSD license) // struct PIZChannelData { unsigned short *start; unsigned short *end; int nx; int ny; int ys; int size; }; //----------------------------------------------------------------------------- // // 16-bit Haar Wavelet encoding and decoding // // The source code in this file is derived from the encoding // and decoding routines written by Christian Rouet for his // PIZ image file format. // //----------------------------------------------------------------------------- // // Wavelet basis functions without modulo arithmetic; they produce // the best compression ratios when the wavelet-transformed data are // Huffman-encoded, but the wavelet transform works only for 14-bit // data (untransformed data values must be less than (1 << 14)). // inline void wenc14(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { short as = static_cast<short>(a); short bs = static_cast<short>(b); short ms = (as + bs) >> 1; short ds = as - bs; l = static_cast<unsigned short>(ms); h = static_cast<unsigned short>(ds); } inline void wdec14(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { short ls = static_cast<short>(l); short hs = static_cast<short>(h); int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); short as = static_cast<short>(ai); short bs = static_cast<short>(ai - hi); a = static_cast<unsigned short>(as); b = static_cast<unsigned short>(bs); } // // Wavelet basis functions with modulo arithmetic; they work with full // 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't // compress the data quite as well. 
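// A hedged sketch of how the 14-bit basis round-trips (illustrative
// values only): wenc14() stores each pair as average and difference,
// and wdec14() inverts it exactly:
#if 0
  unsigned short l, h, a, b;
  wenc14(10, 4, l, h);  // l = (10 + 4) >> 1 = 7, h = 10 - 4 = 6
  wdec14(l, h, a, b);   // recovers a = 10, b = 4
  // wenc16()/wdec16() below do the same with modulo arithmetic so
  // full 16-bit values cannot overflow the transform.
#endif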
// const int NBITS = 16; const int A_OFFSET = 1 << (NBITS - 1); const int M_OFFSET = 1 << (NBITS - 1); const int MOD_MASK = (1 << NBITS) - 1; inline void wenc16(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { int ao = (a + A_OFFSET) & MOD_MASK; int m = ((ao + b) >> 1); int d = ao - b; if (d < 0) m = (m + M_OFFSET) & MOD_MASK; d &= MOD_MASK; l = static_cast<unsigned short>(m); h = static_cast<unsigned short>(d); } inline void wdec16(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { int m = l; int d = h; int bb = (m - (d >> 1)) & MOD_MASK; int aa = (d + bb - A_OFFSET) & MOD_MASK; b = static_cast<unsigned short>(bb); a = static_cast<unsigned short>(aa); } // // 2D Wavelet encoding: // static void wav2Encode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ? ny : nx; int p = 1; // == 1 << level int p2 = 2; // == 1 << (level+1) // // Hierarchical loop on smaller dimension n // while (p2 <= n) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet encoding // if (w14) { wenc14(*px, *p01, i00, i01); wenc14(*p10, *p11, i10, i11); wenc14(i00, i10, *px, *p10); wenc14(i01, i11, *p01, *p11); } else { wenc16(*px, *p01, i00, i01); wenc16(*p10, *p11, i10, i11); wenc16(i00, i10, *px, *p10); wenc16(i01, i11, *p01, *p11); } } // // Encode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wenc14(*px, *p10, i00, *p10); else wenc16(*px, *p10, i00, *p10); *px = i00; } } // // Encode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wenc14(*px, *p01, i00, *p01); else wenc16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p = p2; p2 <<= 1; } } // // 2D Wavelet decoding: // static void wav2Decode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ?
ny : nx; int p = 1; int p2; // // Search max level // while (p <= n) p <<= 1; p >>= 1; p2 = p; p >>= 1; // // Hierarchical loop on smaller dimension n // while (p >= 1) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet decoding // if (w14) { wdec14(*px, *p10, i00, i10); wdec14(*p01, *p11, i01, i11); wdec14(i00, i01, *px, *p01); wdec14(i10, i11, *p10, *p11); } else { wdec16(*px, *p10, i00, i10); wdec16(*p01, *p11, i01, i11); wdec16(i00, i01, *px, *p01); wdec16(i10, i11, *p10, *p11); } } // // Decode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wdec14(*px, *p10, i00, *p10); else wdec16(*px, *p10, i00, *p10); *px = i00; } } // // Decode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wdec14(*px, *p01, i00, *p01); else wdec16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p2 = p; p >>= 1; } } //----------------------------------------------------------------------------- // // 16-bit Huffman compression and decompression. // // The source code in this file is derived from the 8-bit // Huffman compression and decompression routines written // by Christian Rouet for his PIZ image file format. // //----------------------------------------------------------------------------- // Adds some modification for tinyexr. 
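// A hedged worked example of the canonical-code scheme implemented by
// hufCanonicalCodeTable() below: for symbols s0..s2 with code lengths
// {1, 2, 2}, the assigned codes are s0 -> 1, s1 -> 00, s2 -> 01
// (shorter codes take the numerically higher values when right-filled
// with zeroes). Because the codes follow from the lengths alone, only
// the lengths need to be stored in the compressed file.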
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- int len : 8; // code length 0 int lit : 24; // lit p size int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. // for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) 
// // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. // std::vector<int> hlink(HUF_ENCSIZE); std::vector<long long *> fHeap(HUF_ENCSIZE); *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. // std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); std::vector<long long> scode(HUF_ENCSIZE); memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. 
// hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. // hufCanonicalCodeTable(scode.data()); memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode > ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } else if (l >= (long long)SHORT_ZEROCODE_RUN) { int zerun = l - SHORT_ZEROCODE_RUN + 2; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } } *pcode = const_cast<char *>(p); hufCanonicalCodeTable(hcode); return true; } // // DECODING TABLE BUILDING // // // Clear a newly allocated decoding table so that it contains only zeroes. 
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
//  - short codes (<= HUF_DECBITS) are resolved with a single table access;
//  - long code entry allocations are not optimized, because long codes are
//    infrequent;
//  - decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,  // i : min index in hcode
                             int iM,  // i : max index in hcode
                             HufDec *hdecod)  //  o: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        int *p = pl->p;
        pl->p = new int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
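  //
  // Illustrative cost comparison (a sketch, assuming hufLength(sCode) == 5
  // and hufLength(runCode) == 6): the run-length form costs
  // 5 + 6 + 8 = 19 bits, repeating the symbol costs 5 * runCount bits,
  // so the run-length form wins once 5 * runCount > 19, i.e. for
  // runCount >= 4.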
// if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) { outputCode(sCode, c, lc, out); outputCode(runCode, c, lc, out); outputBits(8, runCount, c, lc, out); } else { while (runCount-- >= 0) outputCode(sCode, c, lc, out); } } // // Encode (compress) ni values based on the Huffman encoding table hcode: // static int hufEncode // return: output size (in bits) (const long long *hcode, // i : encoding table const unsigned short *in, // i : uncompressed input buffer const int ni, // i : input buffer size (in bytes) int rlc, // i : rl code char *out) // o: compressed output buffer { char *outStart = out; long long c = 0; // bits not yet written to out int lc = 0; // number of valid bits in c (LSB) int s = in[0]; int cs = 0; // // Loop on input values // for (int i = 1; i < ni; i++) { // // Count same values or send code // if (s == in[i] && cs < 255) { cs++; } else { sendCode(hcode[s], cs, hcode[rlc], c, lc, out); cs = 0; } s = in[i]; } // // Send remaining code // sendCode(hcode[s], cs, hcode[rlc], c, lc, out); if (lc) *out = (c << (8 - lc)) & 0xff; return (out - outStart) * 8 + lc; } // // DECODING // // // In order to force the compiler to inline them, // getChar() and getCode() are implemented as macros // instead of "inline" functions. // #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #if 0 #define getCode(po, rlc, c, lc, in, out, ob, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ /* TinyEXR issue 78 */ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } #else static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in, const char *in_end, unsigned short *&out, const unsigned short *ob, const unsigned short *oe) { (void)ob; if (po == rlc) { if (lc < 8) { /* TinyEXR issue 78 */ if ((in + 1) >= in_end) { return false; } getChar(c, lc, in); } lc -= 8; unsigned char cs = (c >> lc); if (out + cs > oe) return false; // Bounds check for safety if ((out - 1) <= ob) return false; unsigned short s = out[-1]; while (cs-- > 0) *out++ = s; } else if (out < oe) { *out++ = po; } else { return false; } return true; } #endif // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; // begin unsigned short *oe = out + no; // end const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; // std::cout << "lit = " << pl.lit << std::endl; // std::cout << "rlc = " << rlc << std::endl; // std::cout << "c = " << c << std::endl; // std::cout << "lc = " << lc << std::endl; // std::cout << "in = " << in << std::endl; // std::cout << "out = " << out << std::endl; // std::cout << "oe = " << oe << std::endl; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { if (!pl.p) { return false; } // 
invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) { return false; } break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(std::vector<long long> &freq, const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; std::vector<long long> freq(HUF_ENCSIZE); countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, std::vector<unsigned short> *raw) { if (nCompressed == 0) { if (raw->size() != 0) return false; return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(), raw->data()); } // catch (...) 
//{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. 
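  // Summary of the stages below (the calls themselves are the
  // authoritative reference):
  //   1. bitmapFromData()       - mark which 16-bit values occur
  //   2. forwardLutFromBitmap() - map occurring values onto a dense
  //                               range [0, maxValue]
  //   3. applyLut()             - rewrite the samples through the LUT
  //   4. wav2Encode()           - 2D wavelet transform per channel
  //   5. hufCompress()          - Huffman-encode the wavelet output
  // For example, if only the values {0, 3, 7} occur, the forward LUT
  // maps 0 -> 0, 3 -> 1, 7 -> 2 and maxValue == 2.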
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. 
} assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; int precision; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0f; } }; bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) && (attributes[i].size == 1)) { param->type = static_cast<int>(attributes[i].value[0]); foundType = true; } } if (!foundType) { return false; } if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) && (attributes[i].size == 8)) { param->rate = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { for (int i = 0; i < num_attributes; i++) { if 
((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // `precision` (not `rate`) is what zfp_stream_set_precision()
        // consumes for this compression type.
        param->precision = *(reinterpret_cast<int *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else {
    assert(0);
  }

  return false;
}

// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
                          int num_channels, const unsigned char *src,
                          unsigned long src_size,
                          const ZFPCompressionParam &param) {
  // Size in bytes of the uncompressed FLOAT data, for comparison
  // against the byte count `src_size`.
  size_t uncompressed_size = static_cast<size_t>(dst_width) *
                             static_cast<size_t>(dst_num_lines) *
                             static_cast<size_t>(num_channels) * sizeof(float);

  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }

  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((dst_width % 4) == 0);
  assert((dst_num_lines % 4) == 0);

  if ((dst_width & 3U) || (dst_num_lines & 3U)) {
    return false;
  }

  field =
      zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
                   zfp_type_float, dst_width, dst_num_lines * num_channels);
  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
                        /* write random access */ 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
  } else {
    assert(0);
  }

  size_t buf_size = zfp_stream_maximum_size(zfp, field);

  std::vector<unsigned char> buf(buf_size);
  memcpy(&buf.at(0), src, src_size);

  bitstream *stream = stream_open(&buf.at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_stream_rewind(zfp);

  size_t image_size = dst_width * dst_num_lines;

  for (int c = 0; c < num_channels; c++) {
    // decompress 4x4 pixel block.
    for (int y = 0; y < dst_num_lines; y += 4) {
      for (int x = 0; x < dst_width; x += 4) {
        float fblock[16];
        zfp_decode_block_float_2(zfp, fblock);
        for (int j = 0; j < 4; j++) {
          for (int i = 0; i < 4; i++) {
            dst[c * image_size + ((y + j) * dst_width + (x + i))] =
                fblock[j * 4 + i];
          }
        }
      }
    }
  }

  zfp_field_free(field);
  zfp_stream_close(zfp);
  stream_close(stream);

  return true;
}

// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
                 const float *inPtr, int width, int num_lines,
                 int num_channels, const ZFPCompressionParam &param) {
  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((width % 4) == 0);
  assert((num_lines % 4) == 0);

  if ((width & 3U) || (num_lines & 3U)) {
    return false;
  }

  // create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, width, num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = width * num_lines; for (int c = 0; c < num_channels; c++) { // compress 4x4 pixel block. for (int y = 0; y < num_lines; y += 4) { for (int x = 0; x < width; x += 4) { float fblock[16]; for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * width + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = zfp_stream_compressed_size(zfp); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // // TODO(syoyo): Refactor function arguments. static bool DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) { // Invalid input #90 return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); assert(ret); (void)ret; // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. 
tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * static_cast<size_t>(x_stride) + channel_offset_list[c] * static_cast<size_t>(x_stride))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... 
for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. 
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len)); // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if 
(line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * 
size_t(x_stride);
          }

          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }

          for (int u = 0; u < width; u++) {
            tinyexr::FP16 hf;

            // address may not be aligned. use byte-wise copy for safety.#76
            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            tinyexr::FP32 f32 = half_to_float(hf);

            outLine[u] = f32.f;
          }
        } else {
          assert(0);
          return false;
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        const float *line_ptr = reinterpret_cast<const float *>(
            data_ptr + v * pixel_data_size * size_t(width) +
            channel_offset_list[c] * static_cast<size_t>(width));

        float *outLine = reinterpret_cast<float *>(out_images[c]);
        if (line_order == 0) {
          outLine += (size_t(y) + v) * size_t(x_stride);
        } else {
          outLine +=
              (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
        }

        if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
            (data_ptr + data_len)) {
          // Insufficient data size
          return false;
        }

        for (int u = 0; u < width; u++) {
          float val;
          tinyexr::cpy4(&val, line_ptr + u);

          tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

          outLine[u] = val;
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
            data_ptr + v * pixel_data_size * size_t(width) +
            channel_offset_list[c] * static_cast<size_t>(width));

        unsigned int *outLine =
            reinterpret_cast<unsigned int *>(out_images[c]);
        if (line_order == 0) {
          outLine += (size_t(y) + v) * size_t(x_stride);
        } else {
          outLine +=
              (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
        }

        for (int u = 0; u < width; u++) {
          if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
              (data_ptr + data_len)) {
            // Corrupted data?
            return false;
          }

          unsigned int val;
          tinyexr::cpy4(&val, line_ptr + u);

          tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

          outLine[u] = val;
        }
      }
    }
  }

  return true;
}

static void DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  assert(tile_offset_x * tile_size_x < data_width);
  assert(tile_offset_y * tile_size_y < data_height);

  // Compute actual image size in a tile.
  if ((tile_offset_x + 1) * tile_size_x >= data_width) {
    (*width) = data_width - (tile_offset_x * tile_size_x);
  } else {
    (*width) = tile_size_x;
  }

  if ((tile_offset_y + 1) * tile_size_y >= data_height) {
    (*height) = data_height - (tile_offset_y * tile_size_y);
  } else {
    (*height) = tile_size_y;
  }

  // Image size = tile size.
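  // Example with made-up numbers: data_width == 1000 and
  // tile_size_x == 64 give fifteen full 64-pixel tiles, and the
  // sixteenth (edge) tile gets width 1000 - 15 * 64 = 40.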
DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { // ??? return false; } } return true; } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
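      // In a multi-part file the headers are stored back to back, and the
      // list is terminated by an empty header: a single 0x00 byte, which
      // is what the marker[0] == '\0' test above detects.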
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window[0] = 0; info->data_window[1] = 0; info->data_window[2] = 0; info->data_window[3] = 0; info->line_order = 0; // @fixme info->display_window[0] = 0; info->display_window[1] = 0; info->display_window[2] = 0; info->display_window[3] = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if (err) { (*err) += "# of channels is zero.\n"; } return 
TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window[0], &data.at(0), sizeof(int)); memcpy(&info->data_window[1], &data.at(4), sizeof(int)); memcpy(&info->data_window[2], &data.at(8), sizeof(int)); memcpy(&info->data_window[3], &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3])); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window[0], &data.at(0), sizeof(int)); memcpy(&info->display_window[1], &data.at(4), sizeof(int)); memcpy(&info->display_window[2], &data.at(8), sizeof(int)); memcpy(&info->display_window[3], &data.at(12), sizeof(int)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[3])); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio)); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[1])); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_width)); has_screen_window_width = true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count)); } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." 
<< std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window[0] = info.display_window[0]; exr_header->display_window[1] = info.display_window[1]; exr_header->display_window[2] = info.display_window[2]; exr_header->display_window[3] = info.display_window[3]; exr_header->data_window[0] = info.data_window[0]; exr_header->data_window[1] = info.data_window[1]; exr_header->data_window[2] = info.data_window[2]; exr_header->data_window[3] = info.data_window[3]; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
exr_header->channels[c].name[255] = '\0'; exr_header->channels[c].pixel_type = info.channels[c].pixel_type; exr_header->channels[c].p_linear = info.channels[c].p_linear; exr_header->channels[c].x_sampling = info.channels[c].x_sampling; exr_header->channels[c].y_sampling = info.channels[c].y_sampling; } exr_header->pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->pixel_types[c] = info.channels[c].pixel_type; } // Initially fill with values of `pixel_types` exr_header->requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->requested_pixel_types[c] = info.channels[c].pixel_type; } exr_header->num_custom_attributes = static_cast<int>(info.attributes.size()); if (exr_header->num_custom_attributes > 0) { // TODO(syoyo): Report warning when # of attributes exceeds // `TINYEXR_MAX_CUSTOM_ATTRIBUTES` if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) { exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES; } exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc( sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes))); for (size_t i = 0; i < info.attributes.size(); i++) { memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256); memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256); exr_header->custom_attributes[i].size = info.attributes[i].size; // Just copy the pointer exr_header->custom_attributes[i].value = info.attributes[i].value; } } else { exr_header->custom_attributes = NULL; } exr_header->header_len = info.header_len; } static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, const std::vector<tinyexr::tinyexr_uint64> &offsets, const unsigned char *head, const size_t size, std::string *err) { int num_channels = exr_header->num_channels; int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1; int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1; size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety. if (exr_header->tiled) { size_t num_tiles = offsets.size(); // = # of blocks exr_image->tiles = static_cast<EXRTile *>( calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles))); for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) { // Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, exr_header->tile_size_x, exr_header->tile_size_y); // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) if (offsets[tile_idx] + sizeof(int) * 5 > size) { if (err) { (*err) += "Insufficient data size.\n"; } return TINYEXR_ERROR_INVALID_DATA; } size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3])); // @todo{ LoD } if (tile_coordinates[2] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } if (tile_coordinates[3] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len < 4 || size_t(data_len) > data_size) { if (err) { (*err) += "Insufficient data length.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Move to data addr: 20 = 16 + 4; data_ptr += 20; tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = tile_coordinates[3]; exr_image->num_tiles = static_cast<int>(num_tiles); } } else { // scanline format exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); for (int y = 0; y < static_cast<int>(num_blocks); y++) { size_t y_idx = static_cast<size_t>(y); if (offsets[y_idx] + sizeof(int) * 2 > size) { invalid_data = true; } else { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (size_t(data_len) > data_size) { invalid_data = true; } else { int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window[3] + 1)); int num_lines = end_line_no - line_no; // assert(num_lines > 0); if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; data_ptr += 8; // Adjust line_no with 
data_window.bmin.y line_no -= exr_header->data_window[1]; if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } } // omp parallel } if (invalid_data) { return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. { for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; } static bool ReconstructLineOffsets( std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { assert(head < marker); assert(offsets->size() == n); for (size_t i = 0; i < n; i++) { size_t offset = static_cast<size_t>(marker - head); // Offset should not exceed whole EXR file/data size. if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) { return false; } int y; unsigned int data_len; memcpy(&y, marker, sizeof(int)); memcpy(&data_len, marker + 4, sizeof(unsigned int)); if (data_len >= size) { return false; } tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); (*offsets)[i] = offset; marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len) } return true; } static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *head, const unsigned char *marker, const size_t size, const char **err) { if (exr_image == NULL || exr_header == NULL || head == NULL || marker == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0]; if (data_width >= std::numeric_limits<int>::max()) { // Issue 63 tinyexr::SetErrorMessage("Invalid data window value", err); return TINYEXR_ERROR_INVALID_DATA; } data_width++; int data_height = exr_header->data_window[3] - exr_header->data_window[1]; if (data_height >= std::numeric_limits<int>::max()) { tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } data_height++; if ((data_width < 0) || (data_height < 0)) { tinyexr::SetErrorMessage("data width or data height is negative.", err); return TINYEXR_ERROR_INVALID_DATA; } // Read offset tables. size_t num_blocks = 0; if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute.
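// Note: when the header carries an explicit `chunkCount` attribute it gives
// the offset-table length directly. Otherwise the block count is derived
// below from the geometry: ceil(width / tile_size_x) * ceil(height /
// tile_size_y) for tiled images, or ceil(height / num_scanline_blocks) for
// scanline images.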
num_blocks = static_cast<size_t>(exr_header->chunk_count); } else if (exr_header->tiled) { // @todo { LoD } size_t num_x_tiles = static_cast<size_t>(data_width) / static_cast<size_t>(exr_header->tile_size_x); if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) < static_cast<size_t>(data_width)) { num_x_tiles++; } size_t num_y_tiles = static_cast<size_t>(data_height) / static_cast<size_t>(exr_header->tile_size_y); if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) < static_cast<size_t>(data_height)) { num_y_tiles++; } num_blocks = num_x_tiles * num_y_tiles; } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } } std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks); for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; // Issue #81 if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", err); return TINYEXR_ERROR_INVALID_DATA; } memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offsets[y] = offset; } // If line offsets are invalid, we try to reconstruct it. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details. for (size_t y = 0; y < num_blocks; y++) { if (offsets[y] <= 0) { // TODO(syoyo) Report as warning? // if (err) { // stringstream ss; // ss << "Incomplete lineOffsets." << std::endl; // (*err) += ss.str(); //} bool ret = ReconstructLineOffsets(&offsets, num_blocks, head, marker, size); if (ret) { // OK break; } else { tinyexr::SetErrorMessage( "Cannot reconstruct lineOffset table in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } } } { std::string e; int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } // release memory(if exists) if ((exr_header->num_channels > 0) && exr_image && exr_image->images) { for (size_t c = 0; c < size_t(exr_header->num_channels); c++) { if (exr_image->images[c]) { free(exr_image->images[c]); exr_image->images[c] = NULL; } } free(exr_image->images); exr_image->images = NULL; } } return ret; } } } // namespace tinyexr int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { if (out_rgba == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Invalid EXR header.", err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. 
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if ((idxA == 0) && (idxR == -1) && (idxG == -1) && (idxB == -1)) { // Alpha channel only. if (exr_header.tiled) { // TODO: tiled alpha-only images are not handled here yet. } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check.
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { tinyexr::SetErrorMessage( "Invalid argument. `memory` or `exr_header` argument is null in " "ParseEXRHeaderFromMemory()", err); // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Insufficient header/data size.\n", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { tinyexr::SetErrorMessage(err_str, err); } } ConvertHeader(exr_header, info); // transfer `tiled` from version. exr_header->tiled = version->tiled; return ret; } int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Failed to parse EXR version", err); return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { fclose(fp); tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return
TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err); return 0; // @fixme } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = { 0x76, 0x2f, 0x31, 0x01 }; memory.insert(memory.end(), header, header + 4); } // Version, scanline. { char marker[] = { 2, 0, 0, 0 }; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. 
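// Note on the attribute wire format (as produced by WriteAttributeToMemory
// below, mirroring the parser above): each attribute is serialized as a
// zero-terminated name, a zero-terminated type string (e.g. "chlist",
// "box2i", "float"), a 4-byte size, and then `size` bytes of data. A single
// 0x00 byte terminates the attribute list (written below as "end of header").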
std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = { 0, 0, exr_image->width - 1, exr_image->height - 1 }; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = { 0.0f, 0.0f }; tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<unsigned char> data; std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += 
sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + 
channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. 
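// Note: EXR-style RLE emits a count byte per run (a literal run carries one
// extra byte per at most 127 payload bytes), so even incompressible input
// stays well below the 1.5x bound allocated here.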
std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 8192 + static_cast<unsigned int>( 2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. } std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { data.insert(data.end(), data_list[i].begin(), data_list[i].end()); offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } { memory.insert( memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } { memory.insert(memory.end(), data.begin(), data.end()); } assert(memory.size() > 0); (*memory_out) = static_cast<unsigned char *>(malloc(memory.size())); memcpy((*memory_out), &memory.at(0), memory.size()); return memory.size(); // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char 
**err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } #endif #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "wb"); #else FILE *fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_OPEN_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if ((mem_size > 0) && mem) { fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _MSC_VER FILE *fp = NULL; errno_t errcode = fopen_s(&fp, filename, "rb"); if ((0 != errcode) || (!fp)) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = { 0x76, 0x2f, 0x31, 0x01 }; if (memcmp(marker, header, 4) != 0) { tinyexr::SetErrorMessage("Invalid magic number", err); return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 8, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression.
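// Note: for deep scanline images this loader assumes one scanline per chunk
// unless the file uses ZIP compression, in which case num_scanline_blocks is
// bumped to 16 in the attribute loop below.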
int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh)); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&h)); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. 
int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. { unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return TINYEXR_ERROR_INVALID_DATA; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data.
{ unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return TINYEXR_ERROR_INVALID_DATA; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; const unsigned short *src_ptr = reinterpret_cast<unsigned short *>( &sample_data.at(size_t(data_offset) + x * sizeof(short))); tinyexr::cpy2(&(f16.u), src_ptr); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f; const float *src_ptr = reinterpret_cast<float *>( &sample_data.at(size_t(data_offset) + x * sizeof(float))); tinyexr::cpy4(&f, src_ptr); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->num_tiles = 0; } void FreeEXRErrorMessage(const char *msg) { if (msg) { free(reinterpret_cast<void *>(const_cast<char *>(msg))); } return; } void InitEXRHeader(EXRHeader *exr_header) { if
(exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. 
if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfer `tiled` from version. exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = { 0x76, 0x2f, 0x31, 0x01 }; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header.
{ // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In a multipart image, there is a 'part number' before each chunk's data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list; for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> offset_table( static_cast<size_t>(exr_headers[i]->chunk_count)); for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } chunk_offset_table_list.push_back(offset_table); } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> &offset_table = chunk_offset_table_list[i]; // First check that 'part number' is identical to 'i' for (size_t c = 0; c < offset_table.size(); c++) { const unsigned char *part_number_addr = memory + offset_table[c] - 4; // -4 to move to 'part number' field.
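// Note: every chunk belonging to part `i` must begin with the part index
// `i`; the check below treats a mismatch as a corrupt offset table.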
      unsigned int part_no;
      memcpy(&part_no, part_number_addr, sizeof(unsigned int));  // 4
      tinyexr::swap4(&part_no);

      if (part_no != i) {
        tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
                                 err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }

    std::string e;
    int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i],
                                   offset_table, memory, size, &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return ret;
    }
  }

  return TINYEXR_SUCCESS;
}

int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
                                  const EXRHeader **exr_headers,
                                  unsigned int num_parts, const char *filename,
                                  const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);

    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
                                         &buf.at(0), filesize, err);
}

int SaveEXR(const float *data, int width, int height, int components,
            const int save_as_fp16, const char *outfilename) {
  if ((components == 1) || (components == 3) || (components == 4)) {
    // OK
  } else {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Assume at least 16x16 pixels.
  if (width < 16) return TINYEXR_ERROR_INVALID_ARGUMENT;
  if (height < 16) return TINYEXR_ERROR_INVALID_ARGUMENT;

  EXRHeader header;
  InitEXRHeader(&header);

  EXRImage image;
  InitEXRImage(&image);

  image.num_channels = components;

  std::vector<float> images[4];

  if (components == 1) {
    images[0].resize(static_cast<size_t>(width * height));
    memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
  } else {
    images[0].resize(static_cast<size_t>(width * height));
    images[1].resize(static_cast<size_t>(width * height));
    images[2].resize(static_cast<size_t>(width * height));
    images[3].resize(static_cast<size_t>(width * height));

    // Split interleaved RGB(A)RGB(A)RGB(A)... into R, G and B (and A) planes.
    for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
      images[0][i] = data[static_cast<size_t>(components) * i + 0];
      images[1][i] = data[static_cast<size_t>(components) * i + 1];
      images[2][i] = data[static_cast<size_t>(components) * i + 2];
      if (components == 4) {
        images[3][i] = data[static_cast<size_t>(components) * i + 3];
      }
    }
  }

  float *image_ptr[4] = {0, 0, 0, 0};
  if (components == 4) {
    image_ptr[0] = &(images[3].at(0));  // A
    image_ptr[1] = &(images[2].at(0));  // B
    image_ptr[2] = &(images[1].at(0));  // G
    image_ptr[3] = &(images[0].at(0));  // R
  } else if (components == 3) {
    image_ptr[0] = &(images[2].at(0));  // B
    image_ptr[1] = &(images[1].at(0));  // G
    image_ptr[2] = &(images[0].at(0));  // R
  } else if (components == 1) {
    image_ptr[0] = &(images[0].at(0));  // A
  }

  image.images = reinterpret_cast<unsigned char **>(image_ptr);
  image.width = width;
  image.height = height;

  header.num_channels = components;
  header.channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  // Must be (A)BGR order, since most EXR viewers expect this channel order.
  if (components == 4) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
    strncpy_s(header.channels[1].name, "B", 255);
    strncpy_s(header.channels[2].name, "G", 255);
    strncpy_s(header.channels[3].name, "R", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
    strncpy(header.channels[1].name, "B", 255);
    strncpy(header.channels[2].name, "G", 255);
    strncpy(header.channels[3].name, "R", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
    header.channels[1].name[strlen("B")] = '\0';
    header.channels[2].name[strlen("G")] = '\0';
    header.channels[3].name[strlen("R")] = '\0';
  } else if (components == 3) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "B", 255);
    strncpy_s(header.channels[1].name, "G", 255);
    strncpy_s(header.channels[2].name, "R", 255);
#else
    strncpy(header.channels[0].name, "B", 255);
    strncpy(header.channels[1].name, "G", 255);
    strncpy(header.channels[2].name, "R", 255);
#endif
    header.channels[0].name[strlen("B")] = '\0';
    header.channels[1].name[strlen("G")] = '\0';
    header.channels[2].name[strlen("R")] = '\0';
  } else {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
  }

  header.pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  header.requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] =
        TINYEXR_PIXELTYPE_FLOAT;  // pixel type of input image
    if (save_as_fp16 > 0) {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_HALF;  // save with half (fp16) pixel format
    } else {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_FLOAT;  // save with float (fp32) pixel format
                                    // (i.e. no precision reduction)
    }
  }

  const char *err = NULL;
  int ret = SaveEXRImageToFile(&image, &header, outfilename, &err);
  if (ret != TINYEXR_SUCCESS) {
    if (err) {
      // SaveEXR() does not surface the error message to the caller, so
      // release it here to avoid leaking it.
      FreeEXRErrorMessage(err);
    }
    free(header.channels);
    free(header.pixel_types);
    free(header.requested_pixel_types);
    return ret;
  }

  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);

  return ret;
}

#ifdef __clang__
// zero-as-null-pointer-constant
#pragma clang diagnostic pop
#endif

#endif  // TINYEXR_IMPLEMENTATION_DEIFNED
#endif  // TINYEXR_IMPLEMENTATION
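// Usage note: a minimal load/save round trip with the high-level API above
// (a sketch only; "input.exr"/"output.exr" are placeholder filenames and
// error handling is abbreviated):
//
//   float *rgba = NULL;
//   int w = 0, h = 0;
//   const char *err = NULL;
//   if (LoadEXR(&rgba, &w, &h, "input.exr", &err) == TINYEXR_SUCCESS) {
//     // LoadEXR always yields 4-channel RGBA float data.
//     SaveEXR(rgba, w, h, 4, /* save_as_fp16 */ 0, "output.exr");
//     free(rgba);  // the RGBA buffer is allocated for the caller
//   } else if (err) {
//     fprintf(stderr, "tinyexr: %s\n", err);
//     FreeEXRErrorMessage(err);
//   }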
// TinyEXR contains some OpenEXR code, which is licensed under ------------

///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// *       Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// *       Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// *       Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////

// End of OpenEXR license -------------------------------------------------

#ifndef TINYEXR_H_
#define TINYEXR_H_

//
//
// Do this:
//    #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// i.e. it should look like this:
//    #include ...
//    #include ...
//    #include ...
//    #define TINYEXR_IMPLEMENTATION
//    #include "tinyexr.h"
//
//

#include <stddef.h>  // for size_t
#include <stdint.h>  // assume stdint.h is available (C99)

#ifdef __cplusplus
extern "C" {
#endif

// Use the embedded miniz to decode ZIP-compressed pixel data. Linking with
// zlib is required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif

// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif

#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0)  // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-5)  // Note: currently shares its
                                              // value with INVALID_FILE.
#define TINYEXR_ERROR_CANT_OPEN_FILE (-6)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7)
#define TINYEXR_ERROR_INVALID_HEADER (-8)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-9)

// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }

// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)

#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)

#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128)  // TinyEXR extension

#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)

#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)

#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)

typedef struct _EXRVersion {
  int version;    // this must be 2
  int tiled;      // tile format image
  int long_name;  // long name attribute
  int non_image;  // deep image (EXR 2.0)
  int multipart;  // multi-part (EXR 2.0)
} EXRVersion;

typedef struct _EXRAttribute {
  char name[256];  // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*
  int size;
  int pad0;
} EXRAttribute;

typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} EXRChannelInfo;

typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;

  int width;   // actual width in a tile.
  int height;  // actual height in a tile.

  unsigned char **images;  // image[channels][pixels]
} EXRTile;

typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  int data_window[4];
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;

  int chunk_count;

  // Properties for tiled format (`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  int long_name;
  int non_image;
  int multipart;
  unsigned int header_len;

  // Custom attributes (excludes required attributes, e.g. `channels`,
  // `compression`, etc.)
  int num_custom_attributes;
  EXRAttribute *custom_attributes;  // array of EXRAttribute. size =
                                    // `num_custom_attributes`.

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type (TINYEXR_PIXELTYPE_*) of `images`
                     // for each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;

  int compression_type;        // compression type (TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File); users can
                               // then edit it (only valid for HALF pixel
                               // type channels).
} EXRHeader;

typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;
} EXRMultiPartHeader;

typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct the
                   // image from tiles manually. NULL if scanline format.
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.

  int width;
  int height;
  int num_channels;

  // Properties for tile format.
  int num_tiles;

} EXRImage;

typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;
} EXRMultiPartImage;

typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;
} DeepImage;

// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assumes the EXR image contains A (single
// channel alpha) or RGB(A) channels.
// The application must free the image data returned through `out_rgba`.
// Result image format is: float x RGBA x width x height
// Returns a negative value and may set an error string in `err` when there's
// an error.
extern int LoadEXR(float **out_rgba, int *width, int *height,
                   const char *filename, const char **err);

// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assumes the EXR image contains RGB(A)
// channels.
// components must be 1 (Grayscale), 3 (RGB) or 4 (RGBA).
// Input image format is: `float x width x height`, or
// `float x RGB(A) x width x height`.
// Saves the image in fp16 (HALF) format when `save_as_fp16` is a positive
// non-zero value.
// Saves the image in fp32 (FLOAT) format when `save_as_fp16` is 0.
extern int SaveEXR(const float *data, const int width, const int height,
                   const int components, const int save_as_fp16,
                   const char *filename);

// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);

// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);

// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);

// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);

// Frees error message
extern void FreeEXRErrorMessage(const char *msg);

// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);

// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
                                     const unsigned char *memory, size_t size);

// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
                                  const char *filename, const char **err);

// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
                                    const EXRVersion *version,
                                    const unsigned char *memory, size_t size,
                                    const char **err);
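// A minimal sketch of the low-level single-part loading flow using the
// functions declared in this header (LoadEXRImageFromFile is declared just
// below; error handling is elided and "input.exr" is a placeholder
// filename):
//
//   EXRVersion version;
//   ParseEXRVersionFromFile(&version, "input.exr");
//
//   EXRHeader header;
//   InitEXRHeader(&header);
//   const char *err = NULL;
//   ParseEXRHeaderFromFile(&header, &version, "input.exr", &err);
//
//   EXRImage image;
//   InitEXRImage(&image);
//   LoadEXRImageFromFile(&image, &header, "input.exr", &err);
//
//   // ... access image.images[channel][pixel] (or image.tiles when tiled) ...
//
//   FreeEXRImage(&image);
//   FreeEXRHeader(&header);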
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
                                           int *num_headers,
                                           const EXRVersion *version,
                                           const char *filename,
                                           const char **err);

// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
                                             int *num_headers,
                                             const EXRVersion *version,
                                             const unsigned char *memory,
                                             size_t size, const char **err);

// Loads single-part OpenEXR image from a file.
// The application must set up `EXRHeader` with `ParseEXRHeaderFromFile`
// before calling this function.
// The application can free the EXRImage using `FreeEXRImage`.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
                                const char *filename, const char **err);

// Loads single-part OpenEXR image from memory.
// The application must set up `EXRHeader` with `ParseEXRHeaderFromMemory`
// before calling this function.
// The application can free the EXRImage using `FreeEXRImage`.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
                                  const unsigned char *memory,
                                  const size_t size, const char **err);

// Loads multi-part OpenEXR image from a file.
// The application must set up the `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromFile` before calling this function.
// The application can free each EXRImage using `FreeEXRImage`.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
                                         const EXRHeader **headers,
                                         unsigned int num_parts,
                                         const char *filename,
                                         const char **err);

// Loads multi-part OpenEXR image from memory.
// The application must set up the `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// The application can free each EXRImage using `FreeEXRImage`.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
                                           const EXRHeader **headers,
                                           unsigned int num_parts,
                                           const unsigned char *memory,
                                           const size_t size,
                                           const char **err);

// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int SaveEXRImageToFile(const EXRImage *image,
                              const EXRHeader *exr_header,
                              const char *filename, const char **err);

// Saves multi-channel, single-frame OpenEXR image to memory.
// The image is compressed using the `EXRHeader.compression_type` value.
// Returns the number of bytes written on success.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern size_t SaveEXRImageToMemory(const EXRImage *image,
                                   const EXRHeader *exr_header,
                                   unsigned char **memory, const char **err);

// Loads single-frame OpenEXR deep image.
// The application must free the memory of the variables in DeepImage (image,
// offset_table).
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
                       const char **err);

// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
//                        const char **err);

// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// The application must free the memory of the variables in DeepImage (image,
// offset_table).
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts,
//                                 const char *filename, const char **err);

// For emscripten.
// Loads single-frame OpenEXR image from memory. Assumes the EXR image
// contains RGB(A) channels.
// Returns a negative value and may set an error string in `err` when there's
// an error.
// When an error message is set, the application must free `err` with
// FreeEXRErrorMessage().
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                             const unsigned char *memory, size_t size,
                             const char **err);

#ifdef __cplusplus
}
#endif

#endif  // TINYEXR_H_

#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEIFNED
#define TINYEXR_IMPLEMENTATION_DEIFNED

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <sstream>

#include <limits>
#include <string>
#include <vector>

#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif  // __cplusplus > 199711L

#ifdef _OPENMP
#include <omp.h>
#endif

#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif

#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif

namespace tinyexr {

#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif #if TINYEXR_USE_MINIZ namespace miniz { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #pragma clang diagnostic ignored "-Wundef" #if __has_warning("-Wcomma") #pragma clang diagnostic ignored "-Wcomma" #endif #if __has_warning("-Wmacro-redefined") #pragma clang diagnostic ignored "-Wmacro-redefined" #endif #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #if __has_warning("-Wtautological-constant-compare") #pragma clang diagnostic ignored "-Wtautological-constant-compare" #endif #endif /* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <[email protected]>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks [email protected]) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. 
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From [email protected] and [email protected] - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. - Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <[email protected]> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. 
The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. 
The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). */ #ifndef MINIZ_HEADER_INCLUDED #define MINIZ_HEADER_INCLUDED //#include <stdlib.h> // Defines to completely disable specific portions of miniz.c: // If all macros here are defined the only functionality remaining will be // CRC-32, adler-32, tinfl, and tdefl. // Define MINIZ_NO_STDIO to disable all usage and any functions which rely on // stdio for file I/O. //#define MINIZ_NO_STDIO // If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able // to get the current time, or // get/set file times, and the C run-time funcs that get/set times won't be // called. // The current downside is the times written to your archives will be from 1979. #define MINIZ_NO_TIME // Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. 
#define MINIZ_NO_ARCHIVE_APIS // Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive // API's. //#define MINIZ_NO_ARCHIVE_WRITING_APIS // Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression // API's. //#define MINIZ_NO_ZLIB_APIS // Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent // conflicts against stock zlib. //#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. // Note if MINIZ_NO_MALLOC is defined then the user must always provide custom // user alloc/free/realloc // callbacks to the zlib and archive API's, and a few stand-alone helper API's // which don't provide custom user // functions (such as tdefl_compress_mem_to_heap() and // tinfl_decompress_mem_to_heap()) won't work. //#define MINIZ_NO_MALLOC #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) // TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc // on Linux #define MINIZ_NO_TIME #endif #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) //#include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #endif #if MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient // integer loads and stores from unaligned addresses. //#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \ 0 // disable to suppress compiler warnings #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are // reasonably fast (and don't involve compiler generated calls to helper // functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some // parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! typedef unsigned long mz_ulong; // mz_free() internally uses the MZ_FREE() macro (which by default calls free() // unless you've modified the MZ_MALLOC macro) to release a block allocated from // the heap. void mz_free(void *p); #define MZ_ADLER32_INIT (1) // mz_adler32() returns the initial adler-32 value to use when called with // ptr==NULL. mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len); #define MZ_CRC32_INIT (0) // mz_crc32() returns the initial CRC-32 value to use when called with // ptr==NULL. mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len); // Compression strategies. enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 }; // Method #define MZ_DEFLATED 8 #ifndef MINIZ_NO_ZLIB_APIS // Heap allocation callbacks. // Note that mz_alloc_func parameter types purpsosely differ from zlib's: // items/size is size_t, not unsigned long. 
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size); typedef void(*mz_free_func)(void *opaque, void *address); typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size); #define MZ_VERSION "9.1.15" #define MZ_VERNUM 0x91F0 #define MZ_VER_MAJOR 9 #define MZ_VER_MINOR 1 #define MZ_VER_REVISION 15 #define MZ_VER_SUBREVISION 0 // Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The // other values are for advanced use (refer to the zlib docs). enum { MZ_NO_FLUSH = 0, MZ_PARTIAL_FLUSH = 1, MZ_SYNC_FLUSH = 2, MZ_FULL_FLUSH = 3, MZ_FINISH = 4, MZ_BLOCK = 5 }; // Return status codes. MZ_PARAM_ERROR is non-standard. enum { MZ_OK = 0, MZ_STREAM_END = 1, MZ_NEED_DICT = 2, MZ_ERRNO = -1, MZ_STREAM_ERROR = -2, MZ_DATA_ERROR = -3, MZ_MEM_ERROR = -4, MZ_BUF_ERROR = -5, MZ_VERSION_ERROR = -6, MZ_PARAM_ERROR = -10000 }; // Compression levels: 0-9 are the standard zlib-style levels, 10 is best // possible compression (not zlib compatible, and may be very slow), // MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. is currently only enabled when // MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflate(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything. 
Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. // On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). // MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. 
// MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION 
MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. #ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif // ------------------- ZIP archive reading/writing #ifndef MINIZ_NO_ARCHIVE_APIS enum { MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 }; typedef struct { mz_uint32 m_file_index; mz_uint32 m_central_dir_ofs; mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME time_t m_time; #endif mz_uint32 m_crc32; mz_uint64 m_comp_size; mz_uint64 m_uncomp_size; mz_uint16 m_internal_attr; mz_uint32 m_external_attr; mz_uint64 m_local_header_ofs; mz_uint32 m_comment_size; char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t(*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t(*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef struct mz_zip_archive_tag { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; mz_uint m_total_files; mz_zip_mode m_zip_mode; mz_uint m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 } mz_zip_flags; // ZIP archive reading // Inits a ZIP archive reader. // These functions read and validate the archive's central directory. mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); #endif // Returns the total number of files in the archive. mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); // Returns detailed information about an archive file entry. mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); // Determines if an archive file entry is a directory entry. mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); // Retrieves the filename of an archive file entry. 
// Returns the number of bytes written to pFilename, or if filename_buf_size is // 0 this function returns the number of bytes needed to fully store the // filename. mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size); // Attempts to locates a file in the archive's central directory. // Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH // Returns -1 if the file cannot be found. int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); // Extracts a archive file to a memory buffer using no memory allocation. mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); // Extracts a archive file to a memory buffer. mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags); // Extracts a archive file to a dynamically allocated heap buffer. void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags); void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags); // Extracts a archive file using a callback function to output the file's data. mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); #ifndef MINIZ_NO_STDIO // Extracts a archive file to a disk file and sets its last accessed and // modified times. // This function only extracts files, not archive directory records. mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags); #endif // Ends archive reading, freeing all allocations, and closing the input archive // file if mz_zip_reader_init_file() was used. mz_bool mz_zip_reader_end(mz_zip_archive *pZip); // ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS // Inits a ZIP archive writer. mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size); mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning); #endif // Converts a ZIP archive reader object into a writer object, to allow efficient // in-place file appends to occur on an existing archive. // For archives opened using mz_zip_reader_init_file, pFilename must be the // archive's filename so it can be reopened for writing. If the file can't be // reopened, mz_zip_reader_end() will be called. 
// For archives opened using mz_zip_reader_init_mem, the memory block must be // growable using the realloc callback (which defaults to realloc unless you've // overridden it). // Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's // user provided m_pWrite function cannot be NULL. // Note: In-place archive modification is not recommended unless you know what // you're doing, because if execution stops or something goes wrong before // the archive is finalized the file's central directory will be hosed. mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename); // Adds the contents of a memory buffer to an archive. These functions record // the current local time into the archive. // To add a directory entry, call this method with an archive name ending in a // forwardslash with empty buffer. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags); mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32); #ifndef MINIZ_NO_STDIO // Adds the contents of a disk file to an archive. This function also records // the disk file's modified time into the archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); #endif // Adds a file to an archive by fully cloning the data from another archive. // This function fully clones the source file's compressed data (no // recompression), along with its full filename, extra data, and comment fields. mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index); // Finalizes the archive by writing the central directory records followed by // the end of central directory record. // After an archive is finalized, the only valid call on the mz_zip_archive // struct is mz_zip_writer_end(). // An archive must be manually finalized by calling this function for it to be // valid. mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize); // Ends archive writing, freeing all allocations, and closing the output file if // mz_zip_writer_init_file() was used. // Note for the archive to be valid, it must have been finalized before ending. mz_bool mz_zip_writer_end(mz_zip_archive *pZip); // Misc. high-level helper functions: // mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) // appends a memory blob to a ZIP archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. 
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags);

// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint zip_flags);

#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

#endif // #ifndef MINIZ_NO_ARCHIVE_APIS

// ------------------- Low-level Decompression API Definitions

// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer
// is at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
  TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
  TINFL_FLAG_HAS_MORE_INPUT = 2,
  TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
  TINFL_FLAG_COMPUTE_ADLER32 = 8
};

// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap
// block allocated via malloc().
// On entry:
//  pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
//  to decompress.
// On return:
//  Function returns a pointer to the decompressed data, or NULL on failure.
//  *pOut_len will be set to the decompressed data's size, which could be
//  larger than src_buf_len on uncompressible data.
//  The caller must call mz_free() on the returned block when it's no longer
//  needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags);

// tinfl_decompress_mem_to_mem() decompresses a block in memory to another
// block in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of
// bytes written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags);

// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called
// to flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int(*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);

struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;

// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768

// Return status.
typedef enum {
  TINFL_STATUS_BAD_PARAM = -3,
  TINFL_STATUS_ADLER32_MISMATCH = -2,
  TINFL_STATUS_FAILED = -1,
  TINFL_STATUS_DONE = 0,
  TINFL_STATUS_NEEDS_MORE_INPUT = 1,
  TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
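// Usage sketch (illustrative example, not part of miniz): inflating a
// complete raw deflate stream with the heap helper above. 'src'/'src_len' are
// hypothetical; pass TINFL_FLAG_PARSE_ZLIB_HEADER instead of 0 for
// zlib-wrapped input.
//
//   size_t out_len = 0;
//   void *out = tinfl_decompress_mem_to_heap(src, src_len, &out_len, 0);
//   if (out) {
//     // ... use the 'out_len' decompressed bytes at 'out' ...
//     mz_free(out);
//   }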
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
  do { \
    (r)->m_state = 0; \
  } \
  MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32

// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build
// any desired higher level decompression API. In the limit case, it can be
// called once per byte of input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
                              const mz_uint8 *pIn_buf_next,
                              size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
                              mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
                              const mz_uint32 decomp_flags);

// Internal/private bits follow.
enum {
  TINFL_MAX_HUFF_TABLES = 3,
  TINFL_MAX_HUFF_SYMBOLS_0 = 288,
  TINFL_MAX_HUFF_SYMBOLS_1 = 32,
  TINFL_MAX_HUFF_SYMBOLS_2 = 19,
  TINFL_FAST_LOOKUP_BITS = 10,
  TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};

typedef struct {
  mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
  mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
      m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;

#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif

#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif

struct tinfl_decompressor_tag {
  mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final,
      m_type, m_check_adler32, m_dist, m_counter, m_num_extra,
      m_table_sizes[TINFL_MAX_HUFF_TABLES];
  tinfl_bit_buf_t m_bit_buf;
  size_t m_dist_from_out_buf_start;
  tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
  mz_uint8 m_raw_header[4],
      m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
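// Usage sketch (illustrative example, not part of miniz): a one-shot call of
// the tinfl_decompress() coroutine declared above. It assumes hypothetical
// buffers 'src' (holding a whole raw deflate stream) and 'dst' (large enough
// for the whole output), which is what the non-wrapping flag asserts.
//
//   tinfl_decompressor inflator;
//   tinfl_init(&inflator);
//   size_t in_size = src_len, out_size = dst_cap;
//   tinfl_status s =
//       tinfl_decompress(&inflator, src, &in_size, dst, dst, &out_size,
//                        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
//   // s == TINFL_STATUS_DONE => 'out_size' bytes were written to 'dst'.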
// ------------------- Low-level Compression API Definitions

// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0

// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
  TDEFL_HUFFMAN_ONLY = 0,
  TDEFL_DEFAULT_MAX_PROBES = 128,
  TDEFL_MAX_PROBES_MASK = 0xFFF
};

// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
  TDEFL_WRITE_ZLIB_HEADER = 0x01000,
  TDEFL_COMPUTE_ADLER32 = 0x02000,
  TDEFL_GREEDY_PARSING_FLAG = 0x04000,
  TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
  TDEFL_RLE_MATCHES = 0x10000,
  TDEFL_FILTER_MATCHES = 0x20000,
  TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
  TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};

// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
//  pSrc_buf, src_buf_len: Pointer and size of source block to compress.
//  flags: The max match finder probes (default is 128) logically OR'd against
//  the above flags. Higher probes are slower but improve compression.
// On return:
//  Function returns a pointer to the compressed data, or NULL on failure.
//  *pOut_len will be set to the compressed data's size, which could be larger
//  than src_buf_len on uncompressible data.
//  The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags);

// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags);

// Compresses an image to a compressed PNG file in memory.
// On entry:
//  pImage, w, h, and num_chans describe the image to compress. num_chans may
//  be 1, 2, 3, or 4.
//  The image pitch in bytes per scanline will be w*num_chans. The leftmost
//  pixel on the top scanline is stored first in memory.
//  level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
//  MZ_BEST_COMPRESSION, etc.; MZ_DEFAULT_LEVEL is a decent default.
//  If flip is true, the image will be flipped on the Y axis (useful for
//  OpenGL apps).
// On return:
//  Function returns a pointer to the compressed data, or NULL on failure.
//  *pLen_out will be set to the size of the PNG image file.
//  The caller must mz_free() the returned heap block (which will typically be
//  larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out);
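// Usage sketch (illustrative example, not part of miniz): encoding a tiny
// all-black 2x2 RGB image with the PNG helper above; the pixel buffer is
// hypothetical.
//
//   unsigned char pixels[2 * 2 * 3] = { 0 };
//   size_t png_len = 0;
//   void *png = tdefl_write_image_to_png_file_in_memory(pixels, 2, 2, 3,
//                                                       &png_len);
//   if (png) {
//     // ... write the 'png_len' bytes at 'png' to a .png file ...
//     mz_free(png);
//   }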
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at
// a time.
typedef mz_bool(*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
                                         void *pUser);

// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);

enum {
  TDEFL_MAX_HUFF_TABLES = 3,
  TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
  TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
  TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
  TDEFL_LZ_DICT_SIZE = 32768,
  TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
  TDEFL_MIN_MATCH_LEN = 3,
  TDEFL_MAX_MATCH_LEN = 258
};

// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 12,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 15,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif

// The low-level tdefl functions below may be used directly if the above
// helper functions aren't flexible enough. The low-level functions don't make
// any heap allocations, unlike the above helper functions.
typedef enum {
  TDEFL_STATUS_BAD_PARAM = -2,
  TDEFL_STATUS_PUT_BUF_FAILED = -1,
  TDEFL_STATUS_OKAY = 0,
  TDEFL_STATUS_DONE = 1
} tdefl_status;

// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
  TDEFL_NO_FLUSH = 0,
  TDEFL_SYNC_FLUSH = 2,
  TDEFL_FULL_FLUSH = 3,
  TDEFL_FINISH = 4
} tdefl_flush;

// tdefl's compression state structure.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos,
      m_bits_in, m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;

// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer()
// API for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);

// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush);
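// Usage sketch (illustrative example, not part of miniz): one-shot zlib-style
// compression with the low-level API above. 'src'/'src_len' and the fixed
// output buffer are hypothetical; production code should loop while the
// return status is TDEFL_STATUS_OKAY and drain the output buffer between
// calls.
//
//   static tdefl_compressor comp;  // large; avoid putting it on the stack
//   unsigned char out_buf[64 * 1024];
//   size_t in_size = src_len, out_size = sizeof(out_buf);
//   if (tdefl_init(&comp, NULL, NULL,
//                  TDEFL_DEFAULT_MAX_PROBES | TDEFL_WRITE_ZLIB_HEADER) ==
//       TDEFL_STATUS_OKAY) {
//     tdefl_status s = tdefl_compress(&comp, src, &in_size, out_buf,
//                                     &out_size, TDEFL_FINISH);
//     // s == TDEFL_STATUS_DONE => 'out_size' compressed bytes in 'out_buf'.
//   }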
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with
// a non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush);

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);

// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS is
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may
// be much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS

#ifdef __cplusplus
}
#endif

#endif // MINIZ_HEADER_INCLUDED

// ------------------- End of Header: Implementation follows. (If you only
// want the header, define MINIZ_HEADER_FILE_ONLY.)

#ifndef MINIZ_HEADER_FILE_ONLY

typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];

//#include <assert.h>
//#include <string.h>

#define MZ_ASSERT(x) assert(x)

#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif

#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif

#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif

#ifdef __cplusplus
extern "C" {
#endif

// ------------------- zlib-style API's

mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
  size_t block_len = buf_len % 5552;
  if (!ptr) return MZ_ADLER32_INIT;
  while (buf_len) {
    for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
      s1 += ptr[0], s2 += s1;
      s1 += ptr[1], s2 += s1;
      s1 += ptr[2], s2 += s1;
      s1 += ptr[3], s2 += s1;
      s1 += ptr[4], s2 += s1;
      s1 += ptr[5], s2 += s1;
      s1 += ptr[6], s2 += s1;
      s1 += ptr[7], s2 += s1;
    }
    for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
    s1 %= 65521U, s2 %= 65521U;
    buf_len -= block_len;
    block_len = 5552;
  }
  return (s2 << 16) + s1;
}

// Karl Malbrain's compact CRC-32.
See "A compact CCITT crc16 and crc32 C // implementation that balances processor cache usage against speed": // http://www.geocities.com/malbrain/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c }; mz_uint32 crcu32 = (mz_uint32)crc; if (!ptr) return MZ_CRC32_INIT; crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; } return ~crcu32; } void mz_free(void *p) { MZ_FREE(p); } #ifndef MINIZ_NO_ZLIB_APIS static void *def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); } static void def_free_func(void *opaque, void *address) { (void)opaque, (void)address; MZ_FREE(address); } // static void *def_realloc_func(void *opaque, void *address, size_t items, // size_t size) { // (void)opaque, (void)address, (void)items, (void)size; // return MZ_REALLOC(address, items * size); //} const char *mz_version(void) { return MZ_VERSION; } int mz_deflateInit(mz_streamp pStream, int level) { return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy) { tdefl_compressor *pComp; mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); if (!pStream) return MZ_STREAM_ERROR; if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = MZ_ADLER32_INIT; pStream->msg = NULL; pStream->reserved = 0; pStream->total_in = 0; pStream->total_out = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor)); if (!pComp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pComp; if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { mz_deflateEnd(pStream); return MZ_PARAM_ERROR; } return MZ_OK; } int mz_deflateReset(mz_streamp pStream) { if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR; pStream->total_in = pStream->total_out = 0; tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags); return MZ_OK; } int mz_deflate(mz_streamp pStream, int flush) { size_t in_bytes, out_bytes; mz_ulong orig_total_in, orig_total_out; int mz_status = MZ_OK; if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR; if (!pStream->avail_out) return MZ_BUF_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE) return (flush == MZ_FINISH) ? 
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for (;;) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; // Can't make forward progress without some input. } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; // This is really over conservative. (And lame, but it's actually pretty // tricky to compute a true upper bound given the way tdefl's blocking works.) return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? 
MZ_BUF_ERROR : status; } *pDest_len = stream.total_out; return mz_deflateEnd(&stream); } int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); } mz_ulong mz_compressBound(mz_ulong source_len) { return mz_deflateBound(NULL, source_len); } typedef struct { tinfl_decompressor m_decomp; mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits; mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; tinfl_status m_last_status; } inflate_state; int mz_inflateInit2(mz_streamp pStream, int window_bits) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state)); if (!pDecomp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pDecomp; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; pDecomp->m_window_bits = window_bits; return MZ_OK; } int mz_inflateInit(mz_streamp pStream) { return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); } int mz_inflate(mz_streamp pStream, int flush) { inflate_state *pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state *)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { // MZ_FINISH on the first call implies that the input and output buffers are // large enough to hold the entire compressed/decompressed file. decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } // flush != MZ_FINISH then we must assume there's more input. 
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }
  for (;;) {
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pState->m_dict_avail = (mz_uint)out_bytes;
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    if (status < 0)
      return MZ_DATA_ERROR;  // Stream is corrupted (there could be some
                             // uncompressed data left in the output dictionary
                             // - oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR;  // Signal caller that we can't make forward
                            // progress without supplying more input or by
                            // setting flush to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large enough to hold the remaining
      // uncompressed data when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }
  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}

int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));

  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;

  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;

  status = mz_inflateInit(&stream);
  if (status != MZ_OK) return status;

  status = mz_inflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_inflateEnd(&stream);
    return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ?
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = { { MZ_OK, "" }, { MZ_STREAM_END, "stream end" }, { MZ_NEED_DICT, "need dictionary" }, { MZ_ERRNO, "file error" }, { MZ_STREAM_ERROR, "stream error" }, { MZ_DATA_ERROR, "data error" }, { MZ_MEM_ERROR, "out of memory" }, { MZ_BUF_ERROR, "buf error" }, { MZ_VERSION_ERROR, "version error" }, { MZ_PARAM_ERROR, "parameter error" } }; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif // MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all // compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index:; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do { \ for (;;) { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt // to read beyond the input buf, then something is wrong with the input because // the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of // the stream with 0's in this scenario. #define TINFL_GET_BYTE(state_index, c) \ do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for (;;) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes // remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode // the next Huffman code (and absolutely no more). It works by trying to fully // decode a // Huffman code by using whatever bits are currently present in the bit buffer. // If this fails, it reads another byte, and tries again until it succeeds or // until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. #define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 }; static const int s_length_extra[31] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0 }; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0 }; static const int s_dist_extra[32] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 }; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; static const int s_min_table_sizes[3] = { 257, 1, 4 }; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? 
(size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < 
r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while 
((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE); TINFL_CR_FINISH common_exit : r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start; *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next; if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0)) { const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size; mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH; } return status; } // Higher level helper functions. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0; *pOut_len = 0; tinfl_init(&decomp); for (;;) { size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; tinfl_status status = tinfl_decompress( &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } src_buf_ofs += src_buf_size; *pOut_len += dst_buf_size; if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity; } return pBuf; } size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp); status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); return (status != TINFL_STATUS_DONE) ? 
TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; } int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { int result = 0; tinfl_decompressor decomp; mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0; if (!pDict) return TINFL_STATUS_FAILED; tinfl_init(&decomp); for (;;) { size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); in_buf_ofs += in_buf_size; if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) break; if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { result = (status == TINFL_STATUS_DONE); break; } dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); } MZ_FREE(pDict); *pIn_buf_size = in_buf_ofs; return result; } // ------------------- Low-level Compression (independent from all decompression // API's) // Purposely making these tables static for faster init and thread safety. static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285 }; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0 }; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17 }; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 }; static const mz_uint8 
s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 }; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13 }; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted // values. typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // [email protected], Jyrki Katajainen, [email protected], November 1996. static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) { int i; mz_uint32 total = 0; if (code_list_len <= 1) return; for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i]; for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i)); while (total != (1UL << max_code_size)) { pNum_codes[max_code_size]--; for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; } total--; } } static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) { int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes); if (static_table) { for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++; } else { tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms; int num_used_syms = 0; const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0]; for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; } pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++; tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit); MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]); for (i = 1, j = num_used_syms; i <= code_size_limit; i++) for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i); } next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1); for (i = 0; i < table_len; i++) { mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue; code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1); d->m_huff_codes[table_num][i] = (mz_uint16)rev_code; } } #define TDEFL_PUT_BITS(b, l) \ do { \ mz_uint bits = b; \ mz_uint len = l; \ MZ_ASSERT(bits <= ((1U << len) - 1U)); \ d->m_bit_buffer |= (bits << d->m_bits_in); \ d->m_bits_in += len; \ while (d->m_bits_in >= 8) { \ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \ d->m_bit_buffer >>= 8; \ d->m_bits_in -= 8; \ } \ } \ MZ_MACRO_END #define TDEFL_RLE_PREV_CODE_SIZE() \ { \ if (rle_repeat_count) { \ if (rle_repeat_count < 3) { \ d->m_huff_count[2][prev_code_size] = (mz_uint16)( \ d->m_huff_count[2][prev_code_size] + rle_repeat_count); \ while (rle_repeat_count--) \ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \ } else { \ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 16; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_repeat_count - 3); \ } \ rle_repeat_count = 0; \ } \ } #define TDEFL_RLE_ZERO_CODE_SIZE() \ { \ if (rle_z_count) { \ if (rle_z_count < 3) { \ d->m_huff_count[2][0] = \ (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \ while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \ } else if (rle_z_count <= 10) { \ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 
1); \ packed_code_sizes[num_packed_code_sizes++] = 17; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 3); \ } else { \ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 18; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 11); \ } \ rle_z_count = 0; \ } \ } static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; static void tdefl_start_dynamic_block(tdefl_compressor *d) { int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; d->m_huff_count[0][256] = 1; tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes); memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes); total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0; memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); for (i = 0; i < total_code_sizes_to_pack; i++) { mz_uint8 code_size = code_sizes_to_pack[i]; if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); } } else { TDEFL_RLE_ZERO_CODE_SIZE(); if (code_size != prev_code_size) { TDEFL_RLE_PREV_CODE_SIZE(); d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); } } prev_code_size = code_size; } if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); } tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 5); for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4); for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS( d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) { mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); } } static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); 
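  /* These are the fixed-Huffman code lengths mandated by RFC 1951 sec. 3.2.6:
     literals 0-143 get 8-bit codes, 144-255 get 9 bits, length/EOB symbols
     256-279 get 7 bits, 280-287 get 8 bits, and all 32 distance symbols get
     5 bits. The calls below run with static_table=MZ_TRUE, so they only
     derive the canonical codes from these preset lengths instead of
     recomputing lengths from symbol frequencies. */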
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); } static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF }; #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \ MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; mz_uint8 *pOutput_buf = d->m_pOutput_buf; mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; mz_uint64 bit_buffer = d->m_bit_buffer; mz_uint bits_in = d->m_bits_in; #define TDEFL_PUT_BITS_FAST(b, l) \ { \ bit_buffer |= (((mz_uint64)(b)) << bits_in); \ bits_in += (l); \ } flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint s0, s1, n0, n1, sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); // This sequence coaxes MSVC into using cmov's vs. jmp's. s0 = s_tdefl_small_dist_sym[match_dist & 511]; n0 = s_tdefl_small_dist_extra[match_dist & 511]; s1 = s_tdefl_large_dist_sym[match_dist >> 8]; n1 = s_tdefl_large_dist_extra[match_dist >> 8]; sym = (match_dist < 512) ? s0 : s1; num_extra_bits = (match_dist < 512) ? n0 : n1; MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } } if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE; *(mz_uint64 *)pOutput_buf = bit_buffer; pOutput_buf += (bits_in >> 3); bit_buffer >>= (bits_in & ~7); bits_in &= 7; } #undef TDEFL_PUT_BITS_FAST d->m_pOutput_buf = pOutput_buf; d->m_bits_in = 0; d->m_bit_buffer = 0; while (bits_in) { mz_uint32 n = MZ_MIN(bits_in, 16); TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n); bit_buffer >>= n; bits_in -= n; } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #else static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], 
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); if (match_dist < 512) { sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist]; } else { sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; } MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && // MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) { if (static_block) tdefl_start_static_block(d); else tdefl_start_dynamic_block(d); return tdefl_compress_lz_codes(d); } static int tdefl_flush_block(tdefl_compressor *d, int flush) { mz_uint saved_bit_buf, saved_bits_in; mz_uint8 *pSaved_output_buf; mz_bool comp_block_succeeded = MZ_FALSE; int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size; mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf; d->m_pOutput_buf = pOutput_buf_start; d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; MZ_ASSERT(!d->m_output_flush_remaining); d->m_output_flush_ofs = 0; d->m_output_flush_remaining = 0; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left); d->m_pLZ_code_buf -= (d->m_num_flags_left == 8); if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) { TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8); } TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in; if (!use_raw_block) comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48)); // If the block gets expanded, forget the current contents of the output // buffer and send a raw block instead. if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) && ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) { mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; TDEFL_PUT_BITS(0, 2); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) { TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16); } for (i = 0; i < d->m_total_lz_bytes; ++i) { TDEFL_PUT_BITS( d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8); } } // Check for the extremely unlikely (if not impossible) case of the compressed // block not fitting into the output buffer when using dynamic codes. 
else if (!comp_block_succeeded) { d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; tdefl_compress_block(d, MZ_TRUE); } if (flush) { if (flush == TDEFL_FINISH) { if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } } } else { mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); } } } MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end); memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++; if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) { if (d->m_pPut_buf_func) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user)) return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED); } else if (pOutput_buf_start == d->m_output_buf) { int bytes_to_copy = (int)MZ_MIN( (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs)); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy); d->m_out_buf_ofs += bytes_to_copy; if ((n -= bytes_to_copy) != 0) { d->m_output_flush_ofs = bytes_to_copy; d->m_output_flush_remaining = n; } } else { d->m_out_buf_ofs += n; } } return d->m_output_flush_remaining; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p) static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q; mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s); MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; q = (const mz_uint16 *)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32; do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); if (!probe_len) { *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break; } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p 
== *(const mz_uint8 *)q)) > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break; c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); } } } #else static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint8 *s = d->m_dict + pos, *p, *q; mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if ((d->m_dict[probe_pos + match_len] == c0) && \ (d->m_dict[probe_pos + match_len - 1] == c1)) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break; if (probe_len > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return; c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1]; } } } #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static mz_bool tdefl_compress_fast(tdefl_compressor *d) { // Faster, minimally featured LZRW1-style match+parse loop with better // register utilization. Intended for applications where raw throughput is // valued more highly than ratio. 
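// The fast path keeps a single-probe hash table over the first three bytes
// (the "trigram") at each position: one m_hash lookup per position, no
// chaining. Candidate matches are extended 16 bits at a time via
// TDEFL_READ_UNALIGNED_WORD, and each position emits either a raw literal
// byte or a (length, distance) pair, guarded by one flag bit per code.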
mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while ((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } } d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; return MZ_TRUE; } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit) { d->m_total_lz_bytes++; *d->m_pLZ_code_buf++ = lit; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } d->m_huff_count[0][lit]++; } static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) { mz_uint32 s0, s1; MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE)); d->m_total_lz_bytes += match_len; d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); match_dist -= 1; d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF); d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3; *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127]; d->m_huff_count[1][(match_dist < 512) ? 
s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; // Update dictionary and hash chains. Keeps the lookahead size equal to // TDEFL_MAX_MATCH_LEN. if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; // Simple lazy/greedy parsing state machine. len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? 
d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if (!cur_match_dist) tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } // Move the lookahead forward by len_to_move bytes. d->m_lookahead_pos += len_to_move; MZ_ASSERT(d->m_lookahead_size >= len_to_move); d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE); // Check if it's time to flush the current LZ codes to the internal output // buffer. if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { int n; d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; } } d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; return MZ_TRUE; } static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { if (d->m_pIn_buf_size) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; } if (d->m_pOut_buf_size) { size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n); d->m_output_flush_ofs += (mz_uint)n; d->m_output_flush_remaining -= (mz_uint)n; d->m_out_buf_ofs += n; *d->m_pOut_buf_size = d->m_out_buf_ofs; } return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; } tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) { if (!d) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return TDEFL_STATUS_BAD_PARAM; } d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size; d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size; d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; d->m_out_buf_ofs = 0; d->m_flush = flush; if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); } d->m_wants_to_finish |= (flush == TDEFL_FINISH); if ((d->m_output_flush_remaining) || (d->m_finished)) return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) { if (!tdefl_compress_fast(d)) return d->m_prev_return_status; } else #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN { if (!tdefl_compress_normal(d)) return d->m_prev_return_status; } if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf)) d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf); if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) { if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status; d->m_finished = (flush == TDEFL_FINISH); if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; } } return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); } tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush) { MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush); } tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user; d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash); d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0; d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY; d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1; d->m_pIn_buf = NULL; d->m_pOut_buf = NULL; d->m_pIn_buf_size = NULL; 
d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY);
  succeeded = succeeded &&
              (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
               TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}
typedef struct {
  size_t m_size, m_capacity;
  mz_uint8 *m_pBuf;
  mz_bool m_expandable;
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable) return MZ_FALSE;
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf) return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return MZ_FALSE;
  else
    *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf) return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return 0;
  return out_buf.m_size;
}
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = { 0,   1,   6,   32,  16, 32,
                                                128, 256, 512, 768, 1500 };
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ?
TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #endif // MINIZ_NO_ZLIB_APIS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif // Simple PNG writer function by Alex Evans, 2011. Released into the public // domain: https://gist.github.com/908299, more context at // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. // This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 }; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH);
  }
  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE) {
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // write real header
  *pLen_out = out_buf.m_size - 41;
  {
    static const mz_uint8 chans[] = { 0x00, 0x00, 0x04, 0x02, 0x06 };
    mz_uint8 pnghdr[41] = { 0x89, 0x50, 0x4e, 0x47, 0x0d,
                            0x0a, 0x1a, 0x0a, 0x00, 0x00,
                            0x00, 0x0d, 0x49, 0x48, 0x44,
                            0x52, 0,    0,    (mz_uint8)(w >> 8),
                            (mz_uint8)w, 0,  0,    (mz_uint8)(h >> 8),
                            (mz_uint8)h, 8,  chans[num_chans],
                            0, 0, 0, 0, 0, 0, 0,
                            (mz_uint8)(*pLen_out >> 24),
                            (mz_uint8)(*pLen_out >> 16),
                            (mz_uint8)(*pLen_out >> 8),
                            (mz_uint8)*pLen_out,
                            0x49, 0x44, 0x41, 0x54 };
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
    for (i = 0; i < 4; ++i, c <<= 8)
      ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
    memcpy(out_buf.m_pBuf, pnghdr, 41);
  }
  // write footer (IDAT CRC-32, followed by IEND chunk)
  if (!tdefl_output_buffer_putter(
          "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
    *pLen_out = 0;
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
                          *pLen_out + 4);
  for (i = 0; i < 4; ++i, c <<= 8)
    (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
  // compute final size of file, grab compressed data buffer and return
  *pLen_out += 57;
  MZ_FREE(pComp);
  return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but
  // we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs
  // were #defined out)
  return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
                                                    pLen_out, 6, MZ_FALSE);
}
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
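// Note: the TinyC branch above has no 64-bit tell/seek, so it falls back to
// plain ftell/fseek. The branches below use ftello64/fseeko64 where glibc
// exposes them (_LARGEFILE64_SOURCE) and ftello/fseeko otherwise, so
// MZ_FTELL64/MZ_FSEEK64 keep working on archives larger than 2 GB.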
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) #define MZ_DELETE_FILE remove #else #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler // alignment and platform endian issues, miniz.c doesn't use structs for any of // this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, // End of central directory offsets MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; MZ_FILE *m_pFile; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \ (array_ptr)->m_element_size = element_size #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \ ((element_type *)((array_ptr)->m_p))[index] static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive 
*pZip, mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if (new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef _MSC_VER struct tm tm_struct; struct tm *tm = &tm_struct; errno_t err = localtime_s(tm, &time); if (err) { *pDOS_date = 0; *pDOS_time = 0; return; } #else struct tm *tm = localtime(&time); #endif *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef MINIZ_NO_TIME (void)pFilename; *pDOS_date = *pDOS_time = 0; #else struct MZ_FILE_STAT_STRUCT file_stat; // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000 // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. 
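  /* (Building with -D_LARGEFILE64_SOURCE selects the stat64 flavor of the
     MZ_FILE_STAT macros above.) The mtime fetched here is then packed into
     the two 16-bit DOS words by mz_zip_time_to_dos_time():
       date: bits 15-9 = years since 1980, bits 8-5 = month, bits 4-0 = day
       time: bits 15-11 = hour, bits 10-5 = minute, bits 4-0 = seconds/2
     e.g. 1999-12-31 23:58:02 packs to date 0x279F, time 0xBF41. */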
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date); #endif // #ifdef MINIZ_NO_TIME return MZ_TRUE; } #ifndef MINIZ_NO_TIME static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) { struct utimbuf t; t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); } #endif // #ifndef MINIZ_NO_TIME #endif // #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_READING; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) \ do { \ mz_uint32 t = a; \ a = b; \ b = t; \ } \ MZ_MACRO_END // Heap sort of lowercased filenames, used to help accelerate plain central // directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), // but it could allocate memory.) 
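// The sort below is a plain in-place binary heapsort over the offset index
// array pIndices: a bottom-up heapify pass first ((size - 2) >> 1 is the last
// internal node), then repeated swap-root-to-end plus sift-down. Only the
// 32-bit offsets are permuted; the central directory bytes themselves never
// move and nothing is allocated.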
static void mz_zip_reader_sort_central_dir_offsets_by_filename( mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; int start = (size - 2) >> 1, end; while (start >= 0) { int child, root = start; for (;;) { if ((child = (root << 1) + 1) >= size) break; child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } start--; } end = size - 1; while (end > 0) { int child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for (;;) { if ((child = (root << 1) + 1) >= end) break; child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } end--; } } static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint32 flags) { mz_uint cdir_size, num_this_disk, cdir_disk_index; mz_uint64 cdir_ofs; mz_int64 cur_file_ofs; const mz_uint8 *p; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); // Basic sanity checks - reject files which are too small, and check the first // 4 bytes of the file to make sure a local header is there. if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; // Find the end of central directory record by scanning the file from the end // towards the beginning. cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); for (;;) { int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) return MZ_FALSE; for (i = n - 4; i >= 0; --i) if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break; if (i >= 0) { cur_file_ofs += i; break; } if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE))) return MZ_FALSE; cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); } // Read and verify the end of central directory record. 
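  // (The fixed part of that record is 22 bytes and may be followed by a
  // comment of at most 0xFFFF bytes, so its 0x06054b50 "PK\5\6" signature
  // must sit within the last 22 + 65535 bytes of the file, which is what
  // bounded the backwards scan above.)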
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) || ((pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) != MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS))) return MZ_FALSE; num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1))) return MZ_FALSE; if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE; pZip->m_central_directory_file_ofs = cdir_ofs; if (pZip->m_total_files) { mz_uint i, n; // Read the entire central directory into a heap block, and allocate another // heap block to hold the unsorted central dir file record offsets, and // another to hold the sorted indices. if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE))) return MZ_FALSE; if (sort_central_dir) { if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE)) return MZ_FALSE; } if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size) return MZ_FALSE; // Now create an index into the central directory file records, do some // basic sanity checking on each record, and check for zip64 entries (which // are not yet supported). 
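  // (A compressed or decompressed size of 0xFFFFFFFF is the zip64 escape
  // value; the real 64-bit sizes live in a zip64 extra field that this reader
  // does not parse, so such entries are rejected below rather than misread.)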
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p; for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) { mz_uint total_header_size, comp_size, decomp_size, disk_index; if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)) return MZ_FALSE; MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p); if (sort_central_dir) MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) || (comp_size == 0xFFFFFFFF)) return MZ_FALSE; disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS); if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE; if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n) return MZ_FALSE; n -= total_header_size; p += total_header_size; } } if (sort_central_dir) mz_zip_reader_sort_central_dir_offsets_by_filename(pZip); return MZ_TRUE; } mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags) { if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE; if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; size_t s = (file_ofs >= pZip->m_archive_size) ? 
0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) { if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; #ifdef __cplusplus pZip->m_pState->m_pMem = const_cast<void *>(pMem); #else pZip->m_pState->m_pMem = (void *)pMem; #endif pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { mz_uint64 file_size; MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return MZ_FALSE; if (MZ_FSEEK64(pFile, 0, SEEK_END)) { MZ_FCLOSE(pFile); return MZ_FALSE; } file_size = MZ_FTELL64(pFile); if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? pZip->m_total_files : 0; } static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh( mz_zip_archive *pZip, mz_uint file_index) { if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return NULL; return &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); } mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) { mz_uint m_bit_flag; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); return (m_bit_flag & 1); } mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) { mz_uint filename_len, external_attr; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; // First see if the filename ends with a '/' character. filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_len) { if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') return MZ_TRUE; } // Bugfix: This code was also checking if the internal attribute was non-zero, // which wasn't correct. // Most/all zip writers (hopefully) set DOS file/directory attributes in the // low 16-bits, so check for the DOS directory flag and ignore the source OS // ID in the created by field. // FIXME: Remove this check? Is it necessary - we already check the filename. 
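// (0x10 is the DOS/FAT FILE_ATTRIBUTE_DIRECTORY bit, which is what the test // below checks.)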
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); if ((external_attr & 0x10) != 0) return MZ_TRUE; return MZ_FALSE; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if ((!p) || (!pStat)) return MZ_FALSE; // Unpack the central directory record. pStat->m_file_index = file_index; pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); #ifndef MINIZ_NO_TIME pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); #endif pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); // Copy as much of the filename and comment as possible. n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1); memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pStat->m_filename[n] = '\0'; n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1); pStat->m_comment_size = n; memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n); pStat->m_comment[n] = '\0'; return MZ_TRUE; } mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) { if (filename_buf_size) pFilename[0] = '\0'; return 0; } n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_buf_size) { n = MZ_MIN(n, filename_buf_size - 1); memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pFilename[n] = '\0'; } return n + 1; } static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags) { mz_uint i; if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len); for (i = 0; i < len; ++i) if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE; return MZ_TRUE; } static MZ_FORCEINLINE int mz_zip_reader_filename_compare( const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? 
(int)(l_len - r_len) : (l - r); } static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; const mz_uint filename_len = (mz_uint)strlen(pFilename); int l = 0, h = size - 1; while (l <= h) { int m = (l + h) >> 1, file_index = pIndices[m], comp = mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len); if (!comp) return file_index; else if (comp < 0) l = m + 1; else h = m - 1; } return -1; } int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags) { mz_uint file_index; size_t name_len, comment_len; if ((!pZip) || (!pZip->m_pState) || (!pName) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return -1; if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size)) return mz_zip_reader_locate_file_binary_search(pZip, pName); name_len = strlen(pName); if (name_len > 0xFFFF) return -1; comment_len = pComment ? strlen(pComment) : 0; if (comment_len > 0xFFFF) return -1; for (file_index = 0; file_index < pZip->m_total_files; file_index++) { const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS); const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; if (filename_len < name_len) continue; if (comment_len) { mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS); const char *pFile_comment = pFilename + filename_len + file_extra_len; if ((file_comment_len != comment_len) || (!mz_zip_reader_string_equal(pComment, pFile_comment, file_comment_len, flags))) continue; } if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) { int ofs = filename_len - 1; do { if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':')) break; } while (--ofs >= 0); ofs++; pFilename += ofs; filename_len -= ofs; } if ((filename_len == name_len) && (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags))) return file_index; } return -1; } mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int status = TINFL_STATUS_DONE; mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail; mz_zip_archive_file_stat file_stat; void *pRead_buf; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; tinfl_decompressor inflator; if ((buf_size) && (!pBuf)) return MZ_FALSE; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old 
zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Ensure supplied output buffer is large enough. needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size; if (buf_size < needed_size) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size) return MZ_FALSE; return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32); } // Decompress the file either directly from memory or from a file input // buffer. tinfl_init(&inflator); if (pZip->m_pState->m_pMem) { // Read directly from the archive in memory. pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else if (pUser_read_buf) { // Use a user provided read buffer. if (!user_read_buf_size) return MZ_FALSE; pRead_buf = (mz_uint8 *)pUser_read_buf; read_buf_size = user_read_buf_size; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } else { // Temporarily allocate a read buffer. read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #endif return MZ_FALSE; if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } do { size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? 
TINFL_FLAG_HAS_MORE_INPUT : 0)); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; out_buf_ofs += out_buf_size; } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); if (status == TINFL_STATUS_DONE) { // Make sure the entire file was decompressed, and check its CRC. if ((out_buf_ofs != file_stat.m_uncomp_size) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size); } mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0); } mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0); } void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) { mz_uint64 comp_size, uncomp_size, alloc_size; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); void *pBuf; if (pSize) *pSize = 0; if (!p) return NULL; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
comp_size : uncomp_size; #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #endif return NULL; if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) return NULL; if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; // Decompress the file either directly from memory or from a file input // buffer. if (pZip->m_pState->m_pMem) { pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else { read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. 
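// (When the entry is stored, or the caller asked for the raw compressed // bytes, the data is streamed to the callback unchanged; the CRC-32 is only // accumulated when the data is handed out uncompressed, because the stored // CRC covers the uncompressed bytes. For illustration, a minimal // mz_file_write_func that just counts the bytes written could look like the // following hypothetical sketch, which is not part of miniz: // static size_t count_cb(void *pOpaque, mz_uint64 ofs, const void *pBuf, // size_t n) { (void)ofs; (void)pBuf; *(mz_uint64 *)pOpaque += n; return n; } // used as: mz_uint64 total = 0; // mz_zip_reader_extract_to_callback(pZip, file_index, count_cb, &total, 0); // Note that the callback must return n, since any short write is treated as // a failure.)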
if (pZip->m_pState->m_pMem) { #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #endif return MZ_FALSE; if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size) status = TINFL_STATUS_FAILED; else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size); cur_file_ofs += file_stat.m_comp_size; out_buf_ofs += file_stat.m_comp_size; comp_remaining = 0; } else { while (comp_remaining) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32( file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail); if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; out_buf_ofs += read_buf_avail; comp_remaining -= read_buf_avail; } } } else { tinfl_decompressor inflator; tinfl_init(&inflator); if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE))) status = TINFL_STATUS_FAILED; else { do { mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; if (out_buf_size) { if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size) { status = TINFL_STATUS_FAILED; break; } file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size); if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) { status = TINFL_STATUS_FAILED; break; } } } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT)); } } if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { // Make sure the entire file was decompressed, and check its CRC. 
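// (out_buf_ofs is the running count of bytes handed to the callback, so the // comparison below catches truncated output before the CRC-32 comparison.)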
if ((out_buf_ofs != file_stat.m_uncomp_size) || (file_crc32 != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags); } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) { (void)ofs; return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque); } mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) { mz_bool status; mz_zip_archive_file_stat file_stat; MZ_FILE *pFile; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; pFile = MZ_FOPEN(pDst_filename, "wb"); if (!pFile) return MZ_FALSE; status = mz_zip_reader_extract_to_callback( pZip, file_index, mz_zip_file_write_callback, pFile, flags); if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE; #ifndef MINIZ_NO_TIME if (status) mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); #endif return status; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_end(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; if (pZip->m_pState) { mz_zip_internal_state *pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO pZip->m_pFree(pZip->m_pAlloc_opaque, pState); } pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags); } #endif // ------------------- .ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static void mz_write_le16(mz_uint8 *p, mz_uint16 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); } static void mz_write_le32(mz_uint8 *p, mz_uint32 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); p[2] = (mz_uint8)(v >> 16); p[3] = (mz_uint8)(v >> 24); } #define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v)) #define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v)) mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) { if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (pZip->m_file_offset_alignment) { // Ensure user specified file offset alignment is a power of 2. 
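// (A nonzero value v is a power of two exactly when (v & (v - 1)) == 0, // which is the test applied below.)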
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)) return MZ_FALSE; } if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_archive_size = existing_size; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_zip_internal_state *pState = pZip->m_pState; mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size); #ifdef _MSC_VER if ((!n) || ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #else if ((!n) || ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #endif return 0; if (new_size > pState->m_mem_capacity) { void *pNew_block; size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity); while (new_capacity < new_size) new_capacity *= 2; if (NULL == (pNew_block = pZip->m_pRealloc( pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity))) return 0; pState->m_pMem = pNew_block; pState->m_mem_capacity = new_capacity; } memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n); pState->m_mem_size = (size_t)new_size; return n; } mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size) { pZip->m_pWrite = mz_zip_heap_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning))) { if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, initial_allocation_size))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_mem_capacity = initial_allocation_size; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning) { MZ_FILE *pFile; pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_pFile = pFile; if (size_to_reserve_at_beginning) { mz_uint64 cur_ofs = 0; char buf[4096]; MZ_CLEAR_OBJ(buf); do { size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) { mz_zip_writer_end(pZip); return MZ_FALSE; } cur_ofs += n; 
size_to_reserve_at_beginning -= n; } while (size_to_reserve_at_beginning); } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; // No sense in trying to write to an archive that's already at the support max // size if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; pState = pZip->m_pState; if (pState->m_pFile) { #ifdef MINIZ_NO_STDIO pFilename; return MZ_FALSE; #else // Archive is being read from stdio - try to reopen as writable. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; if (!pFilename) return MZ_FALSE; pZip->m_pWrite = mz_zip_file_write_func; if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { // The mz_zip_archive is now in a bogus state because pState->m_pFile is // NULL, so just close it. mz_zip_reader_end(pZip); return MZ_FALSE; } #endif // #ifdef MINIZ_NO_STDIO } else if (pState->m_pMem) { // Archive lives in a memory block. Assume it's from the heap that we can // resize using the realloc callback. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; pState->m_mem_capacity = pState->m_mem_size; pZip->m_pWrite = mz_zip_heap_write_func; } // Archive is being read via a user provided read function - make sure the // user has specified a write function too. else if (!pZip->m_pWrite) return MZ_FALSE; // Start writing new files at the archive's current central directory // location. pZip->m_archive_size = pZip->m_central_directory_file_ofs; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_central_directory_file_ofs = 0; return MZ_TRUE; } mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags) { return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0); } typedef struct { mz_zip_archive *m_pZip; mz_uint64 m_cur_archive_file_ofs; mz_uint64 m_comp_size; } mz_zip_writer_add_state; static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, void *pUser) { mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser; if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len) != len) return MZ_FALSE; pState->m_cur_archive_file_ofs += len; pState->m_comp_size += len; return MZ_TRUE; } static mz_bool mz_zip_writer_create_local_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date) { (void)pZip; memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); return MZ_TRUE; } static mz_bool mz_zip_writer_create_central_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir( mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; // No zip64 support yet if ((local_header_ofs > 0xFFFFFFFF) || (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + comment_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_central_dir_header( pZip, central_dir_header, filename_size, extra_size, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return MZ_FALSE; if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1))) { // Try to push the central directory array back into its original state. 
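// (Only the central directory byte array needs to be restored here; the // offsets array is pushed last, so if any push failed the offsets entry was // never added.)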
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { // Basic ZIP archive filename validity checks: Valid filenames cannot start // with a forward slash, cannot contain a drive letter, and cannot use // DOS-style backward slashes. if (*pArchive_name == '/') return MZ_FALSE; while (*pArchive_name) { if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE; pArchive_name++; } return MZ_TRUE; } static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment( mz_zip_archive *pZip) { mz_uint32 n; if (!pZip->m_file_offset_alignment) return 0; n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); return (pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1); } static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) { mz_uint32 s = MZ_MIN(sizeof(buf), n); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) return MZ_FALSE; cur_file_ofs += s; n -= s; } return MZ_TRUE; } mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32) { mz_uint16 method = 0, dos_time = 0, dos_date = 0; mz_uint level, ext_attributes = 0, num_alignment_padding_bytes; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed; mz_zip_internal_state *pState; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; pState = pZip->m_pState; if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return MZ_FALSE; // No zip64 support yet if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; #ifndef MINIZ_NO_TIME { time_t cur_time; time(&cur_time); mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date); } #endif // #ifndef MINIZ_NO_TIME archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { // Set DOS Subdirectory attribute bit. ext_attributes |= 0x10; // Subdirectories cannot contain data. if ((buf_size) || (uncomp_size)) return MZ_FALSE; } // Try to do any allocations before writing to the archive, so if an // allocation fails the file remains unmodified. 
(A good idea if we're doing // an in-place modification.) if ((!mz_zip_array_ensure_room( pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = 
pZip->m_archive_size, uncomp_size = 0, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, 
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer( pComp, pRead_buf, in_buf_size, uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if (NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, 
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back( pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if 
(pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. 
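// (The archive is first opened with the reader; central directory sorting is // skipped because a writer has no use for the lookup index. It is then // converted in place with mz_zip_writer_init_from_reader, which reopens the // file writable and positions new data where the old central directory // began. An illustrative call, with hypothetical file names: // mz_zip_add_mem_to_archive_file_in_place("out.zip", "hello.txt", "Hello", 5, // NULL, 0, MZ_DEFAULT_LEVEL);)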
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags) {
  mz_bool status, created_new_archive = MZ_FALSE;
  mz_zip_archive zip_archive;
  struct MZ_FILE_STAT_STRUCT file_stat;
  MZ_CLEAR_OBJ(zip_archive);
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
      ((comment_size) && (!pComment)) ||
      ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
  if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
    // Create a new archive.
    if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
      return MZ_FALSE;
    created_new_archive = MZ_TRUE;
  } else {
    // Append to an existing archive.
    if (!mz_zip_reader_init_file(
            &zip_archive, pZip_filename,
            level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
      return MZ_FALSE;
    if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
      mz_zip_reader_end(&zip_archive);
      return MZ_FALSE;
    }
  }
  status =
      mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
                               pComment, comment_size, level_and_flags, 0, 0);
  // Always finalize, even if adding failed for some reason, so we have a valid
  // central directory. (This may not always succeed, but we can try.)
  if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
  if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
  if ((!status) && (created_new_archive)) {
    // It's a new archive and something went wrong, so just delete it.
    int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
    (void)ignoredStatus;
  }
  return status;
}

void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint flags) {
  int file_index;
  mz_zip_archive zip_archive;
  void *p = NULL;

  if (pSize) *pSize = 0;

  if ((!pZip_filename) || (!pArchive_name)) return NULL;

  MZ_CLEAR_OBJ(zip_archive);
  if (!mz_zip_reader_init_file(
          &zip_archive, pZip_filename,
          flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
    return NULL;

  if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name,
                                              NULL, flags)) >= 0)
    p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);

  mz_zip_reader_end(&zip_archive);
  return p;
}
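// Illustrative sketch (not part of miniz): how the two convenience functions
// above pair up. `data`/`data_len` are placeholders; the extracted buffer is
// allocated with the archive's allocator (malloc by default), so free() it.
#if 0
mz_zip_add_mem_to_archive_file_in_place("out.zip", "blob.bin", data, data_len,
                                        NULL, 0, MZ_BEST_COMPRESSION);
size_t size = 0;
void *p = mz_zip_extract_archive_file_to_heap("out.zip", "blob.bin", &size, 0);
// ... use p[0..size-1] ...
free(p);
#endif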
#endif  // #ifndef MINIZ_NO_STDIO

#endif  // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

#endif  // #ifndef MINIZ_NO_ARCHIVE_APIS

#ifdef __cplusplus
}
#endif

#endif  // MINIZ_HEADER_FILE_ONLY

/*
  This is free and unencumbered software released into the public domain.

  Anyone is free to copy, modify, publish, use, compile, sell, or distribute
  this software, either in source code form or as a compiled binary, for any
  purpose, commercial or non-commercial, and by any means.

  In jurisdictions that recognize copyright laws, the author or authors of
  this software dedicate any and all copyright interest in the software to
  the public domain. We make this dedication for the benefit of the public at
  large and to the detriment of our heirs and successors. We intend this
  dedication to be an overt act of relinquishment in perpetuity of all
  present and future rights to this software under copyright law.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

  For more information, please refer to <http://unlicense.org/>
*/

// ---------------------- end of miniz ----------------------------------------

#ifdef __clang__
#pragma clang diagnostic pop
#endif

#ifdef _MSC_VER
#pragma warning(pop)
#endif

}  // namespace miniz
#else

// Reuse MINIZ_LITTLE_ENDIAN macro

#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif

#endif  // TINYEXR_USE_MINIZ

// static bool IsBigEndian(void) {
//   union {
//     unsigned int i;
//     char c[4];
//   } bint = {0x01020304};
//
//   return bint.c[0] == 1;
// }

static void SetErrorMessage(const std::string &msg, const char **err) {
  if (err) {
#ifdef _WIN32
    (*err) = _strdup(msg.c_str());
#else
    (*err) = strdup(msg.c_str());
#endif
  }
}

static const int kEXRVersionSize = 8;

static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
}

static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned short tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[1];
  dst[1] = src[0];
#endif
}

static void cpy4(int *dst_val, const int *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
}

static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
}

static void cpy4(float *dst_val, const float *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
}

static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned int tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[3];
  dst[1] = src[2];
  dst[2] = src[1];
  dst[3] = src[0];
#endif
}

#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val,
                 const tinyexr::tinyexr_uint64 *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
  dst[4] = src[4];
  dst[5] = src[5];
  dst[6] = src[6];
  dst[7] = src[7];
}
#endif

static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  tinyexr::tinyexr_uint64 tmp = (*val);
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[7];
  dst[1] = src[6];
  dst[2] = src[5];
  dst[3] = src[4];
  dst[4] = src[3];
  dst[5] = src[2];
  dst[6] = src[1];
  dst[7] = src[0];
#endif
}
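// The swap helpers above are no-ops on little-endian hosts; on a big-endian
// host swap4() turns 0x01020304 into 0x04030201, so values always reach the
// (little-endian) EXR file in the right byte order. The byte-wise cpy
// helpers avoid unaligned word access on targets where that matters.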
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic pop
#endif

static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}

static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the half
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;                    // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}
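// Worked example: 1.0f is sign 0, exponent 127, mantissa 0. Re-biasing the
// exponent, newexp = 127 - 127 + 15 = 15, gives half bits sign 0, exponent
// 15, mantissa 0, i.e. 0x3C00; half_to_float() maps 0x3C00 back to 1.0f.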
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y  0
// #define IMF_DECREASING_Y  1
// #define IMF_RANDOM_Y      2
//
// #define IMF_NO_COMPRESSION    0
// #define IMF_RLE_COMPRESSION   1
// #define IMF_ZIPS_COMPRESSION  2
// #define IMF_ZIP_COMPRESSION   3
// #define IMF_PIZ_COMPRESSION   4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION   6
// #define IMF_B44A_COMPRESSION  7

#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NULL(\0).
  const char *p = ptr;
  const char *q = ptr;
  while ((size_t(q - ptr) < len) && (*q) != 0) {
    q++;
  }

  if (size_t(q - ptr) >= len) {
    (*s) = std::string();
    return NULL;
  }

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data,
                          size_t *marker_size, const char *marker,
                          size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);

  marker += name_len + 1;
  size -= name_len + 1;

  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);

  marker += type_len + 1;
  size -= type_len + 1;

  if (size < sizeof(uint32_t)) {
    return false;
  }

  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.
      marker += sizeof(uint32_t);
      size -= sizeof(uint32_t);
      *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
      data->resize(1);
      (*data)[0] = '\0';
      return true;
    } else {
      return false;
    }
  }

  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);

  if (size < data_len) {
    return false;
  }

  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));

  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}

static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  out->insert(out->end(), name, name + strlen(name) + 1);
  out->insert(out->end(), type, type + strlen(type) + 1);

  int outLen = len;
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
  out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
              reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
  out->insert(out->end(), data, data + len);
}

typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} ChannelInfo;

typedef struct {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;

  int data_window[4];
  int line_order;
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;

  int chunk_count;

  // Tiled format
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  unsigned int header_len;

  int compression_type;

  void clear() {
    channels.clear();
    attributes.clear();

    data_window[0] = 0;
    data_window[1] = 0;
    data_window[2] = 0;
    data_window[3] = 0;
    line_order = 0;
    display_window[0] = 0;
    display_window[1] = 0;
    display_window[2] = 0;
    display_window[3] = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;

    chunk_count = 0;

    // Tiled format
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;

    header_len = 0;
    compression_type = 0;
  }
} HeaderInfo;
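// Worked example of the attribute layout parsed by ReadAttribute() and
// emitted by WriteAttributeToMemory(): an EXR "compression" attribute
// holding the single byte 0 (NO_COMPRESSION) is serialized as
//
//   "compression\0"  "compression\0"  01 00 00 00  00
//    name             type             size (LE)    data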
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));

  for (;;) {
    if ((*p) == 0) {
      break;
    }
    ChannelInfo info;

    tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
                             (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }

    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }

    const unsigned char *data_end =
        reinterpret_cast<const unsigned char *>(p) + 16;
    if (data_end >= (data.data() + data.size())) {
      return false;
    }

    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;                                        // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));          // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));          // int
    p += 4;

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));

    channels.push_back(info);
  }

  return true;
}

static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;

  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1;  // +1 for \0
    sz += 16;                                    // 4 * int
  }
  data.resize(sz + 1);

  unsigned char *p = &data.at(0);

  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;

    int pixel_type = channels[c].pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));

    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);

    (*p) = channels[c].p_linear;
    p += 4;

    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);

    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }

  (*p) = '\0';
}

static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //

  miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
  int ret = miniz::mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;

  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);

  compressedSize = outSize;
#endif

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
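// Worked example of the reorder + predictor preprocessing above: the bytes
// 10 12 15 are delta-coded as 10, (12 - 10) + 128 + 256 -> 0x82,
// (15 - 12) + 128 + 256 -> 0x83 (after truncation to unsigned char);
// DecompressZip() below inverts this with t[0] = t[-1] + t[0] - 128.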
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed (Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (miniz::MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by
                                 // GNU C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64'
                                 // to 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//

static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;

  while (runStart < inEnd) {
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }

    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressible run
      //

      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressible run
      //

      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }

      *outWrite++ = static_cast<char>(runStart - runEnd);

      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }

    ++runEnd;
  }

  return static_cast<int>(outWrite - out);
}
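// Worked example for rleCompress(): the six input bytes "AAAABC" hold a
// 4-byte run followed by two literals, so the output is the five bytes
//   3 'A'         (run: count - 1 = 3, then the repeated value)
//   -2 'B' 'C'    (literal: negative count, then the raw bytes)
// rleUncompress() below reverses both record types.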
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//

static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;

  while (inLength > 0) {
    if (*in < 0) {
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;

      if (0 > (maxLength -= count)) return 0;

      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      int count = *in++;
      inLength -= 2;

      if (0 > (maxLength -= count + 1)) return 0;

      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;

      in++;
    }
  }

  return static_cast<int>(out - outStart);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// End of RLE code from OpenEXR -----------------------------------

static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // outSize will be (src_size * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);

  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

static void DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40).
    memcpy(dst, src, src_size);
    return;
  }

  std::vector<unsigned char> tmpBuf(uncompressed_size);

  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  assert(ret == static_cast<int>(uncompressed_size));
  (void)ret;

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
}

#if TINYEXR_USE_PIZ

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"

#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

#endif

//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//

struct PIZChannelData {
  unsigned short *start;
  unsigned short *end;
  int nx;
  int ny;
  int ys;
  int size;
};

//-----------------------------------------------------------------------------
//
//  16-bit Haar Wavelet encoding and decoding
//
//  The source code in this file is derived from the encoding
//  and decoding routines written by Christian Rouet for his
//  PIZ image file format.
//
//-----------------------------------------------------------------------------

//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//

inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  short as = static_cast<short>(a);
  short bs = static_cast<short>(b);

  short ms = (as + bs) >> 1;
  short ds = as - bs;

  l = static_cast<unsigned short>(ms);
  h = static_cast<unsigned short>(ds);
}

inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  short ls = static_cast<short>(l);
  short hs = static_cast<short>(h);

  int hi = hs;
  int ai = ls + (hi & 1) + (hi >> 1);

  short as = static_cast<short>(ai);
  short bs = static_cast<short>(ai - hi);

  a = static_cast<unsigned short>(as);
  b = static_cast<unsigned short>(bs);
}
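// Worked example: wenc14(5, 2, l, h) stores the truncated average l = 3 and
// the difference h = 3; wdec14(3, 3, a, b) reconstructs
// a = 3 + (3 & 1) + (3 >> 1) = 5 and b = 5 - 3 = 2, so the transform
// round-trips exactly for 14-bit data.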
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//

const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}

//
// 2D Wavelet encoding:
//

static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 <<  level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}
//
// 2D Wavelet decoding:
//

static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //

  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //

        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p2 = p;
    p >>= 1;
  }
}

//-----------------------------------------------------------------------------
//
//  16-bit Huffman compression and decompression.
//
//  The source code in this file is derived from the 8-bit
//  Huffman compression and decompression routines written
//  by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------

// Some modifications were added for tinyexr.
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)

const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS;        // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;

struct HufDec {  // short code    long code
                 //-------------------------------
  int len : 8;   // code length   0
  int lit : 24;  // lit           p size
  int *p;        // 0             lits
};

inline long long hufLength(long long code) { return code & 63; }

inline long long hufCode(long long code) { return code >> 6; }

inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c <<= nBits;
  lc += nBits;

  c |= bits;

  while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}

inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  while (lc < nBits) {
    c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
    lc += 8;
  }

  lc -= nBits;

  return (c >> lc) & ((1 << nBits) - 1);
}

//
// ENCODING TABLE BUILDING & (UN)PACKING
//

//
// Build a "canonical" Huffman code table:
//  - for each (uncompressed) symbol, hcode contains the length
//    of the corresponding code (in the compressed data)
//  - canonical codes are computed and stored in hcode
//  - the rules for constructing canonical codes are as follows:
//    * shorter codes (if filled with zeroes to the right)
//      have a numerically higher value than longer codes
//    * for codes with the same length, numerical values
//      increase with numerical symbol values
//  - because the canonical code table can be constructed from
//    symbol lengths alone, the code table can be transmitted
//    without sending the actual code values
//  - see http://www.compressconsult.com/huffman/
//

static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  long long n[59];

  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //

  for (int i = 0; i <= 58; ++i) n[i] = 0;

  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;

  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //

  long long c = 0;

  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }

  //
  // hcode[i] contains the length, l, of the
  // code for symbol i.  Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //

  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = static_cast<int>(hcode[i]);

    if (l > 0) hcode[i] = l | (n[l]++ << 6);
  }
}
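// Worked example: three symbols with code lengths {A: 2, B: 2, C: 1} give
// n[2] = 2 and n[1] = 1. The backward pass assigns the first code of each
// length: length-2 codes start at 0 and the length-1 code at 1, so A = 00,
// B = 01, C = 1. Zero-filled to the right, the shorter code C reads 10,
// which is numerically higher than 00 and 01, as the rules above require.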
//
// Compute Huffman codes (based on frq input) and store them in frq:
//  - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
//  - max code length is 58 bits;
//  - codes outside the range [im-iM] have a null length (unused values);
//  - original frequencies are destroyed;
//  - encoding tables are used by hufEncode() and hufBuildDecTable();
//

struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *a > *b; }
};

static void hufBuildEncTable(
    long long *frq,  // io: input frequencies [HUF_ENCSIZE], output table
    int *im,         // o : min frq index
    int *iM)         // o : max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded.  (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  //    to non-zero entries in frq:
  //
  //    frq[im] != 0, and frq[i] == 0 for all i < im
  //    frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  //    entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  //    for all array entries.
  //

  std::vector<int> hlink(HUF_ENCSIZE);
  std::vector<long long *> fHeap(HUF_ENCSIZE);

  *im = 0;

  while (!frq[*im]) (*im)++;

  int nf = 0;

  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;

    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }

  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly.  Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //

  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;

  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i.  Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  //     Make a heap that contains all symbols with a non-zero frequency,
  //     with the least frequent symbol on top.
  //
  //     Repeat until only one symbol is left on the heap:
  //
  //         Take the two least frequent symbols off the top of the heap.
  //         Create a new node that has first two nodes as children, and
  //         whose frequency is the sum of the frequencies of the first
  //         two nodes.  Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree.  For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly.  When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //

  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

  std::vector<long long> scode(HUF_ENCSIZE);
  memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);

  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //

    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;

    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //

    //
    // Add a bit to all codes in the first list.
    //

    for (int j = m;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //
        hlink[j] = mm;
        break;
      }
    }

    //
    // Add a bit to all codes in the second list
    //

    for (int j = mm;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) break;
    }
  }

  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs.  Copy the
  // code table from scode into frq.
  //

  hufCanonicalCodeTable(scode.data());
  memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}

//
// Pack an encoding table:
//  - only code lengths, not actual codes, are stored
//  - runs of zeroes are compressed as follows:
//
//    unpacked              packed
//    --------------------------------
//    1 zero                0        (6 bits)
//    2 zeroes              59
//    3 zeroes              60
//    4 zeroes              61
//    5 zeroes              62
//    n zeroes (6 or more)  63 n-6   (6 + 8 bits)
//

const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;

static void hufPackEncTable(
    const long long *hcode,  // i : encoding table [HUF_ENCSIZE]
    int im,                  // i : min hcode index
    int iM,                  // i : max hcode index
    char **pcode)            // o : ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);

    if (l == 0) {
      int zerun = 1;

      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }

      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
    }

    outputBits(6, l, c, lc, p);
  }

  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));

  *pcode = p;
}

//
// Unpack an encoding table packed by hufPackEncTable():
//

static bool hufUnpackEncTable(
    const char **pcode,  // io: ptr to packed table (updated)
    int ni,              // i : input size (in bytes)
    int im,              // i : min hcode index
    int iM,              // i : max hcode index
    long long *hcode)    // o : encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);

  const char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    if (p - *pcode > ni) {
      return false;
    }

    long long l = hcode[im] = getBits(6, c, lc, p);  // code length

    if (l == (long long)LONG_ZEROCODE_RUN) {
      if (p - *pcode > ni) {
        return false;
      }

      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    }
  }

  *pcode = const_cast<char *>(p);

  hufCanonicalCodeTable(hcode);

  return true;
}
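// Worked example of the zero-run packing above: a gap of 10 unused symbols
// is emitted as the 6-bit code LONG_ZEROCODE_RUN (63) followed by
// 10 - SHORTEST_LONG_RUN = 4 in 8 bits, while a gap of 3 is the single
// 6-bit code 60; hufUnpackEncTable() undoes both forms.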
//
// DECODING TABLE BUILDING
//

//
// Clear a newly allocated decoding table so that it contains only zeroes.
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
//  - short codes (<= HUF_DECBITS) are resolved with a single table access;
//  - long code entry allocations are not optimized, because long codes
//    are infrequent;
//  - decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  // o: (allocated by caller)
                                              //    decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        int *p = pl->p;
        pl->p = new int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //

  if (hufLength(sCode) + hufLength(runCode) + 8 <
      hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}

//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//

static int hufEncode            // return: output size (in bits)
    (const long long *hcode,    // i : encoding table
     const unsigned short *in,  // i : uncompressed input buffer
     const int ni,              // i : input buffer size (in shorts)
     int rlc,                   // i : rl code
     char *out)                 // o : compressed output buffer
{
  char *outStart = out;
  long long c = 0;  // bits not yet written to out
  int lc = 0;       // number of valid bits in c (LSB)

  int s = in[0];
  int cs = 0;

  //
  // Loop on input values
  //

  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //

    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }

    s = in[i];
  }

  //
  // Send remaining code
  //

  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);

  if (lc) *out = (c << (8 - lc)) & 0xff;

  return (out - outStart) * 8 + lc;
}

//
// DECODING
//

//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//

#define getChar(c, lc, in)                   \
  {                                          \
    c = (c << 8) | *(unsigned char *)(in++); \
    lc += 8;                                 \
  }

#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
  {                                              \
    if (po == rlc) {                             \
      if (lc < 8) getChar(c, lc, in);            \
                                                 \
      lc -= 8;                                   \
                                                 \
      unsigned char cs = (c >> lc);              \
                                                 \
      if (out + cs > oe) return false;           \
                                                 \
      /* TinyEXR issue 78 */                     \
      unsigned short s = out[-1];                \
                                                 \
      while (cs-- > 0) *out++ = s;               \
    } else if (out < oe) {                       \
      *out++ = po;                               \
    } else {                                     \
      return false;                              \
    }                                            \
  }
#else
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
                    const char *in_end, unsigned short *&out,
                    const unsigned short *ob, const unsigned short *oe) {
  (void)ob;
  if (po == rlc) {
    if (lc < 8) {
      /* TinyEXR issue 78 */
      if ((in + 1) >= in_end) {
        return false;
      }
      getChar(c, lc, in);
    }

    lc -= 8;

    unsigned char cs = (c >> lc);

    if (out + cs > oe) return false;

    // Bounds check for safety
    if ((out - 1) <= ob) return false;
    unsigned short s = out[-1];

    while (cs-- > 0) *out++ = s;
  } else if (out < oe) {
    *out++ = po;
  } else {
    return false;
  }
  return true;
}
#endif

//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//

static bool hufDecode(const long long *hcode,  // i : encoding table
                      const HufDec *hdecod,    // i : decoding table
                      const char *in,          // i : compressed input buffer
                      int ni,                  // i : input size (in bits)
                      int rlc,                 // i : run-length code
                      int no,  // i : expected output size (in shorts)
                      unsigned short *out)  // o : uncompressed output buffer
{
  long long c = 0;
  int lc = 0;
  unsigned short *outb = out;          // begin
  unsigned short *oe = out + no;       // end
  const char *ie = in + (ni + 7) / 8;  // input byte size

  //
  // Loop on input bytes
  //

  while (in < ie) {
    getChar(c, lc, in);

    //
    // Access decoding table
    //

    while (lc >= HUF_DECBITS) {
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];

      if (pl.len) {
        //
        // Get short code
        //

        lc -= pl.len;
        // std::cout << "lit = " << pl.lit << std::endl;
        // std::cout << "rlc = " << rlc << std::endl;
        // std::cout << "c = " << c << std::endl;
        // std::cout << "lc = " << lc << std::endl;
        // std::cout << "in = " << in << std::endl;
        // std::cout << "out = " << out << std::endl;
        // std::cout << "oe = " << oe << std::endl;
        if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
          return false;
        }
      } else {
        if (!pl.p) {
          return false;
        }
        // invalidCode(); // wrong code

        //
        // Search long code
        //

        int j;

        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);

          while (lc < l && in < ie)  // get more bits
            getChar(c, lc, in);

          if (lc >= l) {
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //

              lc -= l;
              if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
                return false;
              }
              break;
            }
          }
        }

        if (j == pl.lit) {
          return false;
          // invalidCode(); // Not found
        }
      }
    }
  }

  //
  // Get remaining (short) codes
  //

  int i = (8 - ni) & 7;

  c >>= i;
  lc -= i;

  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];

    if (pl.len) {
      lc -= pl.len;
      if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
        return false;
      }
    } else {
      return false;
      // invalidCode(); // wrong (long) code
    }
  }

  if (out - outb != no) {
    return false;
  }
  // notEnoughData ();

  return true;
}

static void countFrequencies(std::vector<long long> &freq,
                             const unsigned short data[/*n*/], int n) {
  for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;

  for (int i = 0; i < n; ++i) ++freq[data[i]];
}

static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;

  b[0] = i;
  b[1] = i >> 8;
  b[2] = i >> 16;
  b[3] = i >> 24;
}

static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;

  return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
         ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}

//
// EXTERNAL INTERFACE
//

static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;

  std::vector<long long> freq(HUF_ENCSIZE);

  countFrequencies(freq, raw, nRaw);

  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq.data(), &im, &iM);

  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq.data(), im, iM, &tableEnd);
  int tableLength = tableEnd - tableStart;

  char *dataStart = tableEnd;
  int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
  int data_length = (nBits + 7) / 8;

  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0);  // room for future extensions

  return dataStart + data_length - compressed;
}
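// Layout of the block produced by hufCompress() and parsed by
// hufUncompress() below: a 20-byte header of five little-endian 32-bit
// values [im][iM][tableLength][nBits][reserved = 0], then the packed
// code-length table, then ceil(nBits / 8) bytes of Huffman-coded data.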
static bool hufUncompress(const char compressed[], int nCompressed,
                          std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    if (raw->size() != 0) return false;

    return false;
  }

  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);

  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;

  const char *ptr = compressed + 20;

  //
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be run-able on this platform. Otherwise, fall back
  // to the original decoder
  //

  // if (FastHufDecoder::enabled() && nBits > 128)
  //{
  //    FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
  //    fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
  //}
  // else
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);

    hufClearDecTable(&hdec.at(0));

    hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                      &freq.at(0));

    {
      if (nBits > 8 * (nCompressed - (ptr - compressed))) {
        return false;
      }

      hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
      hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(),
                raw->data());
    }
    // catch (...)
    //{
    //    hufFreeDecTable (hdec);
    //    throw;
    //}

    hufFreeDecTable(&hdec.at(0));
  }

  return true;
}

//
// Functions to compress the range of values in the pixel data
//

const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);

static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;

  for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));

  bitmap[0] &= ~1;  // zero is not explicitly stored in
                    // the bitmap; we assume that the
                    // data always contain zeroes

  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;

  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i]) {
      if (minNonZero > i) minNonZero = i;
      if (maxNonZero < i) maxNonZero = i;
    }
  }
}

static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE],
    unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
      lut[i] = k++;
    else
      lut[i] = 0;
  }

  return k - 1;  // maximum value stored in lut[],
}                // i.e. number of ones in bitmap minus 1

static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE],
    unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
  }

  int n = k - 1;

  while (k < USHORT_RANGE) lut[k++] = 0;

  return n;  // maximum k where lut[k] is non-zero,
}            // i.e. number of ones in bitmap minus 1

static void applyLut(const unsigned short lut[USHORT_RANGE],
                     unsigned short data[/*nData*/], int nData) {
  for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}
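// Worked example: for samples {0, 5, 5, 9} the bitmap gets bits 5 and 9 set
// (zero is implied and never stored), forwardLutFromBitmap() produces
// lut[0] = 0, lut[5] = 1, lut[9] = 2 and returns maxValue = 2, and
// applyLut() rewrites the samples to {0, 1, 1, 2}. reverseLutFromBitmap()
// rebuilds the inverse mapping on decode.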
#ifdef __clang__
#pragma clang diagnostic pop
#endif  // __clang__

#ifdef _MSC_VER
#pragma warning(pop)
#endif

static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
                        const unsigned char *inPtr, size_t inSize,
                        const std::vector<ChannelInfo> &channelInfo,
                        int data_width, int num_lines) {
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;

#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif

  // Assume `inSize` is multiple of 2 or 4.
  std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));

  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);

  for (size_t c = 0; c < channelData.size(); c++) {
    PIZChannelData &cd = channelData[c];

    cd.start = tmpBufferEnd;
    cd.end = cd.start;

    cd.nx = data_width;
    cd.ny = num_lines;
    // cd.ys = c.channel().ySampling;

    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }

    cd.size = static_cast<int>(pixelSize / sizeof(short));

    tmpBufferEnd += cd.nx * cd.ny * cd.size;
  }

  const unsigned char *ptr = inPtr;
  for (int y = 0; y < num_lines; ++y) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];

      // if (modp (y, cd.ys) != 0)
      //     continue;

      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(cd.end, ptr, n * sizeof(unsigned short));
      ptr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }

  bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
                 bitmap.data(), minNonZero, maxNonZero);

  std::vector<unsigned short> lut(USHORT_RANGE);
  unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));

  //
  // Store range compression info in _outBuffer
  //

  char *buf = reinterpret_cast<char *>(outPtr);

  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);

  if (minNonZero <= maxNonZero) {
    memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
           maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }

  //
  // Apply wavelet encoding
  //

  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];

    for (int j = 0; j < cd.size; ++j) {
      wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }

  //
  // Apply Huffman encoding; append the result to _outBuffer
  //

  // length header(4byte), then huff data. Initialize length header with zero,
  // then later fill it by `length`.
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int));
  buf += sizeof(int);

  int length =
      hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
  memcpy(lengthPtr, &length, sizeof(int));

  (*outSize) = static_cast<unsigned int>(
      (reinterpret_cast<unsigned char *>(buf) - outPtr) +
      static_cast<unsigned int>(length));

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if ((*outSize) >= inSize) {
    (*outSize) = static_cast<unsigned int>(inSize);
    memcpy(outPtr, inPtr, inSize);
  }

  return true;
}
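// Layout of a PIZ-compressed block as written by CompressPiz() above: two
// uint16 values (minNonZero, maxNonZero), the bitmap slice
// bitmap[minNonZero..maxNonZero], a 4-byte Huffman data length, and then
// the Huffman-coded, wavelet-transformed samples. DecompressPiz() below
// parses exactly this layout.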
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
                          size_t tmpBufSize, size_t inLen, int num_channels,
                          const EXRChannelInfo *channels, int data_width,
                          int num_lines) {
  if (inLen == tmpBufSize) {
    // Data is not compressed (Issue 40).
    memcpy(outPtr, inPtr, inLen);
    return true;
  }

  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;

#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif

  memset(bitmap.data(), 0, BITMAP_SIZE);

  const unsigned char *ptr = inPtr;
  // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
  tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
  // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
  tinyexr::cpy2(&maxNonZero,
                reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;

  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }

  if (minNonZero <= maxNonZero) {
    memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
           maxNonZero - minNonZero + 1);
    ptr += maxNonZero - minNonZero + 1;
  }

  std::vector<unsigned short> lut(USHORT_RANGE);
  memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());

  //
  // Huffman decoding
  //

  int length;

  // length = *(reinterpret_cast<const int *>(ptr));
  tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);

  std::vector<unsigned short> tmpBuffer(tmpBufSize);
  hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer);

  //
  // Wavelet decoding
  //

  std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));

  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);

  for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
    const EXRChannelInfo &chan = channels[i];

    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }

    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = data_width;
    channelData[i].ny = num_lines;
    // channelData[i].ys = 1;
    channelData[i].size = static_cast<int>(pixelSize / sizeof(short));

    tmpBufferEnd +=
        channelData[i].nx * channelData[i].ny * channelData[i].size;
  }

  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];

    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }

  //
  // Expand the pixel data to their original range
  //

  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));

  for (int y = 0; y < num_lines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];

      // if (modp (y, cd.ys) != 0)
      //     continue;

      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
      outPtr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }

  return true;
}
#endif  // TINYEXR_USE_PIZ

#if TINYEXR_USE_ZFP
struct ZFPCompressionParam {
  double rate;
  int precision;
  double tolerance;
  int type;  // TINYEXR_ZFP_COMPRESSIONTYPE_*

  ZFPCompressionParam() {
    type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
    rate = 2.0;
    precision = 0;
    tolerance = 0.0f;
  }
};
((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) && (attributes[i].size == 4)) { param->precision = *(reinterpret_cast<int *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) && (attributes[i].size == 8)) { param->tolerance = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else { assert(0); } return false; } // Assume pixel format is FLOAT for all channels. static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines, int num_channels, const unsigned char *src, unsigned long src_size, const ZFPCompressionParam &param) { size_t uncompressed_size = dst_width * dst_num_lines * num_channels; if (uncompressed_size == src_size) { // Data is not compressed (Issue 40). memcpy(dst, src, src_size); return true; } zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((dst_width % 4) == 0); assert((dst_num_lines % 4) == 0); if ((dst_width & 3U) || (dst_num_lines & 3U)) { return false; } field = zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)), zfp_type_float, dst_width, dst_num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2, /* write random access */ 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); std::vector<unsigned char> buf(buf_size); memcpy(&buf.at(0), src, src_size); bitstream *stream = stream_open(&buf.at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_stream_rewind(zfp); size_t image_size = dst_width * dst_num_lines; for (int c = 0; c < num_channels; c++) { // decompress 4x4 pixel block. for (int y = 0; y < dst_num_lines; y += 4) { for (int x = 0; x < dst_width; x += 4) { float fblock[16]; zfp_decode_block_float_2(zfp, fblock); for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { dst[c * image_size + ((y + j) * dst_width + (x + i))] = fblock[j * 4 + i]; } } } } } zfp_field_free(field); zfp_stream_close(zfp); stream_close(stream); return true; } // Assume pixel format is FLOAT for all channels. bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize, const float *inPtr, int width, int num_lines, int num_channels, const ZFPCompressionParam &param) { zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((width % 4) == 0); assert((num_lines % 4) == 0); if ((width & 3U) || (num_lines & 3U)) { return false; } // create input array.
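// NOTE: the input is treated as a single 2D zfp field of
// width x (num_lines * num_channels): the planar channel images are
// stacked vertically, matching the per-channel 4x4 block loops below.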
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, width, num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = width * num_lines; for (int c = 0; c < num_channels; c++) { // compress 4x4 pixel block. for (int y = 0; y < num_lines; y += 4) { for (int x = 0; x < width; x += 4) { float fblock[16]; for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * width + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = zfp_stream_compressed_size(zfp); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // // TODO(syoyo): Refactor function arguments. static bool DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) { // Invalid input #90 return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); assert(ret); (void)ret; // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. 
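// Note: cpy2 copies the value byte-wise because (line_ptr + u) may not
// be 2-byte aligned, and swap2 converts from the file's little-endian
// byte order (a no-op when the host itself is little-endian).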
tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { // NOTE: the decompressed buffer is laid out with `width` pixels per line, not x_stride. const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is not enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ...
for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. 
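// The RLE chunk decompresses to one scanline block of planar data:
// pixel_data_size bytes per pixel, width pixels per line, num_lines lines.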
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len)); // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if 
(line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * 
size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // address may not be aligned. use byte-wise copy for safety. #76 // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); tinyexr::FP32 f32 = half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); return false; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { const float *line_ptr = reinterpret_cast<const float *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { if (reinterpret_cast<const unsigned char *>(line_ptr + u) >= (data_ptr + data_len)) { // Corrupted data? return false; } unsigned int val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } } } } return true; } static void DecodeTiledPixelData( unsigned char **out_images, int *width, int *height, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int data_width, int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x, int tile_size_y, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { assert(tile_offset_x * tile_size_x < data_width); assert(tile_offset_y * tile_size_y < data_height); // Compute actual image size in a tile. if ((tile_offset_x + 1) * tile_size_x >= data_width) { (*width) = data_width - (tile_offset_x * tile_size_x); } else { (*width) = tile_size_x; } if ((tile_offset_y + 1) * tile_size_y >= data_height) { (*height) = data_height - (tile_offset_y * tile_size_y); } else { (*height) = tile_size_y; } // Image size = tile size.
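// Decode with stride = tile_size_x: edge tiles may be narrower or shorter
// than the nominal tile size, but the tile buffers were allocated at
// tile_size_x * tile_size_y, so indexing with the full stride stays valid.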
DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { // ??? return false; } } return true; } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
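// In a multipart file the list of headers is terminated by an empty
// header, encoded as a single 0x00 byte where the next attribute name
// would otherwise start.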
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window[0] = 0; info->data_window[1] = 0; info->data_window[2] = 0; info->data_window[3] = 0; info->line_order = 0; // @fixme info->display_window[0] = 0; info->display_window[1] = 0; info->display_window[2] = 0; info->display_window[3] = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if (err) { (*err) += "# of channels is zero.\n"; } return 
TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window[0], &data.at(0), sizeof(int)); memcpy(&info->data_window[1], &data.at(4), sizeof(int)); memcpy(&info->data_window[2], &data.at(8), sizeof(int)); memcpy(&info->data_window[3], &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3])); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window[0], &data.at(0), sizeof(int)); memcpy(&info->display_window[1], &data.at(4), sizeof(int)); memcpy(&info->display_window[2], &data.at(8), sizeof(int)); memcpy(&info->display_window[3], &data.at(12), sizeof(int)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[3])); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio)); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[1])); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_width)); has_screen_window_width = true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count)); } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." 
<< std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window[0] = info.display_window[0]; exr_header->display_window[1] = info.display_window[1]; exr_header->display_window[2] = info.display_window[2]; exr_header->display_window[3] = info.display_window[3]; exr_header->data_window[0] = info.data_window[0]; exr_header->data_window[1] = info.data_window[1]; exr_header->data_window[2] = info.data_window[2]; exr_header->data_window[3] = info.data_window[3]; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
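// strncpy() does not null-terminate when the source string is 255 bytes
// or longer, so the terminator is written explicitly below.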
exr_header->channels[c].name[255] = '\0'; exr_header->channels[c].pixel_type = info.channels[c].pixel_type; exr_header->channels[c].p_linear = info.channels[c].p_linear; exr_header->channels[c].x_sampling = info.channels[c].x_sampling; exr_header->channels[c].y_sampling = info.channels[c].y_sampling; } exr_header->pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->pixel_types[c] = info.channels[c].pixel_type; } // Initially fill with values of `pixel_types` exr_header->requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->requested_pixel_types[c] = info.channels[c].pixel_type; } exr_header->num_custom_attributes = static_cast<int>(info.attributes.size()); if (exr_header->num_custom_attributes > 0) { // TODO(syoyo): Report warning when # of attributes exceeds // `TINYEXR_MAX_CUSTOM_ATTRIBUTES` if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) { exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES; } exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc( sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes))); // Iterate up to the (possibly clamped) count so we never write past the allocation. for (size_t i = 0; i < static_cast<size_t>(exr_header->num_custom_attributes); i++) { memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256); memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256); exr_header->custom_attributes[i].size = info.attributes[i].size; // Just copy pointer exr_header->custom_attributes[i].value = info.attributes[i].value; } } else { exr_header->custom_attributes = NULL; } exr_header->header_len = info.header_len; } static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, const std::vector<tinyexr::tinyexr_uint64> &offsets, const unsigned char *head, const size_t size, std::string *err) { int num_channels = exr_header->num_channels; int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1; int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1; size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety. if (exr_header->tiled) { size_t num_tiles = offsets.size(); // = # of blocks exr_image->tiles = static_cast<EXRTile *>( calloc(static_cast<size_t>(num_tiles), sizeof(EXRTile))); for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) { // Allocate memory for each tile.
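// Each tile gets full-size (tile_size_x x tile_size_y) channel buffers,
// even for partial edge tiles; the valid region is stored in the tile's
// width/height after decoding.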
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, exr_header->tile_size_x, exr_header->tile_size_y); // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) if (offsets[tile_idx] + sizeof(int) * 5 > size) { if (err) { (*err) += "Insufficient data size.\n"; } return TINYEXR_ERROR_INVALID_DATA; } size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3])); // @todo{ LoD } if (tile_coordinates[2] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } if (tile_coordinates[3] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len < 4 || size_t(data_len) > data_size) { if (err) { (*err) += "Insufficient data length.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Move to data addr: 20 = 16 + 4; data_ptr += 20; tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = tile_coordinates[3]; exr_image->num_tiles = static_cast<int>(num_tiles); } } else { // scanline format exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { size_t y_idx = static_cast<size_t>(y); if (offsets[y_idx] + sizeof(int) * 2 > size) { invalid_data = true; } else { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (size_t(data_len) > data_size) { invalid_data = true; } else { int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window[3] + 1)); int num_lines = end_line_no - line_no; // assert(num_lines > 0); if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; 
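// The scan line number stored in the chunk is absolute (data window
// coordinates); it is rebased against data_window[1] below so it can
// index the output image from row 0.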
data_ptr += 8; // Adjust line_no with data_window.bmin.y line_no -= exr_header->data_window[1]; if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } } // omp parallel } if (invalid_data) { return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. { for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; } static bool ReconstructLineOffsets( std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { assert(head < marker); assert(offsets->size() == n); for (size_t i = 0; i < n; i++) { size_t offset = static_cast<size_t>(marker - head); // Offset should not exceed whole EXR file/data size. if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) { return false; } int y; unsigned int data_len; memcpy(&y, marker, sizeof(int)); memcpy(&data_len, marker + 4, sizeof(unsigned int)); if (data_len >= size) { return false; } tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); (*offsets)[i] = offset; marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len) } return true; } static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *head, const unsigned char *marker, const size_t size, const char **err) { if (exr_image == NULL || exr_header == NULL || head == NULL || marker == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0]; if (data_width >= std::numeric_limits<int>::max()) { // Issue 63 tinyexr::SetErrorMessage("Invalid data window value", err); return TINYEXR_ERROR_INVALID_DATA; } data_width++; int data_height = exr_header->data_window[3] - exr_header->data_window[1]; if (data_height >= std::numeric_limits<int>::max()) { tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } data_height++; if ((data_width < 0) || (data_height < 0)) { tinyexr::SetErrorMessage("data window or data height is negative.", err); return TINYEXR_ERROR_INVALID_DATA; } // Read offset tables. size_t num_blocks = 0; if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute. 
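// The offset table length must be known up front: it comes from the
// chunkCount attribute when present; otherwise it is derived below as
// ceil(data_width / tile_size_x) * ceil(data_height / tile_size_y) for
// tiled images, or ceil(data_height / num_scanline_blocks) for scanlines.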
num_blocks = static_cast<size_t>(exr_header->chunk_count); } else if (exr_header->tiled) { // @todo { LoD } size_t num_x_tiles = static_cast<size_t>(data_width) / static_cast<size_t>(exr_header->tile_size_x); if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) < static_cast<size_t>(data_width)) { num_x_tiles++; } size_t num_y_tiles = static_cast<size_t>(data_height) / static_cast<size_t>(exr_header->tile_size_y); if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) < static_cast<size_t>(data_height)) { num_y_tiles++; } num_blocks = num_x_tiles * num_y_tiles; } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } } std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks); for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; // Issue #81 if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", err); return TINYEXR_ERROR_INVALID_DATA; } memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offsets[y] = offset; } // If line offsets are invalid, we try to reconstruct it. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details. for (size_t y = 0; y < num_blocks; y++) { if (offsets[y] <= 0) { // TODO(syoyo) Report as warning? // if (err) { // stringstream ss; // ss << "Incomplete lineOffsets." << std::endl; // (*err) += ss.str(); //} bool ret = ReconstructLineOffsets(&offsets, num_blocks, head, marker, size); if (ret) { // OK break; } else { tinyexr::SetErrorMessage( "Cannot reconstruct lineOffset table in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } } } { std::string e; int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } // release memory(if exists) if ((exr_header->num_channels > 0) && exr_image && exr_image->images) { for (size_t c = 0; c < size_t(exr_header->num_channels); c++) { if (exr_image->images[c]) { free(exr_image->images[c]); exr_image->images[c] = NULL; } } free(exr_image->images); exr_image->images = NULL; } } return ret; } } } // namespace tinyexr int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { if (out_rgba == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Invalid EXR header.", err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. 
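// LoadEXR() always returns 32-bit float RGBA, so HALF channels are
// requested as FLOAT and widened during decoding.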
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if ((idxA == 0) && (idxR == -1) && (idxG == -1) && (idxB == -1)) { // Alpha channel only. if (exr_header.tiled) { // todo.implement this } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
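// Tiles on the right/bottom edges can extend past the image bounds;
// pixels that fall outside the data window are skipped.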
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { tinyexr::SetErrorMessage( "Invalid argument. `memory` or `exr_header` argument is null in " "ParseEXRHeaderFromMemory()", err); // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Insufficient header/data size.\n", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { tinyexr::SetErrorMessage(err_str, err); } } ConvertHeader(exr_header, info); // transfer `tiled` from version. exr_header->tiled = version->tiled; return ret; } int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Failed to parse EXR version", err); return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return 
TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err); return 0; // @fixme } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = { 0x76, 0x2f, 0x31, 0x01 }; memory.insert(memory.end(), header, header + 4); } // Version, scanline. { char marker[] = { 2, 0, 0, 0 }; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. 
std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = { 0, 0, exr_image->width - 1, exr_image->height - 1 }; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = { 0.0f, 0.0f }; tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<unsigned char> data; std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += 
sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( 
static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. 
std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 8192 + static_cast<unsigned int>( 2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. } std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { data.insert(data.end(), data_list[i].begin(), data_list[i].end()); offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } { memory.insert( memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } { memory.insert(memory.end(), data.begin(), data.end()); } assert(memory.size() > 0); (*memory_out) = static_cast<unsigned char *>(malloc(memory.size())); memcpy((*memory_out), &memory.at(0), memory.size()); return memory.size(); // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char 
**err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "wb"); #else FILE *fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_OPEN_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if ((mem_size > 0) && mem) { fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _MSC_VER FILE *fp = NULL; errno_t errcode = fopen_s(&fp, filename, "rb"); if ((0 != errcode) || (!fp)) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = { 0x76, 0x2f, 0x31, 0x01 }; if (memcmp(marker, header, 4) != 0) { tinyexr::SetErrorMessage("Invalid magic number", err); return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 8, 0, 0] (deep bit set in the second byte) if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression. 
int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh)); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&h)); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. 
int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. { unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return TINYEXR_ERROR_INVALID_DATA; // `return false` would alias TINYEXR_SUCCESS (0) } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. 
{ unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return TINYEXR_ERROR_INVALID_DATA; // `return false` would alias TINYEXR_SUCCESS (0) } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; const unsigned short *src_ptr = reinterpret_cast<unsigned short *>( &sample_data.at(size_t(data_offset) + x * sizeof(short))); tinyexr::cpy2(&(f16.u), src_ptr); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f; const float *src_ptr = reinterpret_cast<float *>( &sample_data.at(size_t(data_offset) + x * sizeof(float))); tinyexr::cpy4(&f, src_ptr); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->num_tiles = 0; } void FreeEXRErrorMessage(const char *msg) { if (msg) { free(reinterpret_cast<void *>(const_cast<char *>(msg))); } return; } void InitEXRHeader(EXRHeader *exr_header) { if 
(exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. 
if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfoer `tiled` from version. exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = { 0x76, 0x2f, 0x31, 0x01 }; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header. 
{ // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In multipart image, There is 'part number' before chunk data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list; for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> offset_table( static_cast<size_t>(exr_headers[i]->chunk_count)); for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } chunk_offset_table_list.push_back(offset_table); } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> &offset_table = chunk_offset_table_list[i]; // First check 'part number' is identitical to 'i' for (size_t c = 0; c < offset_table.size(); c++) { const unsigned char *part_number_addr = memory + offset_table[c] - 4; // -4 to move to 'part number' field. 
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename) { if ((components == 1) || components == 3 || components == 4) { // OK } else { return TINYEXR_ERROR_INVALID_ARGUMENT; } // Assume at least 16x16 pixels. if (width < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; if (height < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; EXRHeader header; InitEXRHeader(&header); EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... 
into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = { 0, 0, 0, 0 }; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. // no precision reduction) } } const char *err; int ret = SaveEXRImageToFile(&image, &header, outfilename, &err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEIFNED #endif // TINYEXR_IMPLEMENTATION
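/* ---------------------------------------------------------------------------
   Editor's usage sketch (not part of tinyexr): a minimal save/load round
   trip through the public API implemented above. It assumes the
   implementation is compiled into exactly one translation unit, that
   "out.exr" is a hypothetical output path, and that LoadEXR() is declared
   in tinyexr's public header with the same (out_rgba, width, height,
   filename, err) shape as LoadEXRFromMemory() above. Allocation checks are
   omitted for brevity.
--------------------------------------------------------------------------- */
#include <stdio.h>
#include <stdlib.h>

static int tinyexr_roundtrip_example(void) {
  const int w = 16, h = 16;  // SaveEXR() above rejects images smaller than 16x16
  float *rgb = (float *)malloc(sizeof(float) * 3 * (size_t)w * (size_t)h);
  for (int i = 0; i < 3 * w * h; i++) { rgb[i] = 0.18f; }  // flat mid-gray test image

  // components must be 1, 3, or 4; save_as_fp16 = 1 writes HALF pixels.
  int ret = SaveEXR(rgb, w, h, /*components=*/3, /*save_as_fp16=*/1, "out.exr");
  free(rgb);
  if (ret != TINYEXR_SUCCESS) { return ret; }

  // The loader mallocs an RGBA float buffer that the caller must free.
  float *rgba = NULL;
  int rw = 0, rh = 0;
  const char *err = NULL;
  ret = LoadEXR(&rgba, &rw, &rh, "out.exr", &err);
  if (ret != TINYEXR_SUCCESS) {
    fprintf(stderr, "tinyexr: %s\n", err ? err : "(no message)");
    FreeEXRErrorMessage(err);  // error strings are heap-allocated, see above
    return ret;
  }
  printf("loaded %dx%d EXR, first texel R = %f\n", rw, rh, rgba[0]);
  free(rgba);
  return TINYEXR_SUCCESS;
}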
GB_binop__islt_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__islt_int8 // A.*B function (eWiseMult): GB_AemultB__islt_int8 // A*D function (colscale): GB_AxD__islt_int8 // D*A function (rowscale): GB_DxB__islt_int8 // C+=B function (dense accum): GB_Cdense_accumB__islt_int8 // C+=b function (dense accum): GB_Cdense_accumb__islt_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__islt_int8 // C=scalar+B GB_bind1st__islt_int8 // C=scalar+B' GB_bind1st_tran__islt_int8 // C=A+scalar GB_bind2nd__islt_int8 // C=A'+scalar GB_bind2nd_tran__islt_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x < y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_INT8 || GxB_NO_ISLT_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__islt_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__islt_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__islt_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__islt_int8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__islt_int8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__islt_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__islt_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__islt_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__islt_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB_bind1st_tran__islt_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB_bind2nd_tran__islt_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
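/* ---------------------------------------------------------------------------
   Editor's illustration (not generated code): ISLT is GraphBLAS's "is less
   than" operator, which stores the comparison result in the operand type
   itself (int8_t 0 or 1, per GB_BINOP: z = (x < y)) rather than as a bool.
   A standalone sketch of the bind2nd loop above, with hypothetical inputs:
--------------------------------------------------------------------------- */
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int8_t Ax[4] = {-3, 0, 2, 5};  // hypothetical matrix values
  const int8_t y = 2;            // scalar bound as the second operand
  int8_t Cx[4];
  for (int64_t p = 0; p < 4; p++) {
    int8_t aij = Ax[p];
    Cx[p] = (aij < y);           // the same update GB_bind2nd__islt_int8 runs in parallel
  }
  for (int p = 0; p < 4; p++) { printf("%d ", (int)Cx[p]); }  // prints: 1 1 0 0
  printf("\n");
  return 0;
}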
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__islt_int8 // A.*B function (eWiseMult): GB_AemultB__islt_int8 // A*D function (colscale): GB_AxD__islt_int8 // D*A function (rowscale): GB_DxB__islt_int8 // C+=B function (dense accum): GB_Cdense_accumB__islt_int8 // C+=b function (dense accum): GB_Cdense_accumb__islt_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__islt_int8 // C=scalar+B GB_bind1st__islt_int8 // C=scalar+B' GB_bind1st_tran__islt_int8 // C=A+scalar GB_bind2nd__islt_int8 // C=A'+scalar GB_bind2nd_tran__islt_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x < y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_INT8 || GxB_NO_ISLT_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__islt_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__islt_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__islt_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__islt_int8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__islt_int8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__islt_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__islt_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__islt_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__islt_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB_bind1st_tran__islt_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB_bind2nd_tran__islt_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
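Every function above is wrapped in #if GB_DISABLE ... return (GrB_NO_VALUE) ; so a build that sets GxB_NO_ISLT, GxB_NO_INT8, or GxB_NO_ISLT_INT8 compiles the specialized kernel down to a stub, and the caller falls back to a generic path when it sees GrB_NO_VALUE. Below is a minimal sketch of that compile-out-and-dispatch pattern, using a simplified #ifdef in place of the #if expression above; MYLIB_NO_FAST_PATH, do_fast, and do_generic are illustrative names.

#include <stdio.h>

#define OK        0
#define NO_VALUE  1   /* "this specialized kernel was compiled out" */

/* #define MYLIB_NO_FAST_PATH 1 */   /* uncomment to disable the fast kernel */

static int do_fast (double *x, int n)
{
#ifdef MYLIB_NO_FAST_PATH
    (void) x ; (void) n ;
    return (NO_VALUE) ;          /* tell the caller to take the generic path */
#else
    for (int i = 0 ; i < n ; i++) x [i] *= 2.0 ;
    return (OK) ;
#endif
}

static void do_generic (double *x, int n)
{
    for (int i = 0 ; i < n ; i++) x [i] = x [i] + x [i] ;
}

int main (void)
{
    double x [3] = { 1, 2, 3 } ;
    if (do_fast (x, 3) != OK) do_generic (x, 3) ;   /* fallback dispatch */
    printf ("%g %g %g\n", x [0], x [1], x [2]) ;    /* prints 2 4 6 */
    return 0 ;
}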
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__islt_int8 // A.*B function (eWiseMult): GB_AemultB__islt_int8 // A*D function (colscale): GB_AxD__islt_int8 // D*A function (rowscale): GB_DxB__islt_int8 // C+=B function (dense accum): GB_Cdense_accumB__islt_int8 // C+=b function (dense accum): GB_Cdense_accumb__islt_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__islt_int8 // C=scalar+B GB_bind1st__islt_int8 // C=scalar+B' GB_bind1st_tran__islt_int8 // C=A+scalar GB_bind2nd__islt_int8 // C=A'+scalar GB_bind2nd_tran__islt_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x < y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_INT8 || GxB_NO_ISLT_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__islt_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__islt_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__islt_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__islt_int8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__islt_int8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__islt_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__islt_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__islt_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__islt_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB_bind1st_tran__islt_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB_bind2nd_tran__islt_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
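The only substantive difference between this copy and the preceding one is the #pragma omp parallel for num_threads(nthreads) schedule(static) on the bind1st/bind2nd element loops: iteration p touches only Bx [p] (or Ax [p]) and Cx [p], so the loop parallelizes with no synchronization. Below is a self-contained sketch of the same loop shape; bind1st_islt_int8 and the driver are illustrative, and the pragma is simply ignored if the file is built without -fopenmp.

#include <stdio.h>
#include <stdint.h>

static void bind1st_islt_int8 (int8_t *Cx, int8_t x, const int8_t *Bx,
                               int64_t anz, int nthreads)
{
    int64_t p ;
    /* same shape as the generated kernel: one static chunk per thread, */
    /* and each iteration writes a distinct Cx [p]                      */
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t bij = Bx [p] ;
        Cx [p] = (x < bij) ;
    }
}

int main (void)
{
    int8_t B [5] = { -1, 2, 3, 4, 5 }, C [5] ;
    bind1st_islt_int8 (C, 3, B, 5, 2) ;
    for (int p = 0 ; p < 5 ; p++) printf ("%d ", C [p]) ;   /* prints 0 0 0 1 1 */
    printf ("\n") ;
    return 0 ;
}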
Matrix.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ /****************************************************************************** * * Matrix - Matrix stored and accessible by rows. Indices and values for * the matrix nonzeros are copied into the matrix a row at a time, in any * order using the MatrixGetRow function. The MatrixPutRow function returns * a pointer to the indices and values of a row. The matrix has a set of * row and column indices such that these indices begin at "beg" and end * at "end", where 0 <= "beg" <= "end". In other words, the matrix indices * have any nonnegative base value, and the base values of the row and column * indices must agree. * *****************************************************************************/ #include <stdlib.h> #include <memory.h> #include <assert.h> #include "Common.h" #include "Matrix.h" #include "Numbering.h" #define MAX_NZ_PER_ROW 1000 /*-------------------------------------------------------------------------- * MatrixCreate - Return (a pointer to) a matrix object. *--------------------------------------------------------------------------*/ Matrix *MatrixCreate(MPI_Comm comm, HYPRE_Int beg_row, HYPRE_Int end_row) { HYPRE_Int num_rows, mype, npes; Matrix *mat = (Matrix *) malloc(sizeof(Matrix)); mat->comm = comm; mat->beg_row = beg_row; mat->end_row = end_row; mat->mem = (Mem *) MemCreate(); num_rows = mat->end_row - mat->beg_row + 1; mat->lens = (HYPRE_Int *) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int)); mat->inds = (HYPRE_Int **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int *)); mat->vals = (HYPRE_Real **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Real *)); /* Send beg_row and end_row to all processors */ /* This is needed in order to map row numbers to processors */ hypre_MPI_Comm_rank(comm, &mype); hypre_MPI_Comm_size(comm, &npes); mat->beg_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int)); mat->end_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int)); hypre_MPI_Allgather(&beg_row, 1, HYPRE_MPI_INT, mat->beg_rows, 1, HYPRE_MPI_INT, comm); hypre_MPI_Allgather(&end_row, 1, HYPRE_MPI_INT, mat->end_rows, 1, HYPRE_MPI_INT, comm); mat->num_recv = 0; mat->num_send = 0; mat->recv_req = NULL; mat->send_req = NULL; mat->recv_req2 = NULL; mat->send_req2 = NULL; mat->statuses = NULL; mat->sendind = NULL; mat->sendbuf = NULL; mat->recvbuf = NULL; mat->numb = NULL; return mat; } /*-------------------------------------------------------------------------- * MatrixCreateLocal - Return (a pointer to) a matrix object. * The matrix created by this call is a local matrix, not a global matrix. 
*--------------------------------------------------------------------------*/ Matrix *MatrixCreateLocal(HYPRE_Int beg_row, HYPRE_Int end_row) { HYPRE_Int num_rows; Matrix *mat = (Matrix *) malloc(sizeof(Matrix)); mat->comm = hypre_MPI_COMM_NULL; mat->beg_row = beg_row; mat->end_row = end_row; mat->mem = (Mem *) MemCreate(); num_rows = mat->end_row - mat->beg_row + 1; mat->lens = (HYPRE_Int *) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int)); mat->inds = (HYPRE_Int **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int *)); mat->vals = (HYPRE_Real **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Real *)); /* Send beg_row and end_row to all processors */ /* This is needed in order to map row numbers to processors */ mat->beg_rows = NULL; mat->end_rows = NULL; mat->num_recv = 0; mat->num_send = 0; mat->recv_req = NULL; mat->send_req = NULL; mat->recv_req2 = NULL; mat->send_req2 = NULL; mat->statuses = NULL; mat->sendind = NULL; mat->sendbuf = NULL; mat->recvbuf = NULL; mat->numb = NULL; return mat; } /*-------------------------------------------------------------------------- * MatrixDestroy - Destroy a matrix object "mat". *--------------------------------------------------------------------------*/ void MatrixDestroy(Matrix *mat) { HYPRE_Int i; for (i=0; i<mat->num_recv; i++) hypre_MPI_Request_free(&mat->recv_req[i]); for (i=0; i<mat->num_send; i++) hypre_MPI_Request_free(&mat->send_req[i]); for (i=0; i<mat->num_send; i++) hypre_MPI_Request_free(&mat->recv_req2[i]); for (i=0; i<mat->num_recv; i++) hypre_MPI_Request_free(&mat->send_req2[i]); free(mat->recv_req); free(mat->send_req); free(mat->recv_req2); free(mat->send_req2); free(mat->statuses); free(mat->sendind); free(mat->sendbuf); free(mat->recvbuf); MemDestroy(mat->mem); if (mat->numb) NumberingDestroy(mat->numb); free(mat); } /*-------------------------------------------------------------------------- * MatrixSetRow - Set a row in a matrix. Only local rows can be set. * Once a row has been set, it should not be set again, or else the * memory used by the existing row will not be recovered until * the matrix is destroyed. "row" is in global coordinate numbering. *--------------------------------------------------------------------------*/ void MatrixSetRow(Matrix *mat, HYPRE_Int row, HYPRE_Int len, HYPRE_Int *ind, HYPRE_Real *val) { row -= mat->beg_row; mat->lens[row] = len; mat->inds[row] = (HYPRE_Int *) MemAlloc(mat->mem, len*sizeof(HYPRE_Int)); mat->vals[row] = (HYPRE_Real *) MemAlloc(mat->mem, len*sizeof(HYPRE_Real)); if (ind != NULL) memcpy(mat->inds[row], ind, len*sizeof(HYPRE_Int)); if (val != NULL) memcpy(mat->vals[row], val, len*sizeof(HYPRE_Real)); } /*-------------------------------------------------------------------------- * MatrixGetRow - Get a *local* row in a matrix. *--------------------------------------------------------------------------*/ void MatrixGetRow(Matrix *mat, HYPRE_Int row, HYPRE_Int *lenp, HYPRE_Int **indp, HYPRE_Real **valp) { *lenp = mat->lens[row]; *indp = mat->inds[row]; *valp = mat->vals[row]; } /*-------------------------------------------------------------------------- * MatrixRowPe - Map "row" to a processor number. 
*--------------------------------------------------------------------------*/ HYPRE_Int MatrixRowPe(Matrix *mat, HYPRE_Int row) { HYPRE_Int npes, pe; HYPRE_Int *beg = mat->beg_rows; HYPRE_Int *end = mat->end_rows; hypre_MPI_Comm_size(mat->comm, &npes); for (pe=0; pe<npes; pe++) { if (row >= beg[pe] && row <= end[pe]) return pe; } hypre_printf("MatrixRowPe: could not map row %d.\n", row); PARASAILS_EXIT; return -1; /* for picky compilers */ } /*-------------------------------------------------------------------------- * MatrixNnz - Return total number of nonzeros in preconditioner. *--------------------------------------------------------------------------*/ HYPRE_Int MatrixNnz(Matrix *mat) { HYPRE_Int num_local, i, total, alltotal; num_local = mat->end_row - mat->beg_row + 1; total = 0; for (i=0; i<num_local; i++) total += mat->lens[i]; hypre_MPI_Allreduce(&total, &alltotal, 1, HYPRE_MPI_INT, hypre_MPI_SUM, mat->comm); return alltotal; } /*-------------------------------------------------------------------------- * MatrixPrint - Print a matrix to a file "filename". Each processor * appends to the file in order, but the file is overwritten if it exists. *--------------------------------------------------------------------------*/ void MatrixPrint(Matrix *mat, char *filename) { HYPRE_Int mype, npes, pe; HYPRE_Int row, i, len, *ind; HYPRE_Real *val; hypre_MPI_Comm_rank(mat->comm, &mype); hypre_MPI_Comm_size(mat->comm, &npes); for (pe=0; pe<npes; pe++) { hypre_MPI_Barrier(mat->comm); if (mype == pe) { FILE *file = fopen(filename, (pe==0 ? "w" : "a")); assert(file != NULL); for (row=0; row<=mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); for (i=0; i<len; i++) hypre_fprintf(file, "%d %d %.14e\n", row + mat->beg_row, mat->numb->local_to_global[ind[i]], val[i]); } fclose(file); } } } /*-------------------------------------------------------------------------- * MatrixReadMaster - MatrixRead routine for processor 0. Internal use. 
*--------------------------------------------------------------------------*/ static void MatrixReadMaster(Matrix *mat, char *filename) { MPI_Comm comm = mat->comm; HYPRE_Int mype, npes; FILE *file; HYPRE_Int ret; HYPRE_Int num_rows, curr_proc; HYPRE_Int row, col; HYPRE_Real value; hypre_longint offset; hypre_longint outbuf; HYPRE_Int curr_row; HYPRE_Int len; HYPRE_Int ind[MAX_NZ_PER_ROW]; HYPRE_Real val[MAX_NZ_PER_ROW]; char line[100]; HYPRE_Int oldrow; hypre_MPI_Request request; hypre_MPI_Status status; hypre_MPI_Comm_size(mat->comm, &npes); hypre_MPI_Comm_rank(mat->comm, &mype); file = fopen(filename, "r"); assert(file != NULL); fgets(line, 100, file); #ifdef EMSOLVE ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows); for (row=0; row<num_rows; row++) hypre_fscanf(file, "%*d"); #else ret = hypre_sscanf(line, "%d %*d %*d", &num_rows); #endif offset = ftell(file); hypre_fscanf(file, "%d %d %lf", &row, &col, &value); request = hypre_MPI_REQUEST_NULL; curr_proc = 1; /* proc for which we are looking for the beginning */ while (curr_proc < npes) { if (row == mat->beg_rows[curr_proc]) { hypre_MPI_Wait(&request, &status); outbuf = offset; hypre_MPI_Isend(&outbuf, 1, hypre_MPI_LONG, curr_proc, 0, comm, &request); curr_proc++; } offset = ftell(file); oldrow = row; hypre_fscanf(file, "%d %d %lf", &row, &col, &value); if (oldrow > row) { hypre_fprintf(stderr, "Matrix file is not sorted by rows.\n"); PARASAILS_EXIT; } } /* Now read our own part */ rewind(file); fgets(line, 100, file); #ifdef EMSOLVE ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows); for (row=0; row<num_rows; row++) hypre_fscanf(file, "%*d"); #else ret = hypre_sscanf(line, "%d %*d %*d", &num_rows); #endif ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value); curr_row = row; len = 0; while (ret != EOF && row <= mat->end_row) { if (row != curr_row) { /* store this row */ MatrixSetRow(mat, curr_row, len, ind, val); curr_row = row; /* reset row pointer */ len = 0; } if (len >= MAX_NZ_PER_ROW) { hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW); hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n"); hypre_fprintf(stderr, "increased to continue.\n"); PARASAILS_EXIT; } ind[len] = col; val[len] = value; len++; ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value); } /* Store the final row */ if (ret == EOF || row > mat->end_row) MatrixSetRow(mat, mat->end_row, len, ind, val); fclose(file); hypre_MPI_Wait(&request, &status); } /*-------------------------------------------------------------------------- * MatrixReadSlave - MatrixRead routine for other processors. Internal use. 
*--------------------------------------------------------------------------*/ static void MatrixReadSlave(Matrix *mat, char *filename) { MPI_Comm comm = mat->comm; hypre_MPI_Status status; HYPRE_Int mype; FILE *file; HYPRE_Int ret; HYPRE_Int row, col; HYPRE_Real value; hypre_longint offset; HYPRE_Int curr_row; HYPRE_Int len; HYPRE_Int ind[MAX_NZ_PER_ROW]; HYPRE_Real val[MAX_NZ_PER_ROW]; HYPRE_Real time0, time1; file = fopen(filename, "r"); assert(file != NULL); hypre_MPI_Comm_rank(mat->comm, &mype); hypre_MPI_Recv(&offset, 1, hypre_MPI_LONG, 0, 0, comm, &status); time0 = hypre_MPI_Wtime(); ret = fseek(file, offset, SEEK_SET); assert(ret == 0); ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value); curr_row = row; len = 0; while (ret != EOF && row <= mat->end_row) { if (row != curr_row) { /* store this row */ MatrixSetRow(mat, curr_row, len, ind, val); curr_row = row; /* reset row pointer */ len = 0; } if (len >= MAX_NZ_PER_ROW) { hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW); hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n"); hypre_fprintf(stderr, "increased to continue.\n"); PARASAILS_EXIT; } ind[len] = col; val[len] = value; len++; ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value); } /* Store the final row */ if (ret == EOF || row > mat->end_row) MatrixSetRow(mat, mat->end_row, len, ind, val); fclose(file); time1 = hypre_MPI_Wtime(); hypre_printf("%d: Time for slave read: %f\n", mype, time1-time0); } /*-------------------------------------------------------------------------- * MatrixRead - Read a matrix file "filename" from disk and store in the * matrix "mat" which has already been created using MatrixCreate. The format * assumes no nonzero rows, the rows are in order, and there will be at least * one row per processor. *--------------------------------------------------------------------------*/ void MatrixRead(Matrix *mat, char *filename) { HYPRE_Int mype; HYPRE_Real time0, time1; hypre_MPI_Comm_rank(mat->comm, &mype); time0 = hypre_MPI_Wtime(); if (mype == 0) MatrixReadMaster(mat, filename); else MatrixReadSlave(mat, filename); time1 = hypre_MPI_Wtime(); hypre_printf("%d: Time for reading matrix: %f\n", mype, time1-time0); MatrixComplete(mat); } /*-------------------------------------------------------------------------- * RhsRead - Read a right-hand side file "filename" from disk and store in the * location pointed to by "rhs". "mat" is needed to provide the partitioning * information. The expected format is: a header line (n, nrhs) followed * by n values. Also allows isis format, indicated by 1 HYPRE_Int in first line. 
*--------------------------------------------------------------------------*/ void RhsRead(HYPRE_Real *rhs, Matrix *mat, char *filename) { FILE *file; hypre_MPI_Status status; HYPRE_Int mype, npes; HYPRE_Int num_rows, num_local, pe, i, converted; HYPRE_Real *buffer = NULL; HYPRE_Int buflen = 0; char line[100]; HYPRE_Int dummy; hypre_MPI_Comm_size(mat->comm, &npes); hypre_MPI_Comm_rank(mat->comm, &mype); num_local = mat->end_row - mat->beg_row + 1; if (mype != 0) { hypre_MPI_Recv(rhs, num_local, hypre_MPI_DOUBLE, 0, 0, mat->comm, &status); return; } file = fopen(filename, "r"); assert(file != NULL); fgets(line, 100, file); converted = hypre_sscanf(line, "%d %d", &num_rows, &dummy); assert(num_rows == mat->end_rows[npes-1]); /* Read own rows first */ for (i=0; i<num_local; i++) if (converted == 1) /* isis format */ hypre_fscanf(file, "%*d %lf", &rhs[i]); else hypre_fscanf(file, "%lf", &rhs[i]); for (pe=1; pe<npes; pe++) { num_local = mat->end_rows[pe] - mat->beg_rows[pe]+ 1; if (buflen < num_local) { free(buffer); buflen = num_local; buffer = (HYPRE_Real *) malloc(buflen * sizeof(HYPRE_Real)); } for (i=0; i<num_local; i++) if (converted == 1) /* isis format */ hypre_fscanf(file, "%*d %lf", &buffer[i]); else hypre_fscanf(file, "%lf", &buffer[i]); hypre_MPI_Send(buffer, num_local, hypre_MPI_DOUBLE, pe, 0, mat->comm); } free(buffer); } /*-------------------------------------------------------------------------- * SetupReceives *--------------------------------------------------------------------------*/ static void SetupReceives(Matrix *mat, HYPRE_Int reqlen, HYPRE_Int *reqind, HYPRE_Int *outlist) { HYPRE_Int i, j, this_pe, mype; hypre_MPI_Request request; MPI_Comm comm = mat->comm; HYPRE_Int num_local = mat->end_row - mat->beg_row + 1; hypre_MPI_Comm_rank(comm, &mype); mat->num_recv = 0; /* Allocate recvbuf */ /* recvbuf has numlocal entires saved for local part of x, used in matvec */ mat->recvlen = reqlen; /* used for the transpose multiply */ mat->recvbuf = (HYPRE_Real *) malloc((reqlen+num_local) * sizeof(HYPRE_Real)); for (i=0; i<reqlen; i=j) /* j is set below */ { /* The processor that owns the row with index reqind[i] */ this_pe = MatrixRowPe(mat, reqind[i]); /* Figure out other rows we need from this_pe */ for (j=i+1; j<reqlen; j++) { /* if row is on different pe */ if (reqind[j] < mat->beg_rows[this_pe] || reqind[j] > mat->end_rows[this_pe]) break; } /* Request rows in reqind[i..j-1] */ hypre_MPI_Isend(&reqind[i], j-i, HYPRE_MPI_INT, this_pe, 444, comm, &request); hypre_MPI_Request_free(&request); /* Count of number of number of indices needed from this_pe */ outlist[this_pe] = j-i; hypre_MPI_Recv_init(&mat->recvbuf[i+num_local], j-i, hypre_MPI_DOUBLE, this_pe, 555, comm, &mat->recv_req[mat->num_recv]); hypre_MPI_Send_init(&mat->recvbuf[i+num_local], j-i, hypre_MPI_DOUBLE, this_pe, 666, comm, &mat->send_req2[mat->num_recv]); mat->num_recv++; } } /*-------------------------------------------------------------------------- * SetupSends * This function will wait for all receives to complete. 
*--------------------------------------------------------------------------*/ static void SetupSends(Matrix *mat, HYPRE_Int *inlist) { HYPRE_Int i, j, mype, npes; hypre_MPI_Request *requests; hypre_MPI_Status *statuses; MPI_Comm comm = mat->comm; hypre_MPI_Comm_rank(comm, &mype); hypre_MPI_Comm_size(comm, &npes); requests = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); statuses = (hypre_MPI_Status *) malloc(npes * sizeof(hypre_MPI_Status)); /* Determine size of and allocate sendbuf and sendind */ mat->sendlen = 0; for (i=0; i<npes; i++) mat->sendlen += inlist[i]; mat->sendbuf = NULL; mat->sendind = NULL; if (mat->sendlen) { mat->sendbuf = (HYPRE_Real *) malloc(mat->sendlen * sizeof(HYPRE_Real)); mat->sendind = (HYPRE_Int *) malloc(mat->sendlen * sizeof(HYPRE_Int)); } j = 0; mat->num_send = 0; for (i=0; i<npes; i++) { if (inlist[i] != 0) { /* Post receive for the actual indices */ hypre_MPI_Irecv(&mat->sendind[j], inlist[i], HYPRE_MPI_INT, i, 444, comm, &requests[mat->num_send]); /* Set up the send */ hypre_MPI_Send_init(&mat->sendbuf[j], inlist[i], hypre_MPI_DOUBLE, i, 555, comm, &mat->send_req[mat->num_send]); /* Set up the receive for the transpose */ hypre_MPI_Recv_init(&mat->sendbuf[j], inlist[i], hypre_MPI_DOUBLE, i, 666, comm, &mat->recv_req2[mat->num_send]); mat->num_send++; j += inlist[i]; } } hypre_MPI_Waitall(mat->num_send, requests, statuses); free(requests); free(statuses); /* convert global indices to local indices */ /* these are all indices on this processor */ for (i=0; i<mat->sendlen; i++) mat->sendind[i] -= mat->beg_row; } /*-------------------------------------------------------------------------- * MatrixComplete *--------------------------------------------------------------------------*/ void MatrixComplete(Matrix *mat) { HYPRE_Int mype, npes; HYPRE_Int *outlist, *inlist; HYPRE_Int row, len, *ind; HYPRE_Real *val; hypre_MPI_Comm_rank(mat->comm, &mype); hypre_MPI_Comm_size(mat->comm, &npes); mat->recv_req = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); mat->send_req = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); mat->recv_req2 = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); mat->send_req2 = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); mat->statuses = (hypre_MPI_Status *) malloc(npes * sizeof(hypre_MPI_Status)); outlist = (HYPRE_Int *) calloc(npes, sizeof(HYPRE_Int)); inlist = (HYPRE_Int *) calloc(npes, sizeof(HYPRE_Int)); /* Create Numbering object */ mat->numb = NumberingCreate(mat, PARASAILS_NROWS); SetupReceives(mat, mat->numb->num_ind - mat->numb->num_loc, &mat->numb->local_to_global[mat->numb->num_loc], outlist); hypre_MPI_Alltoall(outlist, 1, HYPRE_MPI_INT, inlist, 1, HYPRE_MPI_INT, mat->comm); SetupSends(mat, inlist); free(outlist); free(inlist); /* Convert to local indices */ for (row=0; row<=mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); NumberingGlobalToLocal(mat->numb, len, ind, ind); } } /*-------------------------------------------------------------------------- * MatrixMatvec * Can be done in place. 
*--------------------------------------------------------------------------*/ void MatrixMatvec(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y) { HYPRE_Int row, i, len, *ind; HYPRE_Real *val, temp; HYPRE_Int num_local = mat->end_row - mat->beg_row + 1; /* Set up persistent communications */ /* Assumes MatrixComplete has been called */ /* Put components of x into the right outgoing buffers */ for (i=0; i<mat->sendlen; i++) mat->sendbuf[i] = x[mat->sendind[i]]; hypre_MPI_Startall(mat->num_recv, mat->recv_req); hypre_MPI_Startall(mat->num_send, mat->send_req); /* Copy local part of x into top part of recvbuf */ for (i=0; i<num_local; i++) mat->recvbuf[i] = x[i]; hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses); /* do the multiply */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(row,len,ind,val,temp,i) schedule(static) #endif for (row=0; row<=mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); temp = 0.0; for (i=0; i<len; i++) { temp = temp + val[i] * mat->recvbuf[ind[i]]; } y[row] = temp; } hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses); } void MatrixMatvecSerial(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y) { HYPRE_Int row, i, len, *ind; HYPRE_Real *val, temp; HYPRE_Int num_local = mat->end_row - mat->beg_row + 1; /* Set up persistent communications */ /* Assumes MatrixComplete has been called */ /* Put components of x into the right outgoing buffers */ for (i=0; i<mat->sendlen; i++) mat->sendbuf[i] = x[mat->sendind[i]]; hypre_MPI_Startall(mat->num_recv, mat->recv_req); hypre_MPI_Startall(mat->num_send, mat->send_req); /* Copy local part of x into top part of recvbuf */ for (i=0; i<num_local; i++) mat->recvbuf[i] = x[i]; hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses); /* do the multiply */ for (row=0; row<=mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); temp = 0.0; for (i=0; i<len; i++) { temp = temp + val[i] * mat->recvbuf[ind[i]]; } y[row] = temp; } hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses); } /*-------------------------------------------------------------------------- * MatrixMatvecTrans * Can be done in place. *--------------------------------------------------------------------------*/ void MatrixMatvecTrans(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y) { HYPRE_Int row, i, len, *ind; HYPRE_Real *val; HYPRE_Int num_local = mat->end_row - mat->beg_row + 1; /* Set up persistent communications */ /* Assumes MatrixComplete has been called */ /* Post receives for local parts of the solution y */ hypre_MPI_Startall(mat->num_send, mat->recv_req2); /* initialize accumulator buffer to zero */ for (i=0; i<mat->recvlen+num_local; i++) mat->recvbuf[i] = 0.0; /* do the multiply */ for (row=0; row<=mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); for (i=0; i<len; i++) { mat->recvbuf[ind[i]] += val[i] * x[row]; } } /* Now can send nonlocal parts of solution to other procs */ hypre_MPI_Startall(mat->num_recv, mat->send_req2); /* copy local part of solution into y */ for (i=0; i<num_local; i++) y[i] = mat->recvbuf[i]; /* alternatively, loop over a wait any */ hypre_MPI_Waitall(mat->num_send, mat->recv_req2, mat->statuses); /* add all the incoming partial sums to y */ for (i=0; i<mat->sendlen; i++) y[mat->sendind[i]] += mat->sendbuf[i]; hypre_MPI_Waitall(mat->num_recv, mat->send_req2, mat->statuses); }
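SetupReceives, SetupSends, and MatrixComplete above build persistent requests once (hypre_MPI_Send_init / hypre_MPI_Recv_init), so each MatrixMatvec pays only hypre_MPI_Startall plus hypre_MPI_Waitall and can overlap the exchange with the local copy into recvbuf. Below is a minimal sketch of that persistent-communication pattern in plain MPI rather than the hypre_MPI_* wrappers; it is a one-neighbor ring exchange, not the row-request protocol above (compile with mpicc, run under mpirun).

#include <mpi.h>
#include <stdio.h>

int main (int argc, char **argv)
{
    int rank, size ;
    MPI_Init (&argc, &argv) ;
    MPI_Comm_rank (MPI_COMM_WORLD, &rank) ;
    MPI_Comm_size (MPI_COMM_WORLD, &size) ;

    int right = (rank + 1) % size ;
    int left  = (rank + size - 1) % size ;
    double sendbuf = (double) rank, recvbuf = -1.0 ;
    MPI_Request req [2] ;

    /* build the requests once, as MatrixComplete does */
    MPI_Send_init (&sendbuf, 1, MPI_DOUBLE, right, 555, MPI_COMM_WORLD, &req [0]) ;
    MPI_Recv_init (&recvbuf, 1, MPI_DOUBLE, left,  555, MPI_COMM_WORLD, &req [1]) ;

    for (int iter = 0 ; iter < 3 ; iter++)      /* one exchange per "matvec" */
    {
        MPI_Startall (2, req) ;
        /* ... overlap local work here, as MatrixMatvec overlaps the copy ... */
        MPI_Waitall (2, req, MPI_STATUSES_IGNORE) ;
    }
    printf ("rank %d got %g from rank %d\n", rank, recvbuf, left) ;

    MPI_Request_free (&req [0]) ;
    MPI_Request_free (&req [1]) ;
    MPI_Finalize () ;
    return 0 ;
}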
/****************************************************************************** * * Matrix - Matrix stored and accessible by rows. Indices and values for * the matrix nonzeros are copied into the matrix a row at a time, in any * order using the MatrixGetRow function. The MatrixPutRow function returns * a pointer to the indices and values of a row. The matrix has a set of * row and column indices such that these indices begin at "beg" and end * at "end", where 0 <= "beg" <= "end". In other words, the matrix indices * have any nonnegative base value, and the base values of the row and column * indices must agree. * *****************************************************************************/ #include <stdlib.h> #include <memory.h> #include <assert.h> #include "Common.h" #include "Matrix.h" #include "Numbering.h" #define MAX_NZ_PER_ROW 1000 /*-------------------------------------------------------------------------- * MatrixCreate - Return (a pointer to) a matrix object. *--------------------------------------------------------------------------*/ Matrix * MatrixCreate(MPI_Comm comm, HYPRE_Int beg_row, HYPRE_Int end_row) { HYPRE_Int num_rows, mype, npes; Matrix *mat = (Matrix *) malloc(sizeof(Matrix)); mat->comm = comm; mat->beg_row = beg_row; mat->end_row = end_row; mat->mem = (Mem *) MemCreate(); num_rows = mat->end_row - mat->beg_row + 1; mat->lens = (HYPRE_Int *) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int)); mat->inds = (HYPRE_Int **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int *)); mat->vals = (HYPRE_Real **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Real *)); /* Send beg_row and end_row to all processors */ /* This is needed in order to map row numbers to processors */ hypre_MPI_Comm_rank(comm, &mype); hypre_MPI_Comm_size(comm, &npes); mat->beg_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int)); mat->end_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int)); hypre_MPI_Allgather(&beg_row, 1, HYPRE_MPI_INT, mat->beg_rows, 1, HYPRE_MPI_INT, comm); hypre_MPI_Allgather(&end_row, 1, HYPRE_MPI_INT, mat->end_rows, 1, HYPRE_MPI_INT, comm); mat->num_recv = 0; mat->num_send = 0; mat->recv_req = NULL; mat->send_req = NULL; mat->recv_req2 = NULL; mat->send_req2 = NULL; mat->statuses = NULL; mat->sendind = NULL; mat->sendbuf = NULL; mat->recvbuf = NULL; mat->numb = NULL; return mat; } /*-------------------------------------------------------------------------- * MatrixCreateLocal - Return (a pointer to) a matrix object. * The matrix created by this call is a local matrix, not a global matrix. 
*--------------------------------------------------------------------------*/ Matrix * MatrixCreateLocal(HYPRE_Int beg_row, HYPRE_Int end_row) { HYPRE_Int num_rows; Matrix *mat = (Matrix *) malloc(sizeof(Matrix)); mat->comm = hypre_MPI_COMM_NULL; mat->beg_row = beg_row; mat->end_row = end_row; mat->mem = (Mem *) MemCreate(); num_rows = mat->end_row - mat->beg_row + 1; mat->lens = (HYPRE_Int *) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int)); mat->inds = (HYPRE_Int **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int *)); mat->vals = (HYPRE_Real **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Real *)); /* Send beg_row and end_row to all processors */ /* This is needed in order to map row numbers to processors */ mat->beg_rows = NULL; mat->end_rows = NULL; mat->num_recv = 0; mat->num_send = 0; mat->recv_req = NULL; mat->send_req = NULL; mat->recv_req2 = NULL; mat->send_req2 = NULL; mat->statuses = NULL; mat->sendind = NULL; mat->sendbuf = NULL; mat->recvbuf = NULL; mat->numb = NULL; return mat; } /*-------------------------------------------------------------------------- * MatrixDestroy - Destroy a matrix object "mat". *--------------------------------------------------------------------------*/ void MatrixDestroy(Matrix * mat) { HYPRE_Int i; for (i = 0; i < mat->num_recv; i++) hypre_MPI_Request_free(&mat->recv_req[i]); for (i = 0; i < mat->num_send; i++) hypre_MPI_Request_free(&mat->send_req[i]); for (i = 0; i < mat->num_send; i++) hypre_MPI_Request_free(&mat->recv_req2[i]); for (i = 0; i < mat->num_recv; i++) hypre_MPI_Request_free(&mat->send_req2[i]); free(mat->recv_req); free(mat->send_req); free(mat->recv_req2); free(mat->send_req2); free(mat->statuses); free(mat->sendind); free(mat->sendbuf); free(mat->recvbuf); MemDestroy(mat->mem); if (mat->numb) NumberingDestroy(mat->numb); free(mat); } /*-------------------------------------------------------------------------- * MatrixSetRow - Set a row in a matrix. Only local rows can be set. * Once a row has been set, it should not be set again, or else the * memory used by the existing row will not be recovered until * the matrix is destroyed. "row" is in global coordinate numbering. *--------------------------------------------------------------------------*/ void MatrixSetRow(Matrix * mat, HYPRE_Int row, HYPRE_Int len, HYPRE_Int * ind, HYPRE_Real * val) { row -= mat->beg_row; mat->lens[row] = len; mat->inds[row] = (HYPRE_Int *) MemAlloc(mat->mem, len * sizeof(HYPRE_Int)); mat->vals[row] = (HYPRE_Real *) MemAlloc(mat->mem, len * sizeof(HYPRE_Real)); if (ind != NULL) memcpy(mat->inds[row], ind, len * sizeof(HYPRE_Int)); if (val != NULL) memcpy(mat->vals[row], val, len * sizeof(HYPRE_Real)); } /*-------------------------------------------------------------------------- * MatrixGetRow - Get a *local* row in a matrix. *--------------------------------------------------------------------------*/ void MatrixGetRow(Matrix * mat, HYPRE_Int row, HYPRE_Int * lenp, HYPRE_Int ** indp, HYPRE_Real ** valp) { *lenp = mat->lens[row]; *indp = mat->inds[row]; *valp = mat->vals[row]; } /*-------------------------------------------------------------------------- * MatrixRowPe - Map "row" to a processor number. 
*--------------------------------------------------------------------------*/ HYPRE_Int MatrixRowPe(Matrix * mat, HYPRE_Int row) { HYPRE_Int npes, pe; HYPRE_Int *beg = mat->beg_rows; HYPRE_Int *end = mat->end_rows; hypre_MPI_Comm_size(mat->comm, &npes); for (pe = 0; pe < npes; pe++) { if (row >= beg[pe] && row <= end[pe]) return pe; } hypre_printf("MatrixRowPe: could not map row %d.\n", row); PARASAILS_EXIT; return -1; /* for picky compilers */ } /*-------------------------------------------------------------------------- * MatrixNnz - Return total number of nonzeros in preconditioner. *--------------------------------------------------------------------------*/ HYPRE_Int MatrixNnz(Matrix * mat) { HYPRE_Int num_local, i, total, alltotal; num_local = mat->end_row - mat->beg_row + 1; total = 0; for (i = 0; i < num_local; i++) total += mat->lens[i]; hypre_MPI_Allreduce(&total, &alltotal, 1, HYPRE_MPI_INT, hypre_MPI_SUM, mat->comm); return alltotal; } /*-------------------------------------------------------------------------- * MatrixPrint - Print a matrix to a file "filename". Each processor * appends to the file in order, but the file is overwritten if it exists. *--------------------------------------------------------------------------*/ void MatrixPrint(Matrix * mat, char *filename) { HYPRE_Int mype, npes, pe; HYPRE_Int row, i, len, *ind; HYPRE_Real *val; hypre_MPI_Comm_rank(mat->comm, &mype); hypre_MPI_Comm_size(mat->comm, &npes); for (pe = 0; pe < npes; pe++) { hypre_MPI_Barrier(mat->comm); if (mype == pe) { FILE *file = fopen(filename, (pe == 0 ? "w" : "a")); assert(file != NULL); for (row = 0; row <= mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); for (i = 0; i < len; i++) hypre_fprintf(file, "%d %d %.14e\n", row + mat->beg_row, mat->numb->local_to_global[ind[i]], val[i]); } fclose(file); } } } /*-------------------------------------------------------------------------- * MatrixReadMaster - MatrixRead routine for processor 0. Internal use. 
*--------------------------------------------------------------------------*/ static void MatrixReadMaster(Matrix * mat, char *filename) { MPI_Comm comm = mat->comm; HYPRE_Int mype, npes; FILE *file; HYPRE_Int ret; HYPRE_Int num_rows, curr_proc; HYPRE_Int row, col; HYPRE_Real value; hypre_longint offset; hypre_longint outbuf; HYPRE_Int curr_row; HYPRE_Int len; HYPRE_Int ind[MAX_NZ_PER_ROW]; HYPRE_Real val[MAX_NZ_PER_ROW]; char line[100]; HYPRE_Int oldrow; hypre_MPI_Request request; hypre_MPI_Status status; hypre_MPI_Comm_size(mat->comm, &npes); hypre_MPI_Comm_rank(mat->comm, &mype); file = fopen(filename, "r"); assert(file != NULL); fgets(line, 100, file); #ifdef EMSOLVE ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows); for (row = 0; row < num_rows; row++) hypre_fscanf(file, "%*d"); #else ret = hypre_sscanf(line, "%d %*d %*d", &num_rows); #endif offset = ftell(file); hypre_fscanf(file, "%d %d %lf", &row, &col, &value); request = hypre_MPI_REQUEST_NULL; curr_proc = 1; /* proc for which we are looking for the * beginning */ while (curr_proc < npes) { if (row == mat->beg_rows[curr_proc]) { hypre_MPI_Wait(&request, &status); outbuf = offset; hypre_MPI_Isend(&outbuf, 1, hypre_MPI_LONG, curr_proc, 0, comm, &request); curr_proc++; } offset = ftell(file); oldrow = row; hypre_fscanf(file, "%d %d %lf", &row, &col, &value); if (oldrow > row) { hypre_fprintf(stderr, "Matrix file is not sorted by rows.\n"); PARASAILS_EXIT; } } /* Now read our own part */ rewind(file); fgets(line, 100, file); #ifdef EMSOLVE ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows); for (row = 0; row < num_rows; row++) hypre_fscanf(file, "%*d"); #else ret = hypre_sscanf(line, "%d %*d %*d", &num_rows); #endif ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value); curr_row = row; len = 0; while (ret != EOF && row <= mat->end_row) { if (row != curr_row) { /* store this row */ MatrixSetRow(mat, curr_row, len, ind, val); curr_row = row; /* reset row pointer */ len = 0; } if (len >= MAX_NZ_PER_ROW) { hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW); hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n"); hypre_fprintf(stderr, "increased to continue.\n"); PARASAILS_EXIT; } ind[len] = col; val[len] = value; len++; ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value); } /* Store the final row */ if (ret == EOF || row > mat->end_row) MatrixSetRow(mat, mat->end_row, len, ind, val); fclose(file); hypre_MPI_Wait(&request, &status); } /*-------------------------------------------------------------------------- * MatrixReadSlave - MatrixRead routine for other processors. Internal use. 
*--------------------------------------------------------------------------*/ static void MatrixReadSlave(Matrix * mat, char *filename) { MPI_Comm comm = mat->comm; hypre_MPI_Status status; HYPRE_Int mype; FILE *file; HYPRE_Int ret; HYPRE_Int row, col; HYPRE_Real value; hypre_longint offset; HYPRE_Int curr_row; HYPRE_Int len; HYPRE_Int ind[MAX_NZ_PER_ROW]; HYPRE_Real val[MAX_NZ_PER_ROW]; HYPRE_Real time0, time1; file = fopen(filename, "r"); assert(file != NULL); hypre_MPI_Comm_rank(mat->comm, &mype); hypre_MPI_Recv(&offset, 1, hypre_MPI_LONG, 0, 0, comm, &status); time0 = hypre_MPI_Wtime(); ret = fseek(file, offset, SEEK_SET); assert(ret == 0); ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value); curr_row = row; len = 0; while (ret != EOF && row <= mat->end_row) { if (row != curr_row) { /* store this row */ MatrixSetRow(mat, curr_row, len, ind, val); curr_row = row; /* reset row pointer */ len = 0; } if (len >= MAX_NZ_PER_ROW) { hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW); hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n"); hypre_fprintf(stderr, "increased to continue.\n"); PARASAILS_EXIT; } ind[len] = col; val[len] = value; len++; ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value); } /* Store the final row */ if (ret == EOF || row > mat->end_row) MatrixSetRow(mat, mat->end_row, len, ind, val); fclose(file); time1 = hypre_MPI_Wtime(); hypre_printf("%d: Time for slave read: %f\n", mype, time1 - time0); } /*-------------------------------------------------------------------------- * MatrixRead - Read a matrix file "filename" from disk and store in the * matrix "mat" which has already been created using MatrixCreate. The format * assumes no nonzero rows, the rows are in order, and there will be at least * one row per processor. *--------------------------------------------------------------------------*/ void MatrixRead(Matrix * mat, char *filename) { HYPRE_Int mype; HYPRE_Real time0, time1; hypre_MPI_Comm_rank(mat->comm, &mype); time0 = hypre_MPI_Wtime(); if (mype == 0) MatrixReadMaster(mat, filename); else MatrixReadSlave(mat, filename); time1 = hypre_MPI_Wtime(); hypre_printf("%d: Time for reading matrix: %f\n", mype, time1 - time0); MatrixComplete(mat); } /*-------------------------------------------------------------------------- * RhsRead - Read a right-hand side file "filename" from disk and store in the * location pointed to by "rhs". "mat" is needed to provide the partitioning * information. The expected format is: a header line (n, nrhs) followed * by n values. Also allows isis format, indicated by 1 HYPRE_Int in first line. 
*--------------------------------------------------------------------------*/ void RhsRead(HYPRE_Real * rhs, Matrix * mat, char *filename) { FILE *file; hypre_MPI_Status status; HYPRE_Int mype, npes; HYPRE_Int num_rows, num_local, pe, i, converted; HYPRE_Real *buffer = NULL; HYPRE_Int buflen = 0; char line[100]; HYPRE_Int dummy; hypre_MPI_Comm_size(mat->comm, &npes); hypre_MPI_Comm_rank(mat->comm, &mype); num_local = mat->end_row - mat->beg_row + 1; if (mype != 0) { hypre_MPI_Recv(rhs, num_local, hypre_MPI_DOUBLE, 0, 0, mat->comm, &status); return; } file = fopen(filename, "r"); assert(file != NULL); fgets(line, 100, file); converted = hypre_sscanf(line, "%d %d", &num_rows, &dummy); assert(num_rows == mat->end_rows[npes - 1]); /* Read own rows first */ for (i = 0; i < num_local; i++) if (converted == 1) /* isis format */ hypre_fscanf(file, "%*d %lf", &rhs[i]); else hypre_fscanf(file, "%lf", &rhs[i]); for (pe = 1; pe < npes; pe++) { num_local = mat->end_rows[pe] - mat->beg_rows[pe] + 1; if (buflen < num_local) { free(buffer); buflen = num_local; buffer = (HYPRE_Real *) malloc(buflen * sizeof(HYPRE_Real)); } for (i = 0; i < num_local; i++) if (converted == 1) /* isis format */ hypre_fscanf(file, "%*d %lf", &buffer[i]); else hypre_fscanf(file, "%lf", &buffer[i]); hypre_MPI_Send(buffer, num_local, hypre_MPI_DOUBLE, pe, 0, mat->comm); } free(buffer); } /*-------------------------------------------------------------------------- * SetupReceives *--------------------------------------------------------------------------*/ static void SetupReceives(Matrix * mat, HYPRE_Int reqlen, HYPRE_Int * reqind, HYPRE_Int * outlist) { HYPRE_Int i, j, this_pe, mype; hypre_MPI_Request request; MPI_Comm comm = mat->comm; HYPRE_Int num_local = mat->end_row - mat->beg_row + 1; hypre_MPI_Comm_rank(comm, &mype); mat->num_recv = 0; /* Allocate recvbuf */ /* recvbuf has numlocal entires saved for local part of x, used in matvec */ mat->recvlen = reqlen; /* used for the transpose multiply */ mat->recvbuf = (HYPRE_Real *) malloc((reqlen + num_local) * sizeof(HYPRE_Real)); for (i = 0; i < reqlen; i = j) /* j is set below */ { /* The processor that owns the row with index reqind[i] */ this_pe = MatrixRowPe(mat, reqind[i]); /* Figure out other rows we need from this_pe */ for (j = i + 1; j < reqlen; j++) { /* if row is on different pe */ if (reqind[j] < mat->beg_rows[this_pe] || reqind[j] > mat->end_rows[this_pe]) break; } /* Request rows in reqind[i..j-1] */ hypre_MPI_Isend(&reqind[i], j - i, HYPRE_MPI_INT, this_pe, 444, comm, &request); hypre_MPI_Request_free(&request); /* Count of number of number of indices needed from this_pe */ outlist[this_pe] = j - i; hypre_MPI_Recv_init(&mat->recvbuf[i + num_local], j - i, hypre_MPI_DOUBLE, this_pe, 555, comm, &mat->recv_req[mat->num_recv]); hypre_MPI_Send_init(&mat->recvbuf[i + num_local], j - i, hypre_MPI_DOUBLE, this_pe, 666, comm, &mat->send_req2[mat->num_recv]); mat->num_recv++; } } /*-------------------------------------------------------------------------- * SetupSends * This function will wait for all receives to complete. 
*--------------------------------------------------------------------------*/ static void SetupSends(Matrix * mat, HYPRE_Int * inlist) { HYPRE_Int i, j, mype, npes; hypre_MPI_Request *requests; hypre_MPI_Status *statuses; MPI_Comm comm = mat->comm; hypre_MPI_Comm_rank(comm, &mype); hypre_MPI_Comm_size(comm, &npes); requests = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); statuses = (hypre_MPI_Status *) malloc(npes * sizeof(hypre_MPI_Status)); /* Determine size of and allocate sendbuf and sendind */ mat->sendlen = 0; for (i = 0; i < npes; i++) mat->sendlen += inlist[i]; mat->sendbuf = NULL; mat->sendind = NULL; if (mat->sendlen) { mat->sendbuf = (HYPRE_Real *) malloc(mat->sendlen * sizeof(HYPRE_Real)); mat->sendind = (HYPRE_Int *) malloc(mat->sendlen * sizeof(HYPRE_Int)); } j = 0; mat->num_send = 0; for (i = 0; i < npes; i++) { if (inlist[i] != 0) { /* Post receive for the actual indices */ hypre_MPI_Irecv(&mat->sendind[j], inlist[i], HYPRE_MPI_INT, i, 444, comm, &requests[mat->num_send]); /* Set up the send */ hypre_MPI_Send_init(&mat->sendbuf[j], inlist[i], hypre_MPI_DOUBLE, i, 555, comm, &mat->send_req[mat->num_send]); /* Set up the receive for the transpose */ hypre_MPI_Recv_init(&mat->sendbuf[j], inlist[i], hypre_MPI_DOUBLE, i, 666, comm, &mat->recv_req2[mat->num_send]); mat->num_send++; j += inlist[i]; } } hypre_MPI_Waitall(mat->num_send, requests, statuses); free(requests); free(statuses); /* convert global indices to local indices */ /* these are all indices on this processor */ for (i = 0; i < mat->sendlen; i++) mat->sendind[i] -= mat->beg_row; } /*-------------------------------------------------------------------------- * MatrixComplete *--------------------------------------------------------------------------*/ void MatrixComplete(Matrix * mat) { HYPRE_Int mype, npes; HYPRE_Int *outlist, *inlist; HYPRE_Int row, len, *ind; HYPRE_Real *val; hypre_MPI_Comm_rank(mat->comm, &mype); hypre_MPI_Comm_size(mat->comm, &npes); mat->recv_req = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); mat->send_req = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); mat->recv_req2 = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); mat->send_req2 = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); mat->statuses = (hypre_MPI_Status *) malloc(npes * sizeof(hypre_MPI_Status)); outlist = (HYPRE_Int *) calloc(npes, sizeof(HYPRE_Int)); inlist = (HYPRE_Int *) calloc(npes, sizeof(HYPRE_Int)); /* Create Numbering object */ mat->numb = NumberingCreate(mat, PARASAILS_NROWS); SetupReceives(mat, mat->numb->num_ind - mat->numb->num_loc, &mat->numb->local_to_global[mat->numb->num_loc], outlist); hypre_MPI_Alltoall(outlist, 1, HYPRE_MPI_INT, inlist, 1, HYPRE_MPI_INT, mat->comm); SetupSends(mat, inlist); free(outlist); free(inlist); /* Convert to local indices */ for (row = 0; row <= mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); NumberingGlobalToLocal(mat->numb, len, ind, ind); } } /*-------------------------------------------------------------------------- * MatrixMatvec * Can be done in place. 
*--------------------------------------------------------------------------*/ void MatrixMatvec(Matrix * mat, HYPRE_Real * x, HYPRE_Real * y) { HYPRE_Int row, i, len, *ind; HYPRE_Real *val, temp; HYPRE_Int num_local = mat->end_row - mat->beg_row + 1; /* Set up persistent communications */ /* Assumes MatrixComplete has been called */ /* Put components of x into the right outgoing buffers */ for (i = 0; i < mat->sendlen; i++) mat->sendbuf[i] = x[mat->sendind[i]]; hypre_MPI_Startall(mat->num_recv, mat->recv_req); hypre_MPI_Startall(mat->num_send, mat->send_req); /* Copy local part of x into top part of recvbuf */ for (i = 0; i < num_local; i++) mat->recvbuf[i] = x[i]; hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses); /* do the multiply */ for (row = 0; row <= mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); temp = 0.0; for (i = 0; i < len; i++) { temp = temp + val[i] * mat->recvbuf[ind[i]]; } y[row] = temp; } hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses); } void MatrixMatvecSerial(Matrix * mat, HYPRE_Real * x, HYPRE_Real * y) { HYPRE_Int row, i, len, *ind; HYPRE_Real *val, temp; HYPRE_Int num_local = mat->end_row - mat->beg_row + 1; /* Set up persistent communications */ /* Assumes MatrixComplete has been called */ /* Put components of x into the right outgoing buffers */ for (i = 0; i < mat->sendlen; i++) mat->sendbuf[i] = x[mat->sendind[i]]; hypre_MPI_Startall(mat->num_recv, mat->recv_req); hypre_MPI_Startall(mat->num_send, mat->send_req); /* Copy local part of x into top part of recvbuf */ for (i = 0; i < num_local; i++) mat->recvbuf[i] = x[i]; hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses); /* do the multiply */ for (row = 0; row <= mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); temp = 0.0; for (i = 0; i < len; i++) { temp = temp + val[i] * mat->recvbuf[ind[i]]; } y[row] = temp; } hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses); } /*-------------------------------------------------------------------------- * MatrixMatvecTrans * Can be done in place. *--------------------------------------------------------------------------*/ void MatrixMatvecTrans(Matrix * mat, HYPRE_Real * x, HYPRE_Real * y) { HYPRE_Int row, i, len, *ind; HYPRE_Real *val; HYPRE_Int num_local = mat->end_row - mat->beg_row + 1; /* Set up persistent communications */ /* Assumes MatrixComplete has been called */ /* Post receives for local parts of the solution y */ hypre_MPI_Startall(mat->num_send, mat->recv_req2); /* initialize accumulator buffer to zero */ for (i = 0; i < mat->recvlen + num_local; i++) mat->recvbuf[i] = 0.0; /* do the multiply */ for (row = 0; row <= mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); for (i = 0; i < len; i++) { mat->recvbuf[ind[i]] += val[i] * x[row]; } } /* Now can send nonlocal parts of solution to other procs */ hypre_MPI_Startall(mat->num_recv, mat->send_req2); /* copy local part of solution into y */ for (i = 0; i < num_local; i++) y[i] = mat->recvbuf[i]; /* alternatively, loop over a wait any */ hypre_MPI_Waitall(mat->num_send, mat->recv_req2, mat->statuses); /* add all the incoming partial sums to y */ for (i = 0; i < mat->sendlen; i++) y[mat->sendind[i]] += mat->sendbuf[i]; hypre_MPI_Waitall(mat->num_recv, mat->send_req2, mat->statuses); }
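Worth noting in the matvec trio above: the communication schedule is built once (SetupReceives/SetupSends create persistent requests with hypre_MPI_Recv_init and hypre_MPI_Send_init) and is then merely re-armed on every multiply with Startall/Waitall, avoiding per-call request construction. A minimal standalone sketch of that persistent-request pattern in plain MPI follows; the neighbor rank, buffers, and counts are hypothetical placeholders, not values taken from the Matrix code.

#include <mpi.h>

/* Sketch of the persistent-request exchange used by MatrixMatvec.
   "neighbor", n, and iters are made-up placeholders. */
void persistent_exchange_demo(int neighbor, double *sendbuf, double *recvbuf,
                              int n, int iters)
{
    MPI_Request reqs[2];

    /* created once, as in SetupReceives/SetupSends */
    MPI_Recv_init(recvbuf, n, MPI_DOUBLE, neighbor, 555, MPI_COMM_WORLD, &reqs[0]);
    MPI_Send_init(sendbuf, n, MPI_DOUBLE, neighbor, 555, MPI_COMM_WORLD, &reqs[1]);

    for (int it = 0; it < iters; it++)
    {
        MPI_Startall(2, reqs);      /* re-armed on every multiply */
        /* ... the local part of the product would overlap here ... */
        MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
    }

    MPI_Request_free(&reqs[0]);
    MPI_Request_free(&reqs[1]);
}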
/****************************************************************************** * * Matrix - Matrix stored and accessible by rows. Indices and values for * the matrix nonzeros are copied into the matrix a row at a time, in any * order using the MatrixSetRow function. The MatrixGetRow function returns * a pointer to the indices and values of a row. The matrix has a set of * row and column indices such that these indices begin at "beg" and end * at "end", where 0 <= "beg" <= "end". In other words, the matrix indices * have any nonnegative base value, and the base values of the row and column * indices must agree. * *****************************************************************************/ #include <stdlib.h> #include <memory.h> #include <assert.h> #include "Common.h" #include "Matrix.h" #include "Numbering.h" #define MAX_NZ_PER_ROW 1000 /*-------------------------------------------------------------------------- * MatrixCreate - Return (a pointer to) a matrix object. *--------------------------------------------------------------------------*/ Matrix * MatrixCreate(MPI_Comm comm, HYPRE_Int beg_row, HYPRE_Int end_row) { HYPRE_Int num_rows, mype, npes; Matrix *mat = (Matrix *) malloc(sizeof(Matrix)); mat->comm = comm; mat->beg_row = beg_row; mat->end_row = end_row; mat->mem = (Mem *) MemCreate(); num_rows = mat->end_row - mat->beg_row + 1; mat->lens = (HYPRE_Int *) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int)); mat->inds = (HYPRE_Int **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int *)); mat->vals = (HYPRE_Real **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Real *)); /* Send beg_row and end_row to all processors */ /* This is needed in order to map row numbers to processors */ hypre_MPI_Comm_rank(comm, &mype); hypre_MPI_Comm_size(comm, &npes); mat->beg_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int)); mat->end_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int)); hypre_MPI_Allgather(&beg_row, 1, HYPRE_MPI_INT, mat->beg_rows, 1, HYPRE_MPI_INT, comm); hypre_MPI_Allgather(&end_row, 1, HYPRE_MPI_INT, mat->end_rows, 1, HYPRE_MPI_INT, comm); mat->num_recv = 0; mat->num_send = 0; mat->recv_req = NULL; mat->send_req = NULL; mat->recv_req2 = NULL; mat->send_req2 = NULL; mat->statuses = NULL; mat->sendind = NULL; mat->sendbuf = NULL; mat->recvbuf = NULL; mat->numb = NULL; return mat; } /*-------------------------------------------------------------------------- * MatrixCreateLocal - Return (a pointer to) a matrix object. * The matrix created by this call is a local matrix, not a global matrix. 
*--------------------------------------------------------------------------*/ Matrix * MatrixCreateLocal(HYPRE_Int beg_row, HYPRE_Int end_row) { HYPRE_Int num_rows; Matrix *mat = (Matrix *) malloc(sizeof(Matrix)); mat->comm = hypre_MPI_COMM_NULL; mat->beg_row = beg_row; mat->end_row = end_row; mat->mem = (Mem *) MemCreate(); num_rows = mat->end_row - mat->beg_row + 1; mat->lens = (HYPRE_Int *) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int)); mat->inds = (HYPRE_Int **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int *)); mat->vals = (HYPRE_Real **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Real *)); /* Send beg_row and end_row to all processors */ /* This is needed in order to map row numbers to processors */ mat->beg_rows = NULL; mat->end_rows = NULL; mat->num_recv = 0; mat->num_send = 0; mat->recv_req = NULL; mat->send_req = NULL; mat->recv_req2 = NULL; mat->send_req2 = NULL; mat->statuses = NULL; mat->sendind = NULL; mat->sendbuf = NULL; mat->recvbuf = NULL; mat->numb = NULL; return mat; } /*-------------------------------------------------------------------------- * MatrixDestroy - Destroy a matrix object "mat". *--------------------------------------------------------------------------*/ void MatrixDestroy(Matrix * mat) { HYPRE_Int i; for (i = 0; i < mat->num_recv; i++) hypre_MPI_Request_free(&mat->recv_req[i]); for (i = 0; i < mat->num_send; i++) hypre_MPI_Request_free(&mat->send_req[i]); for (i = 0; i < mat->num_send; i++) hypre_MPI_Request_free(&mat->recv_req2[i]); for (i = 0; i < mat->num_recv; i++) hypre_MPI_Request_free(&mat->send_req2[i]); free(mat->recv_req); free(mat->send_req); free(mat->recv_req2); free(mat->send_req2); free(mat->statuses); free(mat->sendind); free(mat->sendbuf); free(mat->recvbuf); MemDestroy(mat->mem); if (mat->numb) NumberingDestroy(mat->numb); free(mat); } /*-------------------------------------------------------------------------- * MatrixSetRow - Set a row in a matrix. Only local rows can be set. * Once a row has been set, it should not be set again, or else the * memory used by the existing row will not be recovered until * the matrix is destroyed. "row" is in global coordinate numbering. *--------------------------------------------------------------------------*/ void MatrixSetRow(Matrix * mat, HYPRE_Int row, HYPRE_Int len, HYPRE_Int * ind, HYPRE_Real * val) { row -= mat->beg_row; mat->lens[row] = len; mat->inds[row] = (HYPRE_Int *) MemAlloc(mat->mem, len * sizeof(HYPRE_Int)); mat->vals[row] = (HYPRE_Real *) MemAlloc(mat->mem, len * sizeof(HYPRE_Real)); if (ind != NULL) memcpy(mat->inds[row], ind, len * sizeof(HYPRE_Int)); if (val != NULL) memcpy(mat->vals[row], val, len * sizeof(HYPRE_Real)); } /*-------------------------------------------------------------------------- * MatrixGetRow - Get a *local* row in a matrix. *--------------------------------------------------------------------------*/ void MatrixGetRow(Matrix * mat, HYPRE_Int row, HYPRE_Int * lenp, HYPRE_Int ** indp, HYPRE_Real ** valp) { *lenp = mat->lens[row]; *indp = mat->inds[row]; *valp = mat->vals[row]; } /*-------------------------------------------------------------------------- * MatrixRowPe - Map "row" to a processor number. 
*--------------------------------------------------------------------------*/ HYPRE_Int MatrixRowPe(Matrix * mat, HYPRE_Int row) { HYPRE_Int npes, pe; HYPRE_Int *beg = mat->beg_rows; HYPRE_Int *end = mat->end_rows; hypre_MPI_Comm_size(mat->comm, &npes); for (pe = 0; pe < npes; pe++) { if (row >= beg[pe] && row <= end[pe]) return pe; } hypre_printf("MatrixRowPe: could not map row %d.\n", row); PARASAILS_EXIT; return -1; /* for picky compilers */ } /*-------------------------------------------------------------------------- * MatrixNnz - Return total number of nonzeros in preconditioner. *--------------------------------------------------------------------------*/ HYPRE_Int MatrixNnz(Matrix * mat) { HYPRE_Int num_local, i, total, alltotal; num_local = mat->end_row - mat->beg_row + 1; total = 0; for (i = 0; i < num_local; i++) total += mat->lens[i]; hypre_MPI_Allreduce(&total, &alltotal, 1, HYPRE_MPI_INT, hypre_MPI_SUM, mat->comm); return alltotal; } /*-------------------------------------------------------------------------- * MatrixPrint - Print a matrix to a file "filename". Each processor * appends to the file in order, but the file is overwritten if it exists. *--------------------------------------------------------------------------*/ void MatrixPrint(Matrix * mat, char *filename) { HYPRE_Int mype, npes, pe; HYPRE_Int row, i, len, *ind; HYPRE_Real *val; hypre_MPI_Comm_rank(mat->comm, &mype); hypre_MPI_Comm_size(mat->comm, &npes); for (pe = 0; pe < npes; pe++) { hypre_MPI_Barrier(mat->comm); if (mype == pe) { FILE *file = fopen(filename, (pe == 0 ? "w" : "a")); assert(file != NULL); for (row = 0; row <= mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); for (i = 0; i < len; i++) hypre_fprintf(file, "%d %d %.14e\n", row + mat->beg_row, mat->numb->local_to_global[ind[i]], val[i]); } fclose(file); } } } /*-------------------------------------------------------------------------- * MatrixReadMaster - MatrixRead routine for processor 0. Internal use. 
*--------------------------------------------------------------------------*/ static void MatrixReadMaster(Matrix * mat, char *filename) { MPI_Comm comm = mat->comm; HYPRE_Int mype, npes; FILE *file; HYPRE_Int ret; HYPRE_Int num_rows, curr_proc; HYPRE_Int row, col; HYPRE_Real value; hypre_longint offset; hypre_longint outbuf; HYPRE_Int curr_row; HYPRE_Int len; HYPRE_Int ind[MAX_NZ_PER_ROW]; HYPRE_Real val[MAX_NZ_PER_ROW]; char line[100]; HYPRE_Int oldrow; hypre_MPI_Request request; hypre_MPI_Status status; hypre_MPI_Comm_size(mat->comm, &npes); hypre_MPI_Comm_rank(mat->comm, &mype); file = fopen(filename, "r"); assert(file != NULL); fgets(line, 100, file); #ifdef EMSOLVE ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows); for (row = 0; row < num_rows; row++) hypre_fscanf(file, "%*d"); #else ret = hypre_sscanf(line, "%d %*d %*d", &num_rows); #endif offset = ftell(file); hypre_fscanf(file, "%d %d %lf", &row, &col, &value); request = hypre_MPI_REQUEST_NULL; curr_proc = 1; /* proc for which we are looking for the * beginning */ while (curr_proc < npes) { if (row == mat->beg_rows[curr_proc]) { hypre_MPI_Wait(&request, &status); outbuf = offset; hypre_MPI_Isend(&outbuf, 1, hypre_MPI_LONG, curr_proc, 0, comm, &request); curr_proc++; } offset = ftell(file); oldrow = row; hypre_fscanf(file, "%d %d %lf", &row, &col, &value); if (oldrow > row) { hypre_fprintf(stderr, "Matrix file is not sorted by rows.\n"); PARASAILS_EXIT; } } /* Now read our own part */ rewind(file); fgets(line, 100, file); #ifdef EMSOLVE ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows); for (row = 0; row < num_rows; row++) hypre_fscanf(file, "%*d"); #else ret = hypre_sscanf(line, "%d %*d %*d", &num_rows); #endif ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value); curr_row = row; len = 0; while (ret != EOF && row <= mat->end_row) { if (row != curr_row) { /* store this row */ MatrixSetRow(mat, curr_row, len, ind, val); curr_row = row; /* reset row pointer */ len = 0; } if (len >= MAX_NZ_PER_ROW) { hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW); hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n"); hypre_fprintf(stderr, "increased to continue.\n"); PARASAILS_EXIT; } ind[len] = col; val[len] = value; len++; ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value); } /* Store the final row */ if (ret == EOF || row > mat->end_row) MatrixSetRow(mat, mat->end_row, len, ind, val); fclose(file); hypre_MPI_Wait(&request, &status); } /*-------------------------------------------------------------------------- * MatrixReadSlave - MatrixRead routine for other processors. Internal use. 
*--------------------------------------------------------------------------*/ static void MatrixReadSlave(Matrix * mat, char *filename) { MPI_Comm comm = mat->comm; hypre_MPI_Status status; HYPRE_Int mype; FILE *file; HYPRE_Int ret; HYPRE_Int row, col; HYPRE_Real value; hypre_longint offset; HYPRE_Int curr_row; HYPRE_Int len; HYPRE_Int ind[MAX_NZ_PER_ROW]; HYPRE_Real val[MAX_NZ_PER_ROW]; HYPRE_Real time0, time1; file = fopen(filename, "r"); assert(file != NULL); hypre_MPI_Comm_rank(mat->comm, &mype); hypre_MPI_Recv(&offset, 1, hypre_MPI_LONG, 0, 0, comm, &status); time0 = hypre_MPI_Wtime(); ret = fseek(file, offset, SEEK_SET); assert(ret == 0); ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value); curr_row = row; len = 0; while (ret != EOF && row <= mat->end_row) { if (row != curr_row) { /* store this row */ MatrixSetRow(mat, curr_row, len, ind, val); curr_row = row; /* reset row pointer */ len = 0; } if (len >= MAX_NZ_PER_ROW) { hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW); hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n"); hypre_fprintf(stderr, "increased to continue.\n"); PARASAILS_EXIT; } ind[len] = col; val[len] = value; len++; ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value); } /* Store the final row */ if (ret == EOF || row > mat->end_row) MatrixSetRow(mat, mat->end_row, len, ind, val); fclose(file); time1 = hypre_MPI_Wtime(); hypre_printf("%d: Time for slave read: %f\n", mype, time1 - time0); } /*-------------------------------------------------------------------------- * MatrixRead - Read a matrix file "filename" from disk and store in the * matrix "mat" which has already been created using MatrixCreate. The format * assumes there are no empty rows, the rows are in order, and there will be * at least one row per processor. *--------------------------------------------------------------------------*/ void MatrixRead(Matrix * mat, char *filename) { HYPRE_Int mype; HYPRE_Real time0, time1; hypre_MPI_Comm_rank(mat->comm, &mype); time0 = hypre_MPI_Wtime(); if (mype == 0) MatrixReadMaster(mat, filename); else MatrixReadSlave(mat, filename); time1 = hypre_MPI_Wtime(); hypre_printf("%d: Time for reading matrix: %f\n", mype, time1 - time0); MatrixComplete(mat); } /*-------------------------------------------------------------------------- * RhsRead - Read a right-hand side file "filename" from disk and store in the * location pointed to by "rhs". "mat" is needed to provide the partitioning * information. The expected format is: a header line (n, nrhs) followed * by n values. Also allows isis format, indicated by a single HYPRE_Int on the * first line. 
*--------------------------------------------------------------------------*/ void RhsRead(HYPRE_Real * rhs, Matrix * mat, char *filename) { FILE *file; hypre_MPI_Status status; HYPRE_Int mype, npes; HYPRE_Int num_rows, num_local, pe, i, converted; HYPRE_Real *buffer = NULL; HYPRE_Int buflen = 0; char line[100]; HYPRE_Int dummy; hypre_MPI_Comm_size(mat->comm, &npes); hypre_MPI_Comm_rank(mat->comm, &mype); num_local = mat->end_row - mat->beg_row + 1; if (mype != 0) { hypre_MPI_Recv(rhs, num_local, hypre_MPI_DOUBLE, 0, 0, mat->comm, &status); return; } file = fopen(filename, "r"); assert(file != NULL); fgets(line, 100, file); converted = hypre_sscanf(line, "%d %d", &num_rows, &dummy); assert(num_rows == mat->end_rows[npes - 1]); /* Read own rows first */ for (i = 0; i < num_local; i++) if (converted == 1) /* isis format */ hypre_fscanf(file, "%*d %lf", &rhs[i]); else hypre_fscanf(file, "%lf", &rhs[i]); for (pe = 1; pe < npes; pe++) { num_local = mat->end_rows[pe] - mat->beg_rows[pe] + 1; if (buflen < num_local) { free(buffer); buflen = num_local; buffer = (HYPRE_Real *) malloc(buflen * sizeof(HYPRE_Real)); } for (i = 0; i < num_local; i++) if (converted == 1) /* isis format */ hypre_fscanf(file, "%*d %lf", &buffer[i]); else hypre_fscanf(file, "%lf", &buffer[i]); hypre_MPI_Send(buffer, num_local, hypre_MPI_DOUBLE, pe, 0, mat->comm); } free(buffer); } /*-------------------------------------------------------------------------- * SetupReceives *--------------------------------------------------------------------------*/ static void SetupReceives(Matrix * mat, HYPRE_Int reqlen, HYPRE_Int * reqind, HYPRE_Int * outlist) { HYPRE_Int i, j, this_pe, mype; hypre_MPI_Request request; MPI_Comm comm = mat->comm; HYPRE_Int num_local = mat->end_row - mat->beg_row + 1; hypre_MPI_Comm_rank(comm, &mype); mat->num_recv = 0; /* Allocate recvbuf */ /* recvbuf has num_local entries saved for the local part of x, used in matvec */ mat->recvlen = reqlen; /* used for the transpose multiply */ mat->recvbuf = (HYPRE_Real *) malloc((reqlen + num_local) * sizeof(HYPRE_Real)); for (i = 0; i < reqlen; i = j) /* j is set below */ { /* The processor that owns the row with index reqind[i] */ this_pe = MatrixRowPe(mat, reqind[i]); /* Figure out other rows we need from this_pe */ for (j = i + 1; j < reqlen; j++) { /* if row is on different pe */ if (reqind[j] < mat->beg_rows[this_pe] || reqind[j] > mat->end_rows[this_pe]) break; } /* Request rows in reqind[i..j-1] */ hypre_MPI_Isend(&reqind[i], j - i, HYPRE_MPI_INT, this_pe, 444, comm, &request); hypre_MPI_Request_free(&request); /* Count of the number of indices needed from this_pe */ outlist[this_pe] = j - i; hypre_MPI_Recv_init(&mat->recvbuf[i + num_local], j - i, hypre_MPI_DOUBLE, this_pe, 555, comm, &mat->recv_req[mat->num_recv]); hypre_MPI_Send_init(&mat->recvbuf[i + num_local], j - i, hypre_MPI_DOUBLE, this_pe, 666, comm, &mat->send_req2[mat->num_recv]); mat->num_recv++; } } /*-------------------------------------------------------------------------- * SetupSends * This function will wait for all receives to complete. 
*--------------------------------------------------------------------------*/ static void SetupSends(Matrix * mat, HYPRE_Int * inlist) { HYPRE_Int i, j, mype, npes; hypre_MPI_Request *requests; hypre_MPI_Status *statuses; MPI_Comm comm = mat->comm; hypre_MPI_Comm_rank(comm, &mype); hypre_MPI_Comm_size(comm, &npes); requests = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); statuses = (hypre_MPI_Status *) malloc(npes * sizeof(hypre_MPI_Status)); /* Determine size of and allocate sendbuf and sendind */ mat->sendlen = 0; for (i = 0; i < npes; i++) mat->sendlen += inlist[i]; mat->sendbuf = NULL; mat->sendind = NULL; if (mat->sendlen) { mat->sendbuf = (HYPRE_Real *) malloc(mat->sendlen * sizeof(HYPRE_Real)); mat->sendind = (HYPRE_Int *) malloc(mat->sendlen * sizeof(HYPRE_Int)); } j = 0; mat->num_send = 0; for (i = 0; i < npes; i++) { if (inlist[i] != 0) { /* Post receive for the actual indices */ hypre_MPI_Irecv(&mat->sendind[j], inlist[i], HYPRE_MPI_INT, i, 444, comm, &requests[mat->num_send]); /* Set up the send */ hypre_MPI_Send_init(&mat->sendbuf[j], inlist[i], hypre_MPI_DOUBLE, i, 555, comm, &mat->send_req[mat->num_send]); /* Set up the receive for the transpose */ hypre_MPI_Recv_init(&mat->sendbuf[j], inlist[i], hypre_MPI_DOUBLE, i, 666, comm, &mat->recv_req2[mat->num_send]); mat->num_send++; j += inlist[i]; } } hypre_MPI_Waitall(mat->num_send, requests, statuses); free(requests); free(statuses); /* convert global indices to local indices */ /* these are all indices on this processor */ for (i = 0; i < mat->sendlen; i++) mat->sendind[i] -= mat->beg_row; } /*-------------------------------------------------------------------------- * MatrixComplete *--------------------------------------------------------------------------*/ void MatrixComplete(Matrix * mat) { HYPRE_Int mype, npes; HYPRE_Int *outlist, *inlist; HYPRE_Int row, len, *ind; HYPRE_Real *val; hypre_MPI_Comm_rank(mat->comm, &mype); hypre_MPI_Comm_size(mat->comm, &npes); mat->recv_req = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); mat->send_req = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); mat->recv_req2 = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); mat->send_req2 = (hypre_MPI_Request *) malloc(npes * sizeof(hypre_MPI_Request)); mat->statuses = (hypre_MPI_Status *) malloc(npes * sizeof(hypre_MPI_Status)); outlist = (HYPRE_Int *) calloc(npes, sizeof(HYPRE_Int)); inlist = (HYPRE_Int *) calloc(npes, sizeof(HYPRE_Int)); /* Create Numbering object */ mat->numb = NumberingCreate(mat, PARASAILS_NROWS); SetupReceives(mat, mat->numb->num_ind - mat->numb->num_loc, &mat->numb->local_to_global[mat->numb->num_loc], outlist); hypre_MPI_Alltoall(outlist, 1, HYPRE_MPI_INT, inlist, 1, HYPRE_MPI_INT, mat->comm); SetupSends(mat, inlist); free(outlist); free(inlist); /* Convert to local indices */ for (row = 0; row <= mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); NumberingGlobalToLocal(mat->numb, len, ind, ind); } } /*-------------------------------------------------------------------------- * MatrixMatvec * Can be done in place. 
*--------------------------------------------------------------------------*/ void MatrixMatvec(Matrix * mat, HYPRE_Real * x, HYPRE_Real * y) { HYPRE_Int row, i, len, *ind; HYPRE_Real *val, temp; HYPRE_Int num_local = mat->end_row - mat->beg_row + 1; /* Set up persistent communications */ /* Assumes MatrixComplete has been called */ /* Put components of x into the right outgoing buffers */ for (i = 0; i < mat->sendlen; i++) mat->sendbuf[i] = x[mat->sendind[i]]; hypre_MPI_Startall(mat->num_recv, mat->recv_req); hypre_MPI_Startall(mat->num_send, mat->send_req); /* Copy local part of x into top part of recvbuf */ for (i = 0; i < num_local; i++) mat->recvbuf[i] = x[i]; hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses); /* do the multiply */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(row,len,ind,val,temp,i) schedule(static) #endif for (row = 0; row <= mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); temp = 0.0; for (i = 0; i < len; i++) { temp = temp + val[i] * mat->recvbuf[ind[i]]; } y[row] = temp; } hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses); } void MatrixMatvecSerial(Matrix * mat, HYPRE_Real * x, HYPRE_Real * y) { HYPRE_Int row, i, len, *ind; HYPRE_Real *val, temp; HYPRE_Int num_local = mat->end_row - mat->beg_row + 1; /* Set up persistent communications */ /* Assumes MatrixComplete has been called */ /* Put components of x into the right outgoing buffers */ for (i = 0; i < mat->sendlen; i++) mat->sendbuf[i] = x[mat->sendind[i]]; hypre_MPI_Startall(mat->num_recv, mat->recv_req); hypre_MPI_Startall(mat->num_send, mat->send_req); /* Copy local part of x into top part of recvbuf */ for (i = 0; i < num_local; i++) mat->recvbuf[i] = x[i]; hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses); /* do the multiply */ for (row = 0; row <= mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); temp = 0.0; for (i = 0; i < len; i++) { temp = temp + val[i] * mat->recvbuf[ind[i]]; } y[row] = temp; } hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses); } /*-------------------------------------------------------------------------- * MatrixMatvecTrans * Can be done in place. *--------------------------------------------------------------------------*/ void MatrixMatvecTrans(Matrix * mat, HYPRE_Real * x, HYPRE_Real * y) { HYPRE_Int row, i, len, *ind; HYPRE_Real *val; HYPRE_Int num_local = mat->end_row - mat->beg_row + 1; /* Set up persistent communications */ /* Assumes MatrixComplete has been called */ /* Post receives for local parts of the solution y */ hypre_MPI_Startall(mat->num_send, mat->recv_req2); /* initialize accumulator buffer to zero */ for (i = 0; i < mat->recvlen + num_local; i++) mat->recvbuf[i] = 0.0; /* do the multiply */ for (row = 0; row <= mat->end_row - mat->beg_row; row++) { MatrixGetRow(mat, row, &len, &ind, &val); for (i = 0; i < len; i++) { mat->recvbuf[ind[i]] += val[i] * x[row]; } } /* Now can send nonlocal parts of solution to other procs */ hypre_MPI_Startall(mat->num_recv, mat->send_req2); /* copy local part of solution into y */ for (i = 0; i < num_local; i++) y[i] = mat->recvbuf[i]; /* alternatively, loop over a wait any */ hypre_MPI_Waitall(mat->num_send, mat->recv_req2, mat->statuses); /* add all the incoming partial sums to y */ for (i = 0; i < mat->sendlen; i++) y[mat->sendind[i]] += mat->sendbuf[i]; hypre_MPI_Waitall(mat->num_recv, mat->send_req2, mat->statuses); }
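The only substantive difference between this copy of MatrixMatvec and the serial one above is the #pragma omp parallel for private(row,len,ind,val,temp,i) on the multiply loop: each y[row] is written by exactly one iteration, so rows are independent once the scratch variables are privatized. A reduced sketch of the same row-parallel pattern on a CSR-style layout follows; rowptr/colind/vals are hypothetical arrays, not the hypre Matrix type.

/* Row-parallel sparse matvec, same shape as the OpenMP loop above. */
void csr_matvec(int nrows, const int *rowptr, const int *colind,
                const double *vals, const double *x, double *y)
{
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
    for (int row = 0; row < nrows; row++)
    {
        double temp = 0.0;                  /* private scratch per row */
        for (int i = rowptr[row]; i < rowptr[row + 1]; i++)
            temp += vals[i] * x[colind[i]];
        y[row] = temp;                      /* each row written once: no race */
    }
}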
Example_declare_target.6.c
/* * @@name: declare_target.6.c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success * @@version: omp_4.5 */ #define N 100000000 float sp[N], sv1[N], sv2[N]; double dp[N], dv1[N], dv2[N]; #pragma omp declare target link(sp,sv1,sv2) \ link(dp,dv1,dv2) void s_init(float *, float *, int); void d_init(double *, double *, int); void s_output(float *, int); void d_output(double *, int); #pragma omp declare target void s_vec_mult_accum() { int i; #pragma omp parallel for for (i=0; i<N; i++) sp[i] = sv1[i] * sv2[i]; } void d_vec_mult_accum() { int i; #pragma omp parallel for for (i=0; i<N; i++) dp[i] = dv1[i] * dv2[i]; } #pragma omp end declare target int main() { s_init(sv1, sv2, N); #pragma omp target map(to:sv1,sv2) map(from:sp) s_vec_mult_accum(); s_output(sp, N); d_init(dv1, dv2, N); #pragma omp target map(to:dv1,dv2) map(from:dp) d_vec_mult_accum(); d_output(dp, N); return 0; }
/* * @@name: declare_target.6.c @@type: C @@compilable: yes @@linkable: no * @@expect: success @@version: omp_4.5 */ #define N 100000000 float sp[N], sv1[N], sv2[N]; double dp[N], dv1[N], dv2[N]; void s_init(float *, float *, int); void d_init(double *, double *, int); void s_output(float *, int); void d_output(double *, int); void s_vec_mult_accum() { int i; for (i = 0; i < N; i++) sp[i] = sv1[i] * sv2[i]; } void d_vec_mult_accum() { int i; for (i = 0; i < N; i++) dp[i] = dv1[i] * dv2[i]; } int main() { s_init(sv1, sv2, N); s_vec_mult_accum(); s_output(sp, N); d_init(dv1, dv2, N); d_vec_mult_accum(); d_output(dp, N); return 0; }
/* * @@name: declare_target.6.c @@type: C @@compilable: yes @@linkable: no * @@expect: success @@version: omp_4.5 */ #define N 100000000 float sp[N], sv1[N], sv2[N]; double dp[N], dv1[N], dv2[N]; #pragma omp declare target link(sp,sv1,sv2) \ link(dp,dv1,dv2) void s_init(float *, float *, int); void d_init(double *, double *, int); void s_output(float *, int); void d_output(double *, int); #pragma omp declare target void s_vec_mult_accum() { int i; #pragma omp parallel for for (i = 0; i < N; i++) sp[i] = sv1[i] * sv2[i]; } void d_vec_mult_accum() { int i; #pragma omp parallel for for (i = 0; i < N; i++) dp[i] = dv1[i] * dv2[i]; } #pragma omp end declare target int main() { s_init(sv1, sv2, N); #pragma omp target map(to:sv1,sv2) map(from:sp) s_vec_mult_accum(); s_output(sp, N); d_init(dv1, dv2, N); #pragma omp target map(to:dv1,dv2) map(from:dp) d_vec_mult_accum(); d_output(dp, N); return 0; }
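A note on the example above: the arrays are mapped with declare target link rather than declare target to, so no device storage is reserved at program load; memory for each set only materializes when a target construct maps it (map(to:sv1,sv2) map(from:sp)), which matters with N = 100000000 elements in two precisions. A stripped-down sketch of the same idiom follows; the array a and function scale are hypothetical.

#define M 1000
float a[M];
#pragma omp declare target link(a)   /* device copy allocated only when mapped */

#pragma omp declare target
void scale(float s)
{
    for (int i = 0; i < M; i++)
        a[i] *= s;                   /* refers to the linked device copy */
}
#pragma omp end declare target

int run(void)
{
#pragma omp target map(tofrom: a)    /* storage for a is created here */
    scale(2.0f);
    return 0;
}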
MD5_fmt.c
/* * This file is part of John the Ripper password cracker, * Copyright (c) 1996-2001,2008,2010-2012 by Solar Designer * * ...with changes in the jumbo patch, by bartavelle and magnum. * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * There's ABSOLUTELY NO WARRANTY, express or implied. */ #include <string.h> #include "arch.h" #include "misc.h" #include "simd-intrinsics.h" #include "MD5_std.h" #include "common.h" #include "formats.h" #include "cryptmd5_common.h" #if defined(_OPENMP) && defined(SIMD_PARA_MD5) #ifndef OMP_SCALE #define OMP_SCALE 4 #endif #include <omp.h> #endif #include "memdbg.h" #define FORMAT_LABEL "md5crypt" #define FORMAT_NAME "crypt(3) $1$" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 15 #define CIPHERTEXT_LENGTH 22 #ifdef SIMD_PARA_MD5 #define BINARY_SIZE 16 #else #define BINARY_SIZE 4 #endif #define BINARY_ALIGN 4 #define SALT_SIZE 9 #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT MD5_N #define MAX_KEYS_PER_CRYPT MD5_N static struct fmt_tests tests[] = { {"$1$12345678$aIccj83HRDBo6ux1bVx7D1", "0123456789ABCDE"}, {"$apr1$Q6ZYh...$RV6ft2bZ8j.NGrxLYaJt9.", "test"}, {"$1$12345678$f8QoJuo0DpBRfQSD0vglc1", "12345678"}, {"$1$$qRPK7m23GJusamGpoGLby/", ""}, {"$apr1$a2Jqm...$grFrwEgiQleDr0zR4Jx1b.", "15 chars is max"}, {"$1$$AuJCr07mI7DSew03TmBIv/", "no salt"}, {"$1$`!@#%^&*$E6hD76/pKTS8qToBCkux30", "invalid salt"}, {"$1$12345678$xek.CpjQUVgdf/P2N9KQf/", ""}, {"$1$1234$BdIMOAWFOV2AQlLsrN/Sw.", "1234"}, {"$apr1$rBXqc...$NlXxN9myBOk95T0AyLAsJ0", "john"}, {"$apr1$Grpld/..$qp5GyjwM2dnA5Cdej9b411", "the"}, {"$apr1$GBx.D/..$yfVeeYFCIiEXInfRhBRpy/", "ripper"}, {"$1$bb$19smCEBG0Q1pVil0/HqK./", "aaaaa"}, {"$1$coin$rebm0t9KJ56mgGWJF5o5M0", "lapin"}, {"$1$pouet$/Ecz/vyk.zCYvrr6wB78h0", "canard"}, {"$1$test2$02MCIATVoxq3IhgK6XRkb1", "test1"}, {"$1$aussi$X67z3kXsWo92F15uChx1H1", "felicie"}, {"$1$boire$gf.YM2y3InYEu9.NbVr.v0", "manger"}, {"$1$bas$qvkmmWnVHRCSv/6LQ1doH/", "haut"}, {"$1$gauche$EPvd6LZlrgb0MMFPxUrJN1", "droite"}, /* following hashes are AIX non-standard smd5 hashes */ {"{smd5}s8/xSJ/v$uGam4GB8hOjTLQqvBfxJ2/", "password"}, {"{smd5}alRJaSLb$aKM3H1.h1ycXl5GEVDH1e1", "aixsucks?"}, {"{smd5}eLB0QWeS$Eg.YfWY8clZuCxF0xNrKg.", "0123456789ABCDE"}, /* following hashes are AIX standard smd5 hashes (with corrected tag) * lpa_options = std_hash=true */ {"$1$JVDbGx8K$T9h8HK4LZxeLPMTAxCfpc1", "password"}, {"$1$1Cu6fEvv$42kuaJ5fMEqyVStPuFG040", "0123456789ABCDE"}, {"$1$ql5x.xXL$vYVDhExol2xUBBpERRWcn1", "jtr>hashcat"}, {"$1$27iyq7Ya$miN09fW1Scj0DHVNyewoU/", ""}, {"$1$84Othc1n$v1cuReaa5lRdGuHaOa76n0", "a"}, {"$1$4zq0BsCR$U2ua9WZtDEhzy4gFSiLxN1", "aa"}, {"$1$DKwjKWxp$PY6PdlPZsXjOppPDoFOz4.", "aaa"}, {"$1$OKDV6ppN$viTVmH48bSePiCrMvXT/./", "aaaa"}, {"$1$QEWsCY0O$xrTTMKTepiHMp7Oxgz0pX/", "aaaaa"}, {"$1$5dfdk2dF$XiJBPNrfKcCgdQ/kcoB40/", "aaaaaa"}, {"$1$Ps6A1Cy6$WsvLg9cQhm9JU0rXkLEtz.", "aaaaaaa"}, {"$1$9IK7nZ4M$4nx7Mdj05KGPJX/mZaDrh.", "aaaaaaaa"}, {"$1$l3pNTqwT$GAc.dcRaxCvC20CFGCjp4/", "aaaaaaaaa"}, {"$1$jSAARhJR$6daQ/ekjAL0MgOUgGJyp10", "aaaaaaaaaa"}, {"$1$wk3Xwqqg$2AtdiucwJvJgbaVT1jWpb0", "aaaaaaaaaaa"}, {"$1$G6Fn69Ei$d7AKJUOIdz/gO4Utc0TQP1", "aaaaaaaaaaaa"}, {"$1$A7XJ7lGK$W5jTnH/4lW4XwZ.6F7n1N.", "aaaaaaaaaaaaa"}, {"$1$Rcm46RfA$LfdIK/OP16yHzMYHSlx/B.", "aaaaaaaaaaaaaa"}, {"$1$4bCSSJMN$TcYKTsukD4SFJE1n4MwMZ/", "aaaaaaaaaaaaaaa"}, #if PLAINTEXT_LENGTH > 15 {"$1$mJxBkkl8$u7OHfWCPmNxvf0um7hH89.", "aaaaaaaaaaaaaaaa"}, {"$1$Ub1gBUt4$TNaLxU7Pq5mk/MiDEb60b/", "aaaaaaaaaaaaaaaaa"}, 
{"$1$8ot7QScR$x.p4vjIgdFxxS83x29PkJ0", "aaaaaaaaaaaaaaaaaa"}, {"$1$wRi4OjD3$eJjKD2AwLMWfOTRYA30zn.", "aaaaaaaaaaaaaaaaaaa"}, {"$1$lmektrsg$2KSRY4EUFzsYNMg80fG4/0", "aaaaaaaaaaaaaaaaaaaa"}, {"$1$tgVBKBmE$YRvzsi7qHP2MC1Atg8VCV.", "aaaaaaaaaaaaaaaaaaaaa"}, {"$1$oTsk88YC$Eh435T1BQzmjQekfqkHof/", "aaaaaaaaaaaaaaaaaaaaaa"}, {"$1$ykxSZEfP$hJrFeGOFk049L.94Mgggj/", "aaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$LBK4p5tD$5/gAIx8/7hpTVwDC/.KQv/", "aaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$fkEasaUI$G7CelOWHkol2nVHN8XQP40", "aaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$gRevVzeY$eMMQrsl5OHL5dP1p/ktJc/", "aaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$164TNEjj$ppoV6Ju6Vu63j1OlM4zit/", "aaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$ErPmhjp2$lZZstb2M455Xhk50eeH4i/", "aaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$NUssS5fT$QaS4Ywt0IwzxbE0FAGnXn0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$NxlTyiJ7$gxkXTEJdeTzY8P6tqKmcz.", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$Cmy9x7gW$kamvHI42Kh1CH4Shy6g6S/", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$IsuapfCX$4Yq0Adq5nNZgl0LwbSl5Y0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$rSZfNcKX$N4XPvGrfhKsyoEcRSaqmG0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, #endif {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; #ifdef SIMD_PARA_MD5 static unsigned char cursalt[SALT_SIZE]; static int CryptType; static MD5_word (*sout); static int omp_para = 1; #endif static void init(struct fmt_main *self) { MD5_std_init(self); #if defined(_OPENMP) && defined(SIMD_PARA_MD5) omp_para = omp_get_max_threads(); if (omp_para < 1) omp_para = 1; self->params.min_keys_per_crypt = MD5_N * omp_para; omp_para *= OMP_SCALE; self->params.max_keys_per_crypt = MD5_N * omp_para; #elif MD5_std_mt self->params.min_keys_per_crypt = MD5_std_min_kpc; self->params.max_keys_per_crypt = MD5_std_max_kpc; #endif saved_key = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*saved_key), MEM_ALIGN_CACHE); #ifdef SIMD_PARA_MD5 sout = mem_calloc(self->params.max_keys_per_crypt, sizeof(*sout) * BINARY_SIZE); #endif } static void done(void) { #ifdef SIMD_PARA_MD5 MEM_FREE(sout); #endif MEM_FREE(saved_key); } static int get_hash_0(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_0; #else init_t(); return MD5_out[index][0] & PH_MASK_0; #endif } static int get_hash_1(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_1; #else init_t(); return MD5_out[index][0] & PH_MASK_1; #endif } static int get_hash_2(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_2; #else init_t(); return MD5_out[index][0] & PH_MASK_2; #endif } static int get_hash_3(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_3; #else init_t(); return MD5_out[index][0] & PH_MASK_3; #endif } static int get_hash_4(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_4; #else init_t(); return MD5_out[index][0] & PH_MASK_4; #endif } static int get_hash_5(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & 
PH_MASK_5; #else init_t(); return MD5_out[index][0] & PH_MASK_5; #endif } static int get_hash_6(int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_6; #else init_t(); return MD5_out[index][0] & PH_MASK_6; #endif } static int salt_hash(void *salt) { unsigned int i, h, retval; retval = 0; for (i = 0; i <= 6; i += 2) { h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])]; h ^= ((unsigned char *)salt)[i + 1]; h <<= 6; h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i + 1])]; h ^= ((unsigned char *)salt)[i]; retval += h; } retval ^= retval >> SALT_HASH_LOG; retval &= SALT_HASH_SIZE - 1; return retval; } static void set_key(char *key, int index) { #ifndef SIMD_PARA_MD5 MD5_std_set_key(key, index); #endif strnfcpy(saved_key[index], key, PLAINTEXT_LENGTH); } static char *get_key(int index) { saved_key[index][PLAINTEXT_LENGTH] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; #ifdef SIMD_PARA_MD5 #ifdef _OPENMP int t; #pragma omp parallel for for (t = 0; t < omp_para; t++) md5cryptsse((unsigned char *)(&saved_key[t*MD5_N]), cursalt, (char *)(&sout[t*MD5_N*BINARY_SIZE/sizeof(MD5_word)]), CryptType); #else md5cryptsse((unsigned char *)saved_key, cursalt, (char *)sout, CryptType); #endif #else MD5_std_crypt(count); #endif return count; } static int cmp_all(void *binary, int count) { #ifdef SIMD_PARA_MD5 unsigned int x,y; for(y=0;y<SIMD_PARA_MD5*omp_para;y++) for(x=0;x<SIMD_COEF_32;x++) { if( ((MD5_word *)binary)[0] == ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] ) return 1; } return 0; #else #if MD5_std_mt int t, n = (count + (MD5_N - 1)) / MD5_N; #endif for_each_t(n) { #if MD5_X2 if (*(MD5_word *)binary == MD5_out[0][0] || *(MD5_word *)binary == MD5_out[1][0]) return 1; #else if (*(MD5_word *)binary == MD5_out[0][0]) return 1; #endif } return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_PARA_MD5 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; if(((unsigned int*)binary)[0] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+0*SIMD_COEF_32]) return 0; if(((unsigned int*)binary)[1] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+1*SIMD_COEF_32]) return 0; if(((unsigned int*)binary)[2] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+2*SIMD_COEF_32]) return 0; if(((unsigned int*)binary)[3] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+3*SIMD_COEF_32]) return 0; return 1; #else init_t(); return *(MD5_word *)binary == MD5_out[index][0]; #endif } static int cmp_exact(char *source, int index) { #ifdef SIMD_PARA_MD5 return 1; #else init_t(); return !memcmp(MD5_std_get_binary(source), MD5_out[index], sizeof(MD5_binary)); #endif } static void set_salt(void *salt) { #ifdef SIMD_PARA_MD5 memcpy(cursalt, salt, SALT_SIZE); CryptType = cursalt[8]; cursalt[8] = 0; #endif MD5_std_set_salt(salt); } static void *get_salt(char *ciphertext) { return MD5_std_get_salt(ciphertext); } static void *get_binary(char *ciphertext) { return MD5_std_get_binary(ciphertext); } struct fmt_main fmt_MD5 = { { FORMAT_LABEL, FORMAT_NAME, "MD5 " MD5_ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #if MD5_std_mt || defined(SIMD_PARA_MD5) FMT_OMP | #endif FMT_CASE | FMT_8_BIT, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, cryptmd5_common_valid, fmt_default_split, 
get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } };
#include <string.h> #include "arch.h" #include "misc.h" #include "simd-intrinsics.h" #include "MD5_std.h" #include "common.h" #include "formats.h" #include "cryptmd5_common.h" #include "memdbg.h" #define FORMAT_LABEL "md5crypt" #define FORMAT_NAME "crypt(3) $1$" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 15 #define CIPHERTEXT_LENGTH 22 #ifdef SIMD_PARA_MD5 #define BINARY_SIZE 16 #else #define BINARY_SIZE 4 #endif #define BINARY_ALIGN 4 #define SALT_SIZE 9 #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT MD5_N #define MAX_KEYS_PER_CRYPT MD5_N static struct fmt_tests tests[] = { {"$1$12345678$aIccj83HRDBo6ux1bVx7D1", "0123456789ABCDE"}, {"$apr1$Q6ZYh...$RV6ft2bZ8j.NGrxLYaJt9.", "test"}, {"$1$12345678$f8QoJuo0DpBRfQSD0vglc1", "12345678"}, {"$1$$qRPK7m23GJusamGpoGLby/", ""}, {"$apr1$a2Jqm...$grFrwEgiQleDr0zR4Jx1b.", "15 chars is max"}, {"$1$$AuJCr07mI7DSew03TmBIv/", "no salt"}, {"$1$`!@#%^&*$E6hD76/pKTS8qToBCkux30", "invalid salt"}, {"$1$12345678$xek.CpjQUVgdf/P2N9KQf/", ""}, {"$1$1234$BdIMOAWFOV2AQlLsrN/Sw.", "1234"}, {"$apr1$rBXqc...$NlXxN9myBOk95T0AyLAsJ0", "john"}, {"$apr1$Grpld/..$qp5GyjwM2dnA5Cdej9b411", "the"}, {"$apr1$GBx.D/..$yfVeeYFCIiEXInfRhBRpy/", "ripper"}, {"$1$bb$19smCEBG0Q1pVil0/HqK./", "aaaaa"}, {"$1$coin$rebm0t9KJ56mgGWJF5o5M0", "lapin"}, {"$1$pouet$/Ecz/vyk.zCYvrr6wB78h0", "canard"}, {"$1$test2$02MCIATVoxq3IhgK6XRkb1", "test1"}, {"$1$aussi$X67z3kXsWo92F15uChx1H1", "felicie"}, {"$1$boire$gf.YM2y3InYEu9.NbVr.v0", "manger"}, {"$1$bas$qvkmmWnVHRCSv/6LQ1doH/", "haut"}, {"$1$gauche$EPvd6LZlrgb0MMFPxUrJN1", "droite"}, /* following hashes are AIX non-standard smd5 hashes */ {"{smd5}s8/xSJ/v$uGam4GB8hOjTLQqvBfxJ2/", "password"}, {"{smd5}alRJaSLb$aKM3H1.h1ycXl5GEVDH1e1", "aixsucks?"}, {"{smd5}eLB0QWeS$Eg.YfWY8clZuCxF0xNrKg.", "0123456789ABCDE"}, /* * following hashes are AIX standard smd5 hashes (with corrected tag) * lpa_options = std_hash=true */ {"$1$JVDbGx8K$T9h8HK4LZxeLPMTAxCfpc1", "password"}, {"$1$1Cu6fEvv$42kuaJ5fMEqyVStPuFG040", "0123456789ABCDE"}, {"$1$ql5x.xXL$vYVDhExol2xUBBpERRWcn1", "jtr>hashcat"}, {"$1$27iyq7Ya$miN09fW1Scj0DHVNyewoU/", ""}, {"$1$84Othc1n$v1cuReaa5lRdGuHaOa76n0", "a"}, {"$1$4zq0BsCR$U2ua9WZtDEhzy4gFSiLxN1", "aa"}, {"$1$DKwjKWxp$PY6PdlPZsXjOppPDoFOz4.", "aaa"}, {"$1$OKDV6ppN$viTVmH48bSePiCrMvXT/./", "aaaa"}, {"$1$QEWsCY0O$xrTTMKTepiHMp7Oxgz0pX/", "aaaaa"}, {"$1$5dfdk2dF$XiJBPNrfKcCgdQ/kcoB40/", "aaaaaa"}, {"$1$Ps6A1Cy6$WsvLg9cQhm9JU0rXkLEtz.", "aaaaaaa"}, {"$1$9IK7nZ4M$4nx7Mdj05KGPJX/mZaDrh.", "aaaaaaaa"}, {"$1$l3pNTqwT$GAc.dcRaxCvC20CFGCjp4/", "aaaaaaaaa"}, {"$1$jSAARhJR$6daQ/ekjAL0MgOUgGJyp10", "aaaaaaaaaa"}, {"$1$wk3Xwqqg$2AtdiucwJvJgbaVT1jWpb0", "aaaaaaaaaaa"}, {"$1$G6Fn69Ei$d7AKJUOIdz/gO4Utc0TQP1", "aaaaaaaaaaaa"}, {"$1$A7XJ7lGK$W5jTnH/4lW4XwZ.6F7n1N.", "aaaaaaaaaaaaa"}, {"$1$Rcm46RfA$LfdIK/OP16yHzMYHSlx/B.", "aaaaaaaaaaaaaa"}, {"$1$4bCSSJMN$TcYKTsukD4SFJE1n4MwMZ/", "aaaaaaaaaaaaaaa"}, #if PLAINTEXT_LENGTH > 15 {"$1$mJxBkkl8$u7OHfWCPmNxvf0um7hH89.", "aaaaaaaaaaaaaaaa"}, {"$1$Ub1gBUt4$TNaLxU7Pq5mk/MiDEb60b/", "aaaaaaaaaaaaaaaaa"}, {"$1$8ot7QScR$x.p4vjIgdFxxS83x29PkJ0", "aaaaaaaaaaaaaaaaaa"}, {"$1$wRi4OjD3$eJjKD2AwLMWfOTRYA30zn.", "aaaaaaaaaaaaaaaaaaa"}, {"$1$lmektrsg$2KSRY4EUFzsYNMg80fG4/0", "aaaaaaaaaaaaaaaaaaaa"}, {"$1$tgVBKBmE$YRvzsi7qHP2MC1Atg8VCV.", "aaaaaaaaaaaaaaaaaaaaa"}, {"$1$oTsk88YC$Eh435T1BQzmjQekfqkHof/", "aaaaaaaaaaaaaaaaaaaaaa"}, {"$1$ykxSZEfP$hJrFeGOFk049L.94Mgggj/", "aaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$LBK4p5tD$5/gAIx8/7hpTVwDC/.KQv/", "aaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$fkEasaUI$G7CelOWHkol2nVHN8XQP40", 
"aaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$gRevVzeY$eMMQrsl5OHL5dP1p/ktJc/", "aaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$164TNEjj$ppoV6Ju6Vu63j1OlM4zit/", "aaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$ErPmhjp2$lZZstb2M455Xhk50eeH4i/", "aaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$NUssS5fT$QaS4Ywt0IwzxbE0FAGnXn0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$NxlTyiJ7$gxkXTEJdeTzY8P6tqKmcz.", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$Cmy9x7gW$kamvHI42Kh1CH4Shy6g6S/", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$IsuapfCX$4Yq0Adq5nNZgl0LwbSl5Y0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$rSZfNcKX$N4XPvGrfhKsyoEcRSaqmG0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, #endif {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; #ifdef SIMD_PARA_MD5 static unsigned char cursalt[SALT_SIZE]; static int CryptType; static MD5_word(*sout); static int omp_para = 1; #endif static void init(struct fmt_main *self) { MD5_std_init(self); saved_key = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*saved_key), MEM_ALIGN_CACHE); #ifdef SIMD_PARA_MD5 sout = mem_calloc(self->params.max_keys_per_crypt, sizeof(*sout) * BINARY_SIZE); #endif } static void done(void) { #ifdef SIMD_PARA_MD5 MEM_FREE(sout); #endif MEM_FREE(saved_key); } static int get_hash_0(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & PH_MASK_0; #else init_t(); return MD5_out[index][0] & PH_MASK_0; #endif } static int get_hash_1(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & PH_MASK_1; #else init_t(); return MD5_out[index][0] & PH_MASK_1; #endif } static int get_hash_2(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & PH_MASK_2; #else init_t(); return MD5_out[index][0] & PH_MASK_2; #endif } static int get_hash_3(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & PH_MASK_3; #else init_t(); return MD5_out[index][0] & PH_MASK_3; #endif } static int get_hash_4(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & PH_MASK_4; #else init_t(); return MD5_out[index][0] & PH_MASK_4; #endif } static int get_hash_5(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & PH_MASK_5; #else init_t(); return MD5_out[index][0] & PH_MASK_5; #endif } static int get_hash_6(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & PH_MASK_6; #else init_t(); return MD5_out[index][0] & PH_MASK_6; #endif } static int salt_hash(void *salt) { unsigned int i, h, retval; retval = 0; for (i = 0; i <= 6; i += 2) { h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])]; h ^= ((unsigned char *)salt)[i + 1]; h <<= 6; h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i + 1])]; h ^= ((unsigned char *)salt)[i]; retval += h; } retval ^= retval >> SALT_HASH_LOG; retval &= SALT_HASH_SIZE - 1; return retval; } static void set_key(char *key, 
int index) { #ifndef SIMD_PARA_MD5 MD5_std_set_key(key, index); #endif strnfcpy(saved_key[index], key, PLAINTEXT_LENGTH); } static char * get_key(int index) { saved_key[index][PLAINTEXT_LENGTH] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; #ifdef SIMD_PARA_MD5 md5cryptsse((unsigned char *)saved_key, cursalt, (char *)sout, CryptType); #else MD5_std_crypt(count); #endif return count; } static int cmp_all(void *binary, int count) { #ifdef SIMD_PARA_MD5 unsigned int x, y; for (y = 0; y < SIMD_PARA_MD5 * omp_para; y++) for (x = 0; x < SIMD_COEF_32; x++) { if (((MD5_word *) binary)[0] == ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4]) return 1; } return 0; #else #if MD5_std_mt int t, n = (count + (MD5_N - 1)) / MD5_N; #endif for_each_t(n) { #if MD5_X2 if (*(MD5_word *) binary == MD5_out[0][0] || *(MD5_word *) binary == MD5_out[1][0]) return 1; #else if (*(MD5_word *) binary == MD5_out[0][0]) return 1; #endif } return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; if (((unsigned int *)binary)[0] != ((unsigned int *)sout)[x + y * SIMD_COEF_32 * 4 + 0 * SIMD_COEF_32]) return 0; if (((unsigned int *)binary)[1] != ((unsigned int *)sout)[x + y * SIMD_COEF_32 * 4 + 1 * SIMD_COEF_32]) return 0; if (((unsigned int *)binary)[2] != ((unsigned int *)sout)[x + y * SIMD_COEF_32 * 4 + 2 * SIMD_COEF_32]) return 0; if (((unsigned int *)binary)[3] != ((unsigned int *)sout)[x + y * SIMD_COEF_32 * 4 + 3 * SIMD_COEF_32]) return 0; return 1; #else init_t(); return *(MD5_word *) binary == MD5_out[index][0]; #endif } static int cmp_exact(char *source, int index) { #ifdef SIMD_PARA_MD5 return 1; #else init_t(); return !memcmp(MD5_std_get_binary(source), MD5_out[index], sizeof(MD5_binary)); #endif } static void set_salt(void *salt) { #ifdef SIMD_PARA_MD5 memcpy(cursalt, salt, SALT_SIZE); CryptType = cursalt[8]; cursalt[8] = 0; #endif MD5_std_set_salt(salt); } static void * get_salt(char *ciphertext) { return MD5_std_get_salt(ciphertext); } static void * get_binary(char *ciphertext) { return MD5_std_get_binary(ciphertext); } struct fmt_main fmt_MD5 = { { FORMAT_LABEL, FORMAT_NAME, "MD5 " MD5_ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #if MD5_std_mt || defined(SIMD_PARA_MD5) FMT_OMP | #endif FMT_CASE | FMT_8_BIT, {NULL}, tests }, { init, done, fmt_default_reset, fmt_default_prepare, cryptmd5_common_valid, fmt_default_split, get_binary, get_salt, {NULL}, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } };
#include <string.h> #include "arch.h" #include "misc.h" #include "simd-intrinsics.h" #include "MD5_std.h" #include "common.h" #include "formats.h" #include "cryptmd5_common.h" #if defined(_OPENMP) && defined(SIMD_PARA_MD5) #ifndef OMP_SCALE #define OMP_SCALE 4 #endif #include <omp.h> #endif #include "memdbg.h" #define FORMAT_LABEL "md5crypt" #define FORMAT_NAME "crypt(3) $1$" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 15 #define CIPHERTEXT_LENGTH 22 #ifdef SIMD_PARA_MD5 #define BINARY_SIZE 16 #else #define BINARY_SIZE 4 #endif #define BINARY_ALIGN 4 #define SALT_SIZE 9 #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT MD5_N #define MAX_KEYS_PER_CRYPT MD5_N static struct fmt_tests tests[] = { {"$1$12345678$aIccj83HRDBo6ux1bVx7D1", "0123456789ABCDE"}, {"$apr1$Q6ZYh...$RV6ft2bZ8j.NGrxLYaJt9.", "test"}, {"$1$12345678$f8QoJuo0DpBRfQSD0vglc1", "12345678"}, {"$1$$qRPK7m23GJusamGpoGLby/", ""}, {"$apr1$a2Jqm...$grFrwEgiQleDr0zR4Jx1b.", "15 chars is max"}, {"$1$$AuJCr07mI7DSew03TmBIv/", "no salt"}, {"$1$`!@#%^&*$E6hD76/pKTS8qToBCkux30", "invalid salt"}, {"$1$12345678$xek.CpjQUVgdf/P2N9KQf/", ""}, {"$1$1234$BdIMOAWFOV2AQlLsrN/Sw.", "1234"}, {"$apr1$rBXqc...$NlXxN9myBOk95T0AyLAsJ0", "john"}, {"$apr1$Grpld/..$qp5GyjwM2dnA5Cdej9b411", "the"}, {"$apr1$GBx.D/..$yfVeeYFCIiEXInfRhBRpy/", "ripper"}, {"$1$bb$19smCEBG0Q1pVil0/HqK./", "aaaaa"}, {"$1$coin$rebm0t9KJ56mgGWJF5o5M0", "lapin"}, {"$1$pouet$/Ecz/vyk.zCYvrr6wB78h0", "canard"}, {"$1$test2$02MCIATVoxq3IhgK6XRkb1", "test1"}, {"$1$aussi$X67z3kXsWo92F15uChx1H1", "felicie"}, {"$1$boire$gf.YM2y3InYEu9.NbVr.v0", "manger"}, {"$1$bas$qvkmmWnVHRCSv/6LQ1doH/", "haut"}, {"$1$gauche$EPvd6LZlrgb0MMFPxUrJN1", "droite"}, /* following hashes are AIX non-standard smd5 hashes */ {"{smd5}s8/xSJ/v$uGam4GB8hOjTLQqvBfxJ2/", "password"}, {"{smd5}alRJaSLb$aKM3H1.h1ycXl5GEVDH1e1", "aixsucks?"}, {"{smd5}eLB0QWeS$Eg.YfWY8clZuCxF0xNrKg.", "0123456789ABCDE"}, /* * following hashes are AIX standard smd5 hashes (with corrected tag) * lpa_options = std_hash=true */ {"$1$JVDbGx8K$T9h8HK4LZxeLPMTAxCfpc1", "password"}, {"$1$1Cu6fEvv$42kuaJ5fMEqyVStPuFG040", "0123456789ABCDE"}, {"$1$ql5x.xXL$vYVDhExol2xUBBpERRWcn1", "jtr>hashcat"}, {"$1$27iyq7Ya$miN09fW1Scj0DHVNyewoU/", ""}, {"$1$84Othc1n$v1cuReaa5lRdGuHaOa76n0", "a"}, {"$1$4zq0BsCR$U2ua9WZtDEhzy4gFSiLxN1", "aa"}, {"$1$DKwjKWxp$PY6PdlPZsXjOppPDoFOz4.", "aaa"}, {"$1$OKDV6ppN$viTVmH48bSePiCrMvXT/./", "aaaa"}, {"$1$QEWsCY0O$xrTTMKTepiHMp7Oxgz0pX/", "aaaaa"}, {"$1$5dfdk2dF$XiJBPNrfKcCgdQ/kcoB40/", "aaaaaa"}, {"$1$Ps6A1Cy6$WsvLg9cQhm9JU0rXkLEtz.", "aaaaaaa"}, {"$1$9IK7nZ4M$4nx7Mdj05KGPJX/mZaDrh.", "aaaaaaaa"}, {"$1$l3pNTqwT$GAc.dcRaxCvC20CFGCjp4/", "aaaaaaaaa"}, {"$1$jSAARhJR$6daQ/ekjAL0MgOUgGJyp10", "aaaaaaaaaa"}, {"$1$wk3Xwqqg$2AtdiucwJvJgbaVT1jWpb0", "aaaaaaaaaaa"}, {"$1$G6Fn69Ei$d7AKJUOIdz/gO4Utc0TQP1", "aaaaaaaaaaaa"}, {"$1$A7XJ7lGK$W5jTnH/4lW4XwZ.6F7n1N.", "aaaaaaaaaaaaa"}, {"$1$Rcm46RfA$LfdIK/OP16yHzMYHSlx/B.", "aaaaaaaaaaaaaa"}, {"$1$4bCSSJMN$TcYKTsukD4SFJE1n4MwMZ/", "aaaaaaaaaaaaaaa"}, #if PLAINTEXT_LENGTH > 15 {"$1$mJxBkkl8$u7OHfWCPmNxvf0um7hH89.", "aaaaaaaaaaaaaaaa"}, {"$1$Ub1gBUt4$TNaLxU7Pq5mk/MiDEb60b/", "aaaaaaaaaaaaaaaaa"}, {"$1$8ot7QScR$x.p4vjIgdFxxS83x29PkJ0", "aaaaaaaaaaaaaaaaaa"}, {"$1$wRi4OjD3$eJjKD2AwLMWfOTRYA30zn.", "aaaaaaaaaaaaaaaaaaa"}, {"$1$lmektrsg$2KSRY4EUFzsYNMg80fG4/0", "aaaaaaaaaaaaaaaaaaaa"}, {"$1$tgVBKBmE$YRvzsi7qHP2MC1Atg8VCV.", "aaaaaaaaaaaaaaaaaaaaa"}, {"$1$oTsk88YC$Eh435T1BQzmjQekfqkHof/", "aaaaaaaaaaaaaaaaaaaaaa"}, {"$1$ykxSZEfP$hJrFeGOFk049L.94Mgggj/", 
"aaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$LBK4p5tD$5/gAIx8/7hpTVwDC/.KQv/", "aaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$fkEasaUI$G7CelOWHkol2nVHN8XQP40", "aaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$gRevVzeY$eMMQrsl5OHL5dP1p/ktJc/", "aaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$164TNEjj$ppoV6Ju6Vu63j1OlM4zit/", "aaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$ErPmhjp2$lZZstb2M455Xhk50eeH4i/", "aaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$NUssS5fT$QaS4Ywt0IwzxbE0FAGnXn0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$NxlTyiJ7$gxkXTEJdeTzY8P6tqKmcz.", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$Cmy9x7gW$kamvHI42Kh1CH4Shy6g6S/", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$IsuapfCX$4Yq0Adq5nNZgl0LwbSl5Y0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"$1$rSZfNcKX$N4XPvGrfhKsyoEcRSaqmG0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, #endif {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; #ifdef SIMD_PARA_MD5 static unsigned char cursalt[SALT_SIZE]; static int CryptType; static MD5_word(*sout); static int omp_para = 1; #endif static void init(struct fmt_main *self) { MD5_std_init(self); #if defined(_OPENMP) && defined(SIMD_PARA_MD5) omp_para = omp_get_max_threads(); if (omp_para < 1) omp_para = 1; self->params.min_keys_per_crypt = MD5_N * omp_para; omp_para *= OMP_SCALE; self->params.max_keys_per_crypt = MD5_N * omp_para; #elif MD5_std_mt self->params.min_keys_per_crypt = MD5_std_min_kpc; self->params.max_keys_per_crypt = MD5_std_max_kpc; #endif saved_key = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*saved_key), MEM_ALIGN_CACHE); #ifdef SIMD_PARA_MD5 sout = mem_calloc(self->params.max_keys_per_crypt, sizeof(*sout) * BINARY_SIZE); #endif } static void done(void) { #ifdef SIMD_PARA_MD5 MEM_FREE(sout); #endif MEM_FREE(saved_key); } static int get_hash_0(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & PH_MASK_0; #else init_t(); return MD5_out[index][0] & PH_MASK_0; #endif } static int get_hash_1(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & PH_MASK_1; #else init_t(); return MD5_out[index][0] & PH_MASK_1; #endif } static int get_hash_2(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & PH_MASK_2; #else init_t(); return MD5_out[index][0] & PH_MASK_2; #endif } static int get_hash_3(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & PH_MASK_3; #else init_t(); return MD5_out[index][0] & PH_MASK_3; #endif } static int get_hash_4(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & PH_MASK_4; #else init_t(); return MD5_out[index][0] & PH_MASK_4; #endif } static int get_hash_5(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & PH_MASK_5; #else init_t(); return MD5_out[index][0] & PH_MASK_5; #endif } static int get_hash_6(int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; return ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4] & 
PH_MASK_6; #else init_t(); return MD5_out[index][0] & PH_MASK_6; #endif } static int salt_hash(void *salt) { unsigned int i, h, retval; retval = 0; for (i = 0; i <= 6; i += 2) { h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])]; h ^= ((unsigned char *)salt)[i + 1]; h <<= 6; h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i + 1])]; h ^= ((unsigned char *)salt)[i]; retval += h; } retval ^= retval >> SALT_HASH_LOG; retval &= SALT_HASH_SIZE - 1; return retval; } static void set_key(char *key, int index) { #ifndef SIMD_PARA_MD5 MD5_std_set_key(key, index); #endif strnfcpy(saved_key[index], key, PLAINTEXT_LENGTH); } static char * get_key(int index) { saved_key[index][PLAINTEXT_LENGTH] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; #ifdef SIMD_PARA_MD5 #ifdef _OPENMP int t; #pragma omp parallel for for (t = 0; t < omp_para; t++) md5cryptsse((unsigned char *)(&saved_key[t * MD5_N]), cursalt, (char *)(&sout[t * MD5_N * BINARY_SIZE / sizeof(MD5_word)]), CryptType); #else md5cryptsse((unsigned char *)saved_key, cursalt, (char *)sout, CryptType); #endif #else MD5_std_crypt(count); #endif return count; } static int cmp_all(void *binary, int count) { #ifdef SIMD_PARA_MD5 unsigned int x, y; for (y = 0; y < SIMD_PARA_MD5 * omp_para; y++) for (x = 0; x < SIMD_COEF_32; x++) { if (((MD5_word *) binary)[0] == ((MD5_word *) sout)[x + y * SIMD_COEF_32 * 4]) return 1; } return 0; #else #if MD5_std_mt int t, n = (count + (MD5_N - 1)) / MD5_N; #endif for_each_t(n) { #if MD5_X2 if (*(MD5_word *) binary == MD5_out[0][0] || *(MD5_word *) binary == MD5_out[1][0]) return 1; #else if (*(MD5_word *) binary == MD5_out[0][0]) return 1; #endif } return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_PARA_MD5 unsigned int x, y; x = index & (SIMD_COEF_32 - 1); y = (unsigned int)index / SIMD_COEF_32; if (((unsigned int *)binary)[0] != ((unsigned int *)sout)[x + y * SIMD_COEF_32 * 4 + 0 * SIMD_COEF_32]) return 0; if (((unsigned int *)binary)[1] != ((unsigned int *)sout)[x + y * SIMD_COEF_32 * 4 + 1 * SIMD_COEF_32]) return 0; if (((unsigned int *)binary)[2] != ((unsigned int *)sout)[x + y * SIMD_COEF_32 * 4 + 2 * SIMD_COEF_32]) return 0; if (((unsigned int *)binary)[3] != ((unsigned int *)sout)[x + y * SIMD_COEF_32 * 4 + 3 * SIMD_COEF_32]) return 0; return 1; #else init_t(); return *(MD5_word *) binary == MD5_out[index][0]; #endif } static int cmp_exact(char *source, int index) { #ifdef SIMD_PARA_MD5 return 1; #else init_t(); return !memcmp(MD5_std_get_binary(source), MD5_out[index], sizeof(MD5_binary)); #endif } static void set_salt(void *salt) { #ifdef SIMD_PARA_MD5 memcpy(cursalt, salt, SALT_SIZE); CryptType = cursalt[8]; cursalt[8] = 0; #endif MD5_std_set_salt(salt); } static void * get_salt(char *ciphertext) { return MD5_std_get_salt(ciphertext); } static void * get_binary(char *ciphertext) { return MD5_std_get_binary(ciphertext); } struct fmt_main fmt_MD5 = { { FORMAT_LABEL, FORMAT_NAME, "MD5 " MD5_ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #if MD5_std_mt || defined(SIMD_PARA_MD5) FMT_OMP | #endif FMT_CASE | FMT_8_BIT, {NULL}, tests }, { init, done, fmt_default_reset, fmt_default_prepare, cryptmd5_common_valid, fmt_default_split, get_binary, get_salt, {NULL}, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, 
fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } };
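One detail worth calling out in the format above: under SIMD_PARA_MD5, init() sizes the key batch to MD5_N * omp_para, so crypt_all() can hand each OpenMP thread a disjoint block of MD5_N keys and a matching slice of sout with no synchronization inside the loop. A self-contained sketch of that partitioning shape follows; BLOCK, PARA, and process_block() are illustrative stand-ins, not the real md5cryptsse() interface:

#include <stdio.h>

#define BLOCK 8  /* stand-in for MD5_N: keys hashed per SIMD call */
#define PARA  4  /* stand-in for omp_para: number of key blocks   */

/* Illustrative worker standing in for md5cryptsse(): each call only
   touches its own block of keys and its own slice of the output. */
static void process_block(const int *keys, int *out, int n)
{
    for (int i = 0; i < n; i++)
        out[i] = keys[i] * 2;
}

int main(void)
{
    int keys[BLOCK * PARA], out[BLOCK * PARA];
    for (int i = 0; i < BLOCK * PARA; i++)
        keys[i] = i;
    /* Same shape as crypt_all(): one iteration per disjoint block, so the
       loop needs no locking; it also runs correctly without OpenMP. */
#pragma omp parallel for
    for (int t = 0; t < PARA; t++)
        process_block(&keys[t * BLOCK], &out[t * BLOCK], BLOCK);
    printf("out[%d] = %d\n", BLOCK * PARA - 1, out[BLOCK * PARA - 1]);
    return 0;
}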
refinePoses.h
#ifndef SP_SEGMENTER_REFINE_POSES #define SP_SEGMENTER_REFINE_POSES std::vector<poseT> RefinePoses(const pcl::PointCloud<myPointXYZ>::Ptr scene, const std::vector<ModelT> &mesh_set, const std::vector<poseT> &all_poses) { int pose_num = all_poses.size(); std::vector<ModelT> est_models(pose_num); pcl::PointCloud<myPointXYZ>::Ptr down_scene(new pcl::PointCloud<myPointXYZ>()); pcl::VoxelGrid<myPointXYZ> sor; sor.setInputCloud(scene); sor.setLeafSize(0.005, 0.005, 0.005); sor.filter(*down_scene); #pragma omp parallel for schedule(dynamic, 1) for(int i = 0 ; i < pose_num ; i++ ){ for( std::size_t j = 0 ; j < mesh_set.size() ; j++ ){ if( mesh_set[j].model_label == all_poses[i].model_name ) { est_models[i].model_label = all_poses[i].model_name; est_models[i].model_cloud = pcl::PointCloud<myPointXYZ>::Ptr (new pcl::PointCloud<myPointXYZ>()); pcl::transformPointCloud(*mesh_set[j].model_cloud, *est_models[i].model_cloud, all_poses[i].shift, all_poses[i].rotation); break; } } } std::vector< pcl::search::KdTree<myPointXYZ>::Ptr > tree_set(est_models.size()); #pragma omp parallel for schedule(dynamic, 1) for( int i = 0 ; i < pose_num ; i++ ) { tree_set[i] = pcl::search::KdTree<myPointXYZ>::Ptr (new pcl::search::KdTree<myPointXYZ>()); tree_set[i]->setInputCloud(est_models[i].model_cloud); } std::vector<int> votes(pose_num, 0); std::vector< std::vector<int> > adj_graph(pose_num); for( int i = 0 ; i < pose_num ; i++ ) adj_graph[i].resize(pose_num, 0); float sqrT = 0.01*0.01; int down_num = down_scene->size(); std::vector< std::vector<int> > bin_vec(down_num); #pragma omp parallel for for(int i = 0 ; i < pose_num ; i++ ) { int count = 0; for( pcl::PointCloud<myPointXYZ>::const_iterator it = down_scene->begin() ; it < down_scene->end() ; it++, count++ ) { std::vector<int> idx (1); std::vector<float> sqrDist (1); int nres = tree_set[i]->nearestKSearch(*it, 1, idx, sqrDist); if ( nres >= 1 && sqrDist[0] <= sqrT ) { #pragma omp critical { bin_vec[count].push_back(i); } votes[i]++; } } } for( int it = 0 ; it < down_num ; it++ ) for( std::vector<int>::iterator ii = bin_vec[it].begin() ; ii < bin_vec[it].end() ; ii++ ) for( std::vector<int>::iterator jj = ii+1 ; jj < bin_vec[it].end() ; jj++ ) { adj_graph[*ii][*jj]++; adj_graph[*jj][*ii]++; } std::vector<bool> dead_flag(pose_num, 0); for( int i = 0 ; i < pose_num ; i++ ){ if( dead_flag[i] == true ) continue; for( int j = i+1 ; j < pose_num ; j++ ) { if( dead_flag[j] == true ) continue; int min_tmp = std::min(votes[i], votes[j]); if( (adj_graph[i][j]+0.0) / min_tmp >= 0.3 ) { std::cerr << votes[i] << " " << i << std::endl; std::cerr << votes[j] << " " << j << std::endl; if( votes[i] > votes[j] ) dead_flag[j] = true; else { dead_flag[i] = true; break; } } } } std::vector<poseT> refined_poses; for( int i = 0 ; i < pose_num ; i++ ) if( dead_flag[i] == false ) refined_poses.push_back(all_poses[i]); return refined_poses; } #endif
#ifndef SP_SEGMENTER_REFINE_POSES #define SP_SEGMENTER_REFINE_POSES std::vector<poseT> RefinePoses(const pcl::PointCloud<myPointXYZ>::Ptr scene, const std::vector<ModelT> &mesh_set, const std::vector<poseT> &all_poses) { int pose_num = all_poses.size(); std::vector<ModelT> est_models(pose_num); pcl::PointCloud<myPointXYZ>::Ptr down_scene(new pcl::PointCloud<myPointXYZ>()); pcl::VoxelGrid<myPointXYZ> sor; sor.setInputCloud(scene); sor.setLeafSize(0.005, 0.005, 0.005); sor.filter(*down_scene); for (int i = 0; i < pose_num; i++) { for (std::size_t j = 0; j < mesh_set.size(); j++) { if (mesh_set[j].model_label == all_poses[i].model_name) { est_models[i].model_label = all_poses[i].model_name; est_models[i].model_cloud = pcl::PointCloud<myPointXYZ>::Ptr(new pcl::PointCloud<myPointXYZ>()); pcl::transformPointCloud(*mesh_set[j].model_cloud, *est_models[i].model_cloud, all_poses[i].shift, all_poses[i].rotation); break; } } } std::vector<pcl::search::KdTree<myPointXYZ>::Ptr> tree_set(est_models.size()); for (int i = 0; i < pose_num; i++) { tree_set[i] = pcl::search::KdTree<myPointXYZ>::Ptr(new pcl::search::KdTree<myPointXYZ>()); tree_set[i]->setInputCloud(est_models[i].model_cloud); } std::vector<int> votes(pose_num, 0); std::vector<std::vector<int>> adj_graph(pose_num); for (int i = 0; i < pose_num; i++) adj_graph[i].resize(pose_num, 0); float sqrT = 0.01 * 0.01; int down_num = down_scene->size(); std::vector<std::vector<int>> bin_vec(down_num); for (int i = 0; i < pose_num; i++) { int count = 0; for (pcl::PointCloud<myPointXYZ>::const_iterator it = down_scene->begin(); it < down_scene->end(); it++, count++) { std::vector<int> idx(1); std::vector<float> sqrDist(1); int nres = tree_set[i]->nearestKSearch(*it, 1, idx, sqrDist); if (nres >= 1 && sqrDist[0] <= sqrT) { bin_vec[count].push_back(i); votes[i]++; } } } for (int it = 0; it < down_num; it++) for (std::vector<int>::iterator ii = bin_vec[it].begin(); ii < bin_vec[it].end(); ii++) for (std::vector<int>::iterator jj = ii + 1; jj < bin_vec[it].end(); jj++) { adj_graph[*ii][*jj]++; adj_graph[*jj][*ii]++; } std::vector<bool> dead_flag(pose_num, 0); for (int i = 0; i < pose_num; i++) { if (dead_flag[i] == true) continue; for (int j = i + 1; j < pose_num; j++) { if (dead_flag[j] == true) continue; int min_tmp = std::min(votes[i], votes[j]); if ((adj_graph[i][j] + 0.0) / min_tmp >= 0.3) { std::cerr << votes[i] << " " << i << std::endl; std::cerr << votes[j] << " " << j << std::endl; if (votes[i] > votes[j]) dead_flag[j] = true; else { dead_flag[i] = true; break; } } } } std::vector<poseT> refined_poses; for (int i = 0; i < pose_num; i++) if (dead_flag[i] == false) refined_poses.push_back(all_poses[i]); return refined_poses; } #endif
#ifndef SP_SEGMENTER_REFINE_POSES #define SP_SEGMENTER_REFINE_POSES std::vector<poseT> RefinePoses(const pcl::PointCloud<myPointXYZ>::Ptr scene, const std::vector<ModelT> &mesh_set, const std::vector<poseT> &all_poses) { int pose_num = all_poses.size(); std::vector<ModelT> est_models(pose_num); pcl::PointCloud<myPointXYZ>::Ptr down_scene(new pcl::PointCloud<myPointXYZ>()); pcl::VoxelGrid<myPointXYZ> sor; sor.setInputCloud(scene); sor.setLeafSize(0.005, 0.005, 0.005); sor.filter(*down_scene); #pragma omp parallel for schedule(dynamic, 1) for (int i = 0; i < pose_num; i++) { for (std::size_t j = 0; j < mesh_set.size(); j++) { if (mesh_set[j].model_label == all_poses[i].model_name) { est_models[i].model_label = all_poses[i].model_name; est_models[i].model_cloud = pcl::PointCloud<myPointXYZ>::Ptr(new pcl::PointCloud<myPointXYZ>()); pcl::transformPointCloud(*mesh_set[j].model_cloud, *est_models[i].model_cloud, all_poses[i].shift, all_poses[i].rotation); break; } } } std::vector<pcl::search::KdTree<myPointXYZ>::Ptr> tree_set(est_models.size()); #pragma omp parallel for schedule(dynamic, 1) for (int i = 0; i < pose_num; i++) { tree_set[i] = pcl::search::KdTree<myPointXYZ>::Ptr(new pcl::search::KdTree<myPointXYZ>()); tree_set[i]->setInputCloud(est_models[i].model_cloud); } std::vector<int> votes(pose_num, 0); std::vector<std::vector<int>> adj_graph(pose_num); for (int i = 0; i < pose_num; i++) adj_graph[i].resize(pose_num, 0); float sqrT = 0.01 * 0.01; int down_num = down_scene->size(); std::vector<std::vector<int>> bin_vec(down_num); #pragma omp parallel for for (int i = 0; i < pose_num; i++) { int count = 0; for (pcl::PointCloud<myPointXYZ>::const_iterator it = down_scene->begin(); it < down_scene->end(); it++, count++) { std::vector<int> idx(1); std::vector<float> sqrDist(1); int nres = tree_set[i]->nearestKSearch(*it, 1, idx, sqrDist); if (nres >= 1 && sqrDist[0] <= sqrT) { #pragma omp critical { bin_vec[count].push_back(i); } votes[i]++; } } } for (int it = 0; it < down_num; it++) for (std::vector<int>::iterator ii = bin_vec[it].begin(); ii < bin_vec[it].end(); ii++) for (std::vector<int>::iterator jj = ii + 1; jj < bin_vec[it].end(); jj++) { adj_graph[*ii][*jj]++; adj_graph[*jj][*ii]++; } std::vector<bool> dead_flag(pose_num, 0); for (int i = 0; i < pose_num; i++) { if (dead_flag[i] == true) continue; for (int j = i + 1; j < pose_num; j++) { if (dead_flag[j] == true) continue; int min_tmp = std::min(votes[i], votes[j]); if ((adj_graph[i][j] + 0.0) / min_tmp >= 0.3) { std::cerr << votes[i] << " " << i << std::endl; std::cerr << votes[j] << " " << j << std::endl; if (votes[i] > votes[j]) dead_flag[j] = true; else { dead_flag[i] = true; break; } } } } std::vector<poseT> refined_poses; for (int i = 0; i < pose_num; i++) if (dead_flag[i] == false) refined_poses.push_back(all_poses[i]); return refined_poses; } #endif
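In the OpenMP variant above, bin_vec[count].push_back(i) sits in an omp critical section because different pose indices i (different threads) can register a hit on the same downsampled scene point, while votes[i]++ needs no guard since each iteration owns its own slot. One common lock-free alternative is to keep per-thread bins and merge them serially afterwards; a minimal self-contained sketch follows, where pose_num, down_num, and the hits() predicate are illustrative stand-ins for the KD-tree query, not part of the original code:

#include <cstdio>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif

// Stand-in for the nearestKSearch() test: does pose i "hit" scene point p?
static bool hits(int pose, int point) { return (pose + point) % 7 == 0; }

int main()
{
    const int pose_num = 16, down_num = 1000;
    int nthreads = 1;
#ifdef _OPENMP
    nthreads = omp_get_max_threads();
#endif
    // One private copy of the bins per thread: no lock in the hot loop.
    std::vector<std::vector<std::vector<int>>> local(
        nthreads, std::vector<std::vector<int>>(down_num));
    std::vector<int> votes(pose_num, 0);

#pragma omp parallel for
    for (int i = 0; i < pose_num; i++) {
        int tid = 0;
#ifdef _OPENMP
        tid = omp_get_thread_num();
#endif
        for (int p = 0; p < down_num; p++)
            if (hits(i, p)) {
                local[tid][p].push_back(i);  // private bin: no race
                votes[i]++;                  // slot i is owned by iteration i
            }
    }

    // Serial merge rebuilds bin_vec; only the order within a bin differs,
    // and the adjacency counting in RefinePoses does not depend on it.
    std::vector<std::vector<int>> bin_vec(down_num);
    for (int t = 0; t < nthreads; t++)
        for (int p = 0; p < down_num; p++)
            bin_vec[p].insert(bin_vec[p].end(),
                              local[t][p].begin(), local[t][p].end());
    std::printf("bin 0 holds %zu poses\n", bin_vec[0].size());
    return 0;
}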
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-115,128)),ceild(8*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(8*t1+Nx+7,128)),floord(16*t2+Nx+3,128)),floord(8*t3+Nx-5,128)),floord(16*t1-16*t2+Nz+Nx+5,128));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),32*t4+30);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(128*t4,4*t5+4); ubv=min(128*t4+127,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ 
(-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source-to-source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 128; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication: 2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library.
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= floord(Nt - 1, 2); t1++) { lbp = max(ceild(t1, 2), ceild(4 * t1 - Nt + 2, 4)); ubp = min(floord(4 * Nt + Nz - 9, 16), floord(8 * t1 + Nz + 2, 16)); for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(max(0, ceild(16 * t2 - Nz + 5, 8)), t1), 2 * t1 - 2 * t2 + 1); t3 <= min(min(min(floord(4 * Nt + Ny - 9, 8), floord(8 * t1 + Ny + 7, 8)), floord(16 * t2 + Ny + 3, 8)), floord(16 * t1 - 16 * t2 + Nz + Ny + 5, 8)); t3++) { for (t4 = max(max(max(0, ceild(t1 - 15, 16)), ceild(16 * t2 - Nz - 115, 128)), ceild(8 * t3 - Ny - 115, 128)); t4 <= min(min(min(min(floord(4 * Nt + Nx - 9, 128), floord(8 * t1 + Nx + 7, 128)), floord(16 * t2 + Nx + 3, 128)), floord(8 * t3 + Nx - 5, 128)), floord(16 * t1 - 16 * t2 + Nz + Nx + 5, 128)); t4++) { for (t5 = max(max(max(max(max(0, ceild(16 * t2 - Nz + 5, 4)), ceild(8 * t3 - Ny + 5, 4)), ceild(128 * t4 - Nx + 5, 4)), 2 * t1), 4 * t1 - 4 * t2 + 1); t5 <= min(min(min(min(min(floord(16 * t1 - 16 * t2 + Nz + 10, 4), 2 * t3), Nt - 1), 2 * t1 + 3), 4 * t2 + 2), 32 * t4 + 30); t5++) { for (t6 = max(max(16 * t2, 4 * t5 + 4), -16 * t1 + 16 * t2 + 8 * t5 - 15); t6 <= min(min(16 * t2 + 15, -16 * t1 + 16 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(8 * t3, 4 * t5 + 4); t7 <= min(8 * t3 + 7, 4 * t5 + Ny - 5); t7++) { lbv = max(128 * t4, 4 * t5 + 4); ubv = min(128 * t4 + 127, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef[1][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 
2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + (coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source-to-source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 128; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication: 2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library.
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= floord(Nt - 1, 2); t1++) { lbp = max(ceild(t1, 2), ceild(4 * t1 - Nt + 2, 4)); ubp = min(floord(4 * Nt + Nz - 9, 16), floord(8 * t1 + Nz + 2, 16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(max(0, ceild(16 * t2 - Nz + 5, 8)), t1), 2 * t1 - 2 * t2 + 1); t3 <= min(min(min(floord(4 * Nt + Ny - 9, 8), floord(8 * t1 + Ny + 7, 8)), floord(16 * t2 + Ny + 3, 8)), floord(16 * t1 - 16 * t2 + Nz + Ny + 5, 8)); t3++) { for (t4 = max(max(max(0, ceild(t1 - 15, 16)), ceild(16 * t2 - Nz - 115, 128)), ceild(8 * t3 - Ny - 115, 128)); t4 <= min(min(min(min(floord(4 * Nt + Nx - 9, 128), floord(8 * t1 + Nx + 7, 128)), floord(16 * t2 + Nx + 3, 128)), floord(8 * t3 + Nx - 5, 128)), floord(16 * t1 - 16 * t2 + Nz + Nx + 5, 128)); t4++) { for (t5 = max(max(max(max(max(0, ceild(16 * t2 - Nz + 5, 4)), ceild(8 * t3 - Ny + 5, 4)), ceild(128 * t4 - Nx + 5, 4)), 2 * t1), 4 * t1 - 4 * t2 + 1); t5 <= min(min(min(min(min(floord(16 * t1 - 16 * t2 + Nz + 10, 4), 2 * t3), Nt - 1), 2 * t1 + 3), 4 * t2 + 2), 32 * t4 + 30); t5++) { for (t6 = max(max(16 * t2, 4 * t5 + 4), -16 * t1 + 16 * t2 + 8 * t5 - 15); t6 <= min(min(16 * t2 + 15, -16 * t1 + 16 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(8 * t3, 4 * t5 + 4); t7 <= min(8 * t3 + 7, 4 * t5 + Ny - 5); t7++) { lbv = max(128 * t4, 4 * t5 + 4); ubv = min(128 * t4 + 127, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef[1][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * 
t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + (coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
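The tiled loop nests above are machine-generated and hard to follow; the two ideas they combine are double buffering in time (the A[(t + 1) % 2] plane is written from the A[t % 2] plane) and an OpenMP parallel for over one tile dimension. A stripped-down 1D illustration of just those two ideas follows; it is not the skewed PLUTO schedule, and the stencil coefficients are made up for the sketch:

#include <cstdio>
#include <vector>

int main()
{
    const int N = 1024, Nt = 100;
    // Two time planes, as in A[(t + 1) % 2] above: write one, read the other.
    std::vector<double> A[2] = {std::vector<double>(N, 1.0),
                                std::vector<double>(N, 0.0)};
    for (int t = 0; t < Nt; t++) {
        // Every i reads only the t % 2 plane, so iterations are independent
        // and the spatial loop can be parallelized safely.
#pragma omp parallel for
        for (int i = 1; i < N - 1; i++)
            A[(t + 1) % 2][i] = 0.5 * A[t % 2][i]
                + 0.25 * (A[t % 2][i - 1] + A[t % 2][i + 1]);
    }
    std::printf("A[%d][N/2] = %f\n", Nt % 2, A[Nt % 2][N / 2]);
    return 0;
}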
queue.h
// -*- C++ -*- // Copyright (C) 2007, 2008, 2009, 2010 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/queue.h * @brief Lock-free double-ended queue. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Johannes Singler. #ifndef _GLIBCXX_PARALLEL_QUEUE_H #define _GLIBCXX_PARALLEL_QUEUE_H 1 #include <parallel/types.h> #include <parallel/base.h> #include <parallel/compatibility.h> /** @brief Decide whether to declare certain variable volatile in this file. */ #define _GLIBCXX_VOLATILE volatile namespace __gnu_parallel { /**@brief Double-ended queue of bounded size, allowing lock-free * atomic access. push_front() and pop_front() must not be called * concurrently to each other, while pop_back() can be called * concurrently at all times. * @c empty(), @c size(), and @c top() are intentionally not provided. * Calling them would not make sense in a concurrent setting. * @param _Tp Contained element type. */ template<typename _Tp> class _RestrictedBoundedConcurrentQueue { private: /** @brief Array of elements, seen as cyclic buffer. */ _Tp* _M_base; /** @brief Maximal number of elements contained at the same time. */ _SequenceIndex _M_max_size; /** @brief Cyclic __begin and __end pointers contained in one atomically changeable value. */ _GLIBCXX_VOLATILE _CASable _M_borders; public: /** @brief Constructor. Not to be called concurrent, of course. * @param _M_max_size Maximal number of elements to be contained. */ _RestrictedBoundedConcurrentQueue(_SequenceIndex __max_size) { _M_max_size = __max_size; _M_base = new _Tp[__max_size]; _M_borders = __encode2(0, 0); #pragma omp flush } /** @brief Destructor. Not to be called concurrent, of course. */ ~_RestrictedBoundedConcurrentQueue() { delete[] _M_base; } /** @brief Pushes one element into the queue at the front end. * Must not be called concurrently with pop_front(). */ void push_front(const _Tp& __t) { _CASable __former_borders = _M_borders; int __former_front, __former_back; __decode2(__former_borders, __former_front, __former_back); *(_M_base + __former_front % _M_max_size) = __t; #if _GLIBCXX_ASSERTIONS // Otherwise: front - back > _M_max_size eventually. _GLIBCXX_PARALLEL_ASSERT(((__former_front + 1) - __former_back) <= _M_max_size); #endif __fetch_and_add(&_M_borders, __encode2(1, 0)); } /** @brief Pops one element from the queue at the front end. * Must not be called concurrently with pop_front(). 
*/ bool pop_front(_Tp& __t) { int __former_front, __former_back; #pragma omp flush __decode2(_M_borders, __former_front, __former_back); while (__former_front > __former_back) { // Chance. _CASable __former_borders = __encode2(__former_front, __former_back); _CASable __new_borders = __encode2(__former_front - 1, __former_back); if (__compare_and_swap(&_M_borders, __former_borders, __new_borders)) { __t = *(_M_base + (__former_front - 1) % _M_max_size); return true; } #pragma omp flush __decode2(_M_borders, __former_front, __former_back); } return false; } /** @brief Pops one element from the queue at the back end. * Can be called concurrently at all times, per the class description. */ bool pop_back(_Tp& __t) //queue behavior { int __former_front, __former_back; #pragma omp flush __decode2(_M_borders, __former_front, __former_back); while (__former_front > __former_back) { // Chance. _CASable __former_borders = __encode2(__former_front, __former_back); _CASable __new_borders = __encode2(__former_front, __former_back + 1); if (__compare_and_swap(&_M_borders, __former_borders, __new_borders)) { __t = *(_M_base + __former_back % _M_max_size); return true; } #pragma omp flush __decode2(_M_borders, __former_front, __former_back); } return false; } }; } //namespace __gnu_parallel #undef _GLIBCXX_VOLATILE #endif /* _GLIBCXX_PARALLEL_QUEUE_H */
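The key trick in queue.h is that both cyclic borders live in the single word _M_borders, so a pop can claim an element with one compare-and-swap: encode the desired front/back pair, and retry if another thread changed the word in the meantime. A minimal sketch of that protocol with std::atomic follows; encode2/decode2 here only mirror the role of __encode2/__decode2 (the real header's bit layout, cyclic buffer handling, and omp flushes are not reproduced):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Pack front and back indices into one CAS-able 64-bit word.
static uint64_t encode2(uint32_t front, uint32_t back)
{ return (uint64_t(front) << 32) | back; }

static void decode2(uint64_t borders, uint32_t &front, uint32_t &back)
{ front = uint32_t(borders >> 32); back = uint32_t(borders); }

int main()
{
    std::atomic<uint64_t> borders{encode2(3, 0)};  // three queued elements
    uint64_t seen = borders.load();
    uint32_t front, back;
    decode2(seen, front, back);
    while (front > back) {                         // queue non-empty
        // Claim the back element with a single compare-exchange, the same
        // shape as the __compare_and_swap loop in pop_back() above.
        if (borders.compare_exchange_weak(seen, encode2(front, back + 1))) {
            std::printf("popped slot %u\n", back);
            break;
        }
        decode2(seen, front, back);                // lost the race: retry
    }
    return 0;
}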
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" #include "omp.h" #include <stdio.h> #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; double section1; double section2; }; void bf0(float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw); void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw); int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine) { int (*restrict block_sizes) __attribute__((aligned(64))) = (int (*))block_sizes_vec->data; float (*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float (*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data; int (*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int (*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float (*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float (*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data; float (*restrict save_src_u)[save_src_u_vec->size[1]] 
__attribute__((aligned(64))) = (float (*)[save_src_u_vec->size[1]])save_src_u_vec->data; float (*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float (*)[save_src_v_vec->size[1]])save_src_v_vec->data; int (*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int (*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int (*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int (*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int (*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int (*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float (*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float (*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data; float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data; float (*r21)[y_size + 1][z_size + 1]; posix_memalign((void **)&r21, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); float (*r20)[y_size + 1][z_size + 1]; posix_memalign((void **)&r20, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); float (*r19)[y_size + 1][z_size + 1]; posix_memalign((void **)&r19, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); float (*r18)[y_size + 1][z_size + 1]; posix_memalign((void **)&r18, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); float (*r17)[y_size + 1][z_size + 1]; posix_memalign((void **)&r17, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); float (*r34)[y_size + 1][z_size + 1]; posix_memalign((void **)&r34, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); float (*r35)[y_size + 1][z_size + 1]; posix_memalign((void **)&r35, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); /* Begin section0 */ for (int x = x_m - 1; x <= x_M; x += 1) { for (int y = y_m - 1; y <= y_M; y += 1) { for (int z = z_m - 1; z <= z_M; z += 1) { r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]); r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]); r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]); r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]); r17[x + 1][y + 1][z + 1] = sqrt(2 * delta[x + 4][y + 4][z + 4] + 1); } } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000; int y0_blk0_size = block_sizes[3]; int x0_blk0_size = block_sizes[2]; int yb_size = block_sizes[1]; int xb_size = block_sizes[0]; int sf = 2; int t_blk_size = 2 * sf * (time_M - time_m); printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size); for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - time_m); t_blk += sf * t_blk_size) //for each t block { for (int xb = x_m - 1; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size) { 
//printf(" Change of outer xblock %d \n", xb); for (int yb = y_m - 1; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size) { //printf(" Timestep tw: %d, Updating x: %d y: %d \n", xb, yb); for (int time = t_blk, t0 = (time) % (3), t1 = (time + 1) % (3), t2 = (time + 2) % (3); time <= 2 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3)) { int tw = ((time / sf) % (time_M - time_m + 1)); struct timeval start_section1, end_section1; gettimeofday(&start_section1, NULL); /* Begin section1 */ bf0((float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, x_size, y_size, z_size, time, t0, x0_blk0_size, x_M, x_m - 1, y0_blk0_size, y_M, y_m - 1, z_M, z_m, nthreads, xb, yb, xb_size, yb_size, tw); //printf("\n BF0 - 1 IS OVER"); /* ============================================== */ bf1(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x_size, y_size, z_size, time, t0, t1, t2, x0_blk0_size, x_M, x_m, y0_blk0_size, y_M, y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, tw); //printf("\n BF1 - 1 IS OVER"); /* End section1 */ gettimeofday(&end_section1, NULL); timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000; } } } } free(r21); free(r20); free(r19); free(r18); free(r17); free(r34); free(r35); return 0; } void bf0(float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw) { float (*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float (*)[y_size + 1][z_size + 1])r18_vec; float (*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float (*)[y_size + 1][z_size + 1])r19_vec; float (*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float (*)[y_size + 1][z_size + 1])r20_vec; float (*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float (*)[y_size + 1][z_size + 1])r21_vec; float (*restrict r34)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float (*)[y_size + 1][z_size + 1])r34_vec; float (*restrict r35)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float (*)[y_size + 1][z_size + 1])r35_vec; float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data; for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); 
y0_blk0 += y0_blk0_size) { //printf(" Change of inner x0_blk0 %d \n", x0_blk0); for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++) { //printf(" bf0 Timestep tw: %d, Updating x: %d \n", tw, x - time + 1); for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++) { //printf(" bf0 Timestep tw: %d, Updating x: %d y: %d \n", tw, x - time + 1, y - time + 1); for (int z = z_m - 1; z <= z_M; z += 1) { //printf(" bf0 Updating x: %d y: %d z: %d \n", x - time + 1, y - time + 1, z + 1); float r39 = -v[t0][x - time + 4][y - time + 4][z + 4]; r35[x - time + 1][y - time + 1][z + 1] = 1.0e-1F * (-(r39 + v[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r39 + v[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r39 + v[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]); float r40 = -u[t0][x - time + 4][y - time + 4][z + 4]; r34[x - time + 1][y - time + 1][z + 1] = 1.0e-1F * (-(r40 + u[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r40 + u[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r40 + u[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]); } } } } } } void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw) { float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data; float (*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float (*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data; float (*restrict r17)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float (*)[y_size + 1][z_size + 1])r17_vec; float (*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float (*)[y_size + 1][z_size + 1])r18_vec; float (*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float (*)[y_size + 1][z_size + 1])r19_vec; float (*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float (*)[y_size + 1][z_size + 1])r20_vec; float (*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float (*)[y_size + 1][z_size + 1])r21_vec; float (*restrict r34)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float
(*)[y_size + 1][z_size + 1])r34_vec; float (*restrict r35)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float (*)[y_size + 1][z_size + 1])r35_vec; float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data; float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data; int (*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int (*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float (*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float (*)[save_src_u_vec->size[1]])save_src_u_vec->data; float (*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float (*)[save_src_v_vec->size[1]])save_src_v_vec->data; int (*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int (*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int (*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int (*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int (*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int (*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; //printf("In bf1 \n"); if (x1_blk0_size == 0) { return; } for (int x1_blk0 = max((x_m + time), xb - 0); x1_blk0 <= +min((x_M + time), (xb - 0 + xb_size)); x1_blk0 += x1_blk0_size) { //printf(" Change of inner x1_blk0 %d \n", x1_blk0); for (int y1_blk0 = max((y_m + time), yb - 0); y1_blk0 <= +min((y_M + time), (yb - 0 + yb_size)); y1_blk0 += y1_blk0_size) { for (int x = x1_blk0; x <= min(min((x_M + time), (xb - 0 + xb_size - 1)), (x1_blk0 + x1_blk0_size - 1)); x++) { //printf(" bf1 Timestep tw: %d, Updating x: %d \n", tw, x - time + 4); for (int y = y1_blk0; y <= min(min((y_M + time), (yb - 0 + yb_size - 1)), (y1_blk0 + y1_blk0_size - 1)); y++) { //printf(" bf1 Timestep tw: %d, Updating x: %d y: %d \n", tw, x - time + 4, y - time + 4); for (int z = z_m; z <= z_M; z += 1) { //printf(" bf1 Updating x: %d y: %d z: %d \n", x - time + 4, y - time + 4, z + 4); //printf(" bf1 Updating x: %d y: %d z: %d \n", x - time + 4, y - time + 4, z + 4); float r46 = 1.0 / dt; float r45 = 1.0 / (dt * dt); float r44 = r18[x - time + 1][y - time + 1][z] * r35[x - time + 1][y - time + 1][z] - r18[x - time + 1][y - time + 1][z + 1] * r35[x - time + 1][y - time + 1][z + 1] + r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r35[x - time + 1][y - time][z + 1] - r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r35[x - time + 1][y - time + 1][z + 1] + r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r35[x - time][y - time + 1][z + 1] - r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r35[x - time + 1][y - time + 1][z + 1]; float r43 = pow(vp[x - time + 4][y - time + 4][z + 4], -2); float r42 = 1.0e-1 F * (-r18[x - time + 1][y - time + 1][z] * r34[x - time + 1][y - time + 1][z] + r18[x - time + 1][y - time + 1][z + 1] * r34[x - time + 1][y - time 
+ 1][z + 1] - r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r34[x - time + 1][y - time][z + 1] + r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r34[x - time + 1][y - time + 1][z + 1] - r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r34[x - time][y - time + 1][z + 1] + r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r34[x - time + 1][y - time + 1][z + 1]) - 8.33333315e-4 F * (u[t0][x - time + 2][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 2][z + 4] + u[t0][x - time + 4][y - time + 4][z + 2] + u[t0][x - time + 4][y - time + 4][z + 6] + u[t0][x - time + 4][y - time + 6][z + 4] + u[t0][x - time + 6][y - time + 4][z + 4]) + 1.3333333e-2 F * (u[t0][x - time + 3][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 3][z + 4] + u[t0][x - time + 4][y - time + 4][z + 3] + u[t0][x - time + 4][y - time + 4][z + 5] + u[t0][x - time + 4][y - time + 5][z + 4] + u[t0][x - time + 5][y - time + 4][z + 4]) - 7.49999983e-2 F * u[t0][x - time + 4][y - time + 4][z + 4]; float r41 = 1.0 / (r43 * r45 + r46 * damp[x - time + 1][y - time + 1][z + 1]); float r32 = r45 * (-2.0 F * u[t0][x - time + 4][y - time + 4][z + 4] + u[t2][x - time + 4][y - time + 4][z + 4]); float r33 = r45 * (-2.0 F * v[t0][x - time + 4][y - time + 4][z + 4] + v[t2][x - time + 4][y - time + 4][z + 4]); u[t1][x - time + 4][y - time + 4][z + 4] = r41 * ((-r32) * r43 + r42 * (2 * epsilon[x - time + 4][y - time + 4][z + 4] + 1) + 1.0e-1 F * r44 * r17[x - time + 1][y - time + 1][z + 1] + r46 * (damp[x - time + 1][y - time + 1][z + 1] * u[t0][x - time + 4][y - time + 4][z + 4])); v[t1][x - time + 4][y - time + 4][z + 4] = r41 * ((-r33) * r43 + r42 * r17[x - time + 1][y - time + 1][z + 1] + 1.0e-1 F * r44 + r46 * (damp[x - time + 1][y - time + 1][z + 1] * v[t0][x - time + 4][y - time + 4][z + 4])); } //int sp_zi_M = nnz_sp_source_mask[x - time][y - time] - 1; for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1) { int zind = sp_source_mask[x - time][y - time][sp_zi]; float r22 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; // u[t1][x - time + 4][y - time + 4][zind + 4] += r22; float r23 = save_src_v[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; // v[t1][x - time + 4][y - time + 4][zind + 4] += r23; //printf("Source injection at time %d , at : x: %d, y: %d, %d, %f, %f \n", tw, x - time + 4, y - time + 4, zind + 4, r22, r23); } } } } } }
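A note on the time-buffer arithmetic used throughout the driver loop above: u and v keep three time levels, and t0/t1/t2 rotate through them as time advances in steps of sf. The kernel writes level t1 while reading t0 and t2, so no copying between steps is needed. A minimal, self-contained sketch of that rotation (the values of sf, time_m and time_M below are hypothetical, chosen only to show the wrap-around):

#include <stdio.h>

int main(void)
{
  const int sf = 2, time_m = 0, time_M = 6;       /* hypothetical values */
  for (int time = time_m; time <= sf * (time_M - time_m); time += sf)
  {
    int tw = (time / sf) % (time_M - time_m + 1); /* wrapped timestep index */
    int t0 = tw % 3;                              /* level read as "current"  */
    int t1 = (tw + 1) % 3;                        /* level written            */
    int t2 = (tw + 2) % 3;                        /* level read as "previous" */
    printf("time=%2d tw=%d t0=%d t1=%d t2=%d\n", time, tw, t0, t1, t2);
  }
  return 0;
}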
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" #include "omp.h" #include <stdio.h> #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; double section1; double section2; }; void bf0(float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw); void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw); int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine) { int (*restrict block_sizes) __attribute__((aligned(64))) = (int (*))block_sizes_vec->data; float (*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float (*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data; int (*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int (*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float (*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float (*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data; float (*restrict save_src_u)[save_src_u_vec->size[1]] 
__attribute__((aligned(64))) = (float (*)[save_src_u_vec->size[1]])save_src_u_vec->data; float (*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float (*)[save_src_v_vec->size[1]])save_src_v_vec->data; int (*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int (*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int (*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int (*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int (*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int (*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float (*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float (*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data; float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data; float (*r21)[y_size + 1][z_size + 1]; posix_memalign((void **)&r21, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); float (*r20)[y_size + 1][z_size + 1]; posix_memalign((void **)&r20, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); float (*r19)[y_size + 1][z_size + 1]; posix_memalign((void **)&r19, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); float (*r18)[y_size + 1][z_size + 1]; posix_memalign((void **)&r18, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); float (*r17)[y_size + 1][z_size + 1]; posix_memalign((void **)&r17, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); float (*r34)[y_size + 1][z_size + 1]; posix_memalign((void **)&r34, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); float (*r35)[y_size + 1][z_size + 1]; posix_memalign((void **)&r35, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])); /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); /* Begin section0 */ #pragma omp parallel num_threads(nthreads) { #pragma omp for collapse(2) schedule(static, 1) for (int x = x_m - 1; x <= x_M; x += 1) { for (int y = y_m - 1; y <= y_M; y += 1) { #pragma omp simd aligned(delta, phi, theta : 32) for (int z = z_m - 1; z <= z_M; z += 1) { r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]); r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]); r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]); r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]); r17[x + 1][y + 1][z + 1] = sqrt(2 * delta[x + 4][y + 4][z + 4] + 1); } } } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000; int y0_blk0_size = block_sizes[3]; int x0_blk0_size = block_sizes[2]; int yb_size = block_sizes[1]; int xb_size = block_sizes[0]; int sf = 2; int t_blk_size = 2 * sf * (time_M - time_m); printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size); for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - 
time_m); t_blk += sf * t_blk_size) //for each t block { for (int xb = x_m - 1; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size) { //printf(" Change of outer xblock %d \n", xb); for (int yb = y_m - 1; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size) { //printf(" Timestep tw: %d, Updating x: %d y: %d \n", xb, yb); for (int time = t_blk, t0 = (time) % (3), t1 = (time + 1) % (3), t2 = (time + 2) % (3); time <= 2 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf)
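The tiled loop nest above skews the spatial dimensions by time: x runs over [max(x_m + time, xb), min(x_M + time, xb + xb_size)], so a tile sweeps across the grid as time advances, and subtracting time recovers the unskewed grid coordinate (which is why the kernels index u[t0][x - time + 4] and so on). A small sketch of how those max/min bounds intersect a tile with the shifted domain (all sizes hypothetical):

#include <stdio.h>
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

int main(void)
{
  const int x_m = 0, x_M = 15, xb = 0, xb_size = 8;  /* hypothetical sizes */
  for (int time = 0; time <= 4; time += 2)
  {
    int lo = max(x_m + time, xb);
    int hi = min(x_M + time, xb + xb_size - 1);
    if (lo <= hi)
      printf("time=%d: skewed x in [%d, %d] -> grid x in [%d, %d]\n",
             time, lo, hi, lo - time, hi - time);
    else
      printf("time=%d: tile is empty at this step\n", time);
  }
  return 0;
}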
GB_binop__bshift_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bshift_int64 // A.*B function (eWiseMult): GB_AemultB__bshift_int64 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bshift_int64 // C+=b function (dense accum): GB_Cdense_accumb__bshift_int64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_int64 // C=scalar+B GB_bind1st__bshift_int64 // C=scalar+B' GB_bind1st_tran__bshift_int64 // C=A+scalar GB_bind2nd__bshift_int64 // C=A'+scalar GB_bind2nd_tran__bshift_int64 // C type: int64_t // A type: int64_t // B,b type: int8_t // BinaryOp: cij = GB_bitshift_int64 (aij, bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 0 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_bitshift_int64 (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSHIFT || GxB_NO_INT64 || GxB_NO_BSHIFT_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bshift_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bshift_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bshift_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bshift_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bshift_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bshift_int64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = GB_bitshift_int64 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bshift_int64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; Cx [p] = GB_bitshift_int64 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_int64 (x, aij) ; \ } GrB_Info GB_bind1st_tran__bshift_int64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_int64 (aij, y) ; \ } GrB_Info GB_bind2nd_tran__bshift_int64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
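The operator underlying this file is GB_bitshift_int64 (aij, bij), a 64-bit value shifted by an 8-bit count. The library's own definition lives elsewhere; the sketch below is only a plausible stand-in, assuming positive counts shift left, negative counts shift right arithmetically, and counts at or beyond the word size saturate:

#include <stdint.h>
#include <stdio.h>

static int64_t bitshift_int64_sketch (int64_t x, int8_t k)
{
    if (k >= 64) return 0 ;                      /* shifted out entirely */
    if (k <= -64) return (x < 0) ? -1 : 0 ;      /* arithmetic sign fill */
    if (k >= 0) return (int64_t) (((uint64_t) x) << k) ;  /* unsigned shift avoids UB */
    return x >> (-k) ;   /* arithmetic right shift on common compilers */
}

int main (void)
{
    printf ("%lld %lld\n",
        (long long) bitshift_int64_sketch (3, 4),       /* 48 */
        (long long) bitshift_int64_sketch (-64, -3)) ;  /* -8 */
    return 0 ;
}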
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bshift_int64 // A.*B function (eWiseMult): GB_AemultB__bshift_int64 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bshift_int64 // C+=b function (dense accum): GB_Cdense_accumb__bshift_int64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_int64 // C=scalar+B GB_bind1st__bshift_int64 // C=scalar+B' GB_bind1st_tran__bshift_int64 // C=A+scalar GB_bind2nd__bshift_int64 // C=A'+scalar GB_bind2nd_tran__bshift_int64 // C type: int64_t // A type: int64_t // B,b type: int8_t // BinaryOp: cij = GB_bitshift_int64 (aij, bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 0 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_bitshift_int64 (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSHIFT || GxB_NO_INT64 || GxB_NO_BSHIFT_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bshift_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bshift_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bshift_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bshift_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bshift_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bshift_int64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = GB_bitshift_int64 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bshift_int64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; Cx [p] = GB_bitshift_int64 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_int64 (x, aij) ; \ } GrB_Info GB_bind1st_tran__bshift_int64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_int64 (aij, y) ; \ } GrB_Info GB_bind2nd_tran__bshift_int64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
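The only difference between this variant and the next one is the #pragma omp parallel for num_threads(nthreads) schedule(static) on the two bind loops. A static schedule fits because every iteration does the same constant amount of work. A self-contained sketch of the same bind2nd pattern (the shift below stands in for GB_bitshift_int64, and the array contents are made up):

#include <stdint.h>
#include <stdio.h>

int main (void)
{
    enum { ANZ = 8 } ;
    int64_t Ax [ANZ], Cx [ANZ] ;
    int8_t y = 2 ;                           /* bound second argument */
    for (int64_t k = 0 ; k < ANZ ; k++) Ax [k] = k ;
    int64_t p ;
    #pragma omp parallel for num_threads(4) schedule(static)
    for (p = 0 ; p < ANZ ; p++)
    {
        Cx [p] = Ax [p] << y ;               /* stand-in for GB_bitshift_int64 (Ax [p], y) */
    }
    for (p = 0 ; p < ANZ ; p++) printf ("%lld ", (long long) Cx [p]) ;
    printf ("\n") ;
    return 0 ;
}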
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bshift_int64 // A.*B function (eWiseMult): GB_AemultB__bshift_int64 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bshift_int64 // C+=b function (dense accum): GB_Cdense_accumb__bshift_int64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_int64 // C=scalar+B GB_bind1st__bshift_int64 // C=scalar+B' GB_bind1st_tran__bshift_int64 // C=A+scalar GB_bind2nd__bshift_int64 // C=A'+scalar GB_bind2nd_tran__bshift_int64 // C type: int64_t // A type: int64_t // B,b type: int8_t // BinaryOp: cij = GB_bitshift_int64 (aij, bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 0 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_bitshift_int64 (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSHIFT || GxB_NO_INT64 || GxB_NO_BSHIFT_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bshift_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bshift_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bshift_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bshift_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bshift_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bshift_int64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = GB_bitshift_int64 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bshift_int64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; Cx [p] = GB_bitshift_int64 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_int64 (x, aij) ; \ } GrB_Info GB_bind1st_tran__bshift_int64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_int64 (aij, y) ; \ } GrB_Info GB_bind2nd_tran__bshift_int64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
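A note on the #if GB_DISABLE blocks wrapping every worker above: when an operator/type pairing is compiled out, the worker returns GrB_NO_VALUE and the caller is expected to fall back to a generic path. The sketch below mimics that gating with local stand-ins for the library's types and flags:

#include <stdio.h>

#define GxB_NO_BSHIFT 0   /* stand-ins for the library's compile-time switches */
#define GxB_NO_INT64 0
#define GB_DISABLE (GxB_NO_BSHIFT || GxB_NO_INT64)

typedef enum { GrB_SUCCESS = 0, GrB_NO_VALUE = 1 } GrB_Info ;  /* local stand-in */

static GrB_Info worker (void)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   /* compiled out: caller falls back to a generic kernel */
    #else
    return (GrB_SUCCESS) ;
    #endif
}

int main (void)
{
    printf ("worker returned %d\n", (int) worker ()) ;
    return 0 ;
}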
GB_unop__identity_fp64_int64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_fp64_int64 // op(A') function: GB_unop_tran__identity_fp64_int64 // C type: double // A type: int64_t // cast: double cij = (double) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = (double) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (double) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_fp64_int64 ( double *Cx, // Cx and Ax may be aliased const int64_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int64_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fp64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_fp64_int64 // op(A') function: GB_unop_tran__identity_fp64_int64 // C type: double // A type: int64_t // cast: double cij = (double) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = (double) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (double) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_fp64_int64 ( double *Cx, // Cx and Ax may be aliased const int64_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ; #else for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int64_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fp64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_fp64_int64 // op(A') function: GB_unop_tran__identity_fp64_int64 // C type: double // A type: int64_t // cast: double cij = (double) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = (double) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (double) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_fp64_int64 ( double *Cx, // Cx and Ax may be aliased const int64_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int64_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fp64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
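In the bitmap branch above, Ab marks which positions of Ax hold entries; positions with Ab [p] == 0 are skipped and the corresponding Cx [p] is left alone. A tiny self-contained illustration of that skip-and-cast pattern with made-up data:

#include <stdint.h>
#include <stdio.h>

int main (void)
{
    enum { ANZ = 6 } ;
    int64_t Ax [ANZ] = { 1, 2, 3, 4, 5, 6 } ;
    int8_t  Ab [ANZ] = { 1, 0, 1, 1, 0, 1 } ;  /* 1 = entry present */
    double  Cx [ANZ] = { 0 } ;
    for (int64_t p = 0 ; p < ANZ ; p++)
    {
        if (!Ab [p]) continue ;          /* skip absent entries */
        int64_t aij = Ax [p] ;
        Cx [p] = (double) aij ;          /* typecast, as in the identity op */
    }
    for (int p = 0 ; p < ANZ ; p++) printf ("%g ", Cx [p]) ;
    printf ("\n") ;
    return 0 ;
}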
openmpSample.c
//
//  openmpSample.c
//  pstock
//
//  Created by takayoshi on 2016/01/21.
//  Copyright © 2016 pgostation. All rights reserved.
//

#include <stdio.h>
#include <sys/time.h>
#include <libiomp/omp.h>

int xmain(int argc, const char * argv[]) {
    short a[60000] = {0}; /* zero-initialize: the += below would otherwise read indeterminate values */
    struct timeval startTime, endTime;

#ifdef _OPENMP
    printf("omp_get_num_procs:%d\n", omp_get_num_procs());
#endif

    gettimeofday(&startTime, NULL);

#pragma omp parallel
    for(short j=0;j<15000;j++) {
#pragma omp for
        for(short i=-30000;i<30000;i++) {
            a[i+30000] += i*5;
        }
    }

    gettimeofday(&endTime, NULL);
    if(endTime.tv_usec/1000 - startTime.tv_usec/1000 >= 0){
        printf("end:%ld.%03d\n", endTime.tv_sec - startTime.tv_sec, endTime.tv_usec/1000 - startTime.tv_usec/1000);
    }else{
        printf("end:%ld.%03d\n", endTime.tv_sec - startTime.tv_sec - 1, 1000 + endTime.tv_usec/1000 - startTime.tv_usec/1000);
    }
    return 0;
}

// no OpenMP 9.338 s 0.016
// 1 thread 11.571 s 0.021
// 2 threads 5.326 s 0.010
// 4 threads 5.019 s
// no simd 6.573 6.883
// simd 6.571 6.608
// declare simd 6.117 6.341
//
//
//  openmpSample.c
//  pstock
//
//  Created by takayoshi on 2016/01/21.
//  Copyright © 2016 pgostation. All rights reserved.
//

#include <stdio.h>
#include <sys/time.h>
#include <libiomp/omp.h>

int xmain(int argc, const char * argv[]) {
    short a[60000] = {0}; /* zero-initialize: the += below would otherwise read indeterminate values */
    struct timeval startTime, endTime;

    gettimeofday(&startTime, NULL);

    for(short j=0;j<15000;j++) {
        for(short i=-30000;i<30000;i++) {
            a[i+30000] += i*5;
        }
    }

    gettimeofday(&endTime, NULL);
    if(endTime.tv_usec/1000 - startTime.tv_usec/1000 >= 0){
        printf("end:%ld.%03d\n", endTime.tv_sec - startTime.tv_sec, endTime.tv_usec/1000 - startTime.tv_usec/1000);
    }else{
        printf("end:%ld.%03d\n", endTime.tv_sec - startTime.tv_sec - 1, 1000 + endTime.tv_usec/1000 - startTime.tv_usec/1000);
    }
    return 0;
}

// no OpenMP 9.338 s 0.016
// 1 thread 11.571 s 0.021
// 2 threads 5.326 s 0.010
// 4 threads 5.019 s
// no simd 6.573 6.883
// simd 6.571 6.608
// declare simd 6.117 6.341
//
//
//  openmpSample.c
//  pstock
//
//  Created by takayoshi on 2016/01/21.
//  Copyright © 2016 pgostation. All rights reserved.
//

#include <stdio.h>
#include <sys/time.h>
#include <libiomp/omp.h>

int xmain(int argc, const char * argv[]) {
    short a[60000] = {0}; /* zero-initialize: the += below would otherwise read indeterminate values */
    struct timeval startTime, endTime;

#ifdef _OPENMP
    printf("omp_get_num_procs:%d\n", omp_get_num_procs());
#endif

    gettimeofday(&startTime, NULL);

#pragma omp parallel
    for(short j=0;j<15000;j++) {
#pragma omp for
        for(short i=-30000;i<30000;i++) {
            a[i+30000] += i*5;
        }
    }

    gettimeofday(&endTime, NULL);
    if(endTime.tv_usec/1000 - startTime.tv_usec/1000 >= 0){
        printf("end:%ld.%03d\n", endTime.tv_sec - startTime.tv_sec, endTime.tv_usec/1000 - startTime.tv_usec/1000);
    }else{
        printf("end:%ld.%03d\n", endTime.tv_sec - startTime.tv_sec - 1, 1000 + endTime.tv_usec/1000 - startTime.tv_usec/1000);
    }
    return 0;
}

// no OpenMP 9.338 s 0.016
// 1 thread 11.571 s 0.021
// 2 threads 5.326 s 0.010
// 4 threads 5.019 s
// no simd 6.573 6.883
// simd 6.571 6.608
// declare simd 6.117 6.341
//
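As written, the sample enters one parallel region, every thread executes the outer j loop, and the inner #pragma omp for splits the i range with an implicit barrier at the end of each j iteration; that is consistent with the roughly 2x speedup recorded in the comments. A hypothetical restructuring (not from the source) interchanges the loops so each thread owns a disjoint slice of a for the whole run, entering the region once and dropping the per-j barriers; on typical two's-complement targets the wrap-around result per element is unchanged:

#include <stdio.h>

int main(void)
{
    static short a[60000] = {0};
#pragma omp parallel for
    for (int i = -30000; i < 30000; i++) {
        short s = 0;
        for (int j = 0; j < 15000; j++)
            s += (short)(i * 5);   /* wraps mod 2^16, like the original += */
        a[i + 30000] += s;         /* each thread writes a disjoint range: no race */
    }
    printf("a[0]=%d a[59999]=%d\n", a[0], a[59999]);
    return 0;
}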
GB_binop__times_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_int8) // A.*B function (eWiseMult): GB (_AemultB_08__times_int8) // A.*B function (eWiseMult): GB (_AemultB_02__times_int8) // A.*B function (eWiseMult): GB (_AemultB_04__times_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int8) // A*D function (colscale): GB (_AxD__times_int8) // D*A function (rowscale): GB (_DxB__times_int8) // C+=B function (dense accum): GB (_Cdense_accumB__times_int8) // C+=b function (dense accum): GB (_Cdense_accumb__times_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int8) // C=scalar+B GB (_bind1st__times_int8) // C=scalar+B' GB (_bind1st_tran__times_int8) // C=A+scalar GB (_bind2nd__times_int8) // C=A'+scalar GB (_bind2nd_tran__times_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_INT8 || GxB_NO_TIMES_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__times_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__times_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB (_bind1st_tran__times_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB (_bind2nd_tran__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_int8) // A.*B function (eWiseMult): GB (_AemultB_08__times_int8) // A.*B function (eWiseMult): GB (_AemultB_02__times_int8) // A.*B function (eWiseMult): GB (_AemultB_04__times_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int8) // A*D function (colscale): GB (_AxD__times_int8) // D*A function (rowscale): GB (_DxB__times_int8) // C+=B function (dense accum): GB (_Cdense_accumB__times_int8) // C+=b function (dense accum): GB (_Cdense_accumb__times_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int8) // C=scalar+B GB (_bind1st__times_int8) // C=scalar+B' GB (_bind1st_tran__times_int8) // C=A+scalar GB (_bind2nd__times_int8) // C=A'+scalar GB (_bind2nd_tran__times_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_INT8 || GxB_NO_TIMES_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__times_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__times_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB (_bind1st_tran__times_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB (_bind2nd_tran__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_int8) // A.*B function (eWiseMult): GB (_AemultB_08__times_int8) // A.*B function (eWiseMult): GB (_AemultB_02__times_int8) // A.*B function (eWiseMult): GB (_AemultB_04__times_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int8) // A*D function (colscale): GB (_AxD__times_int8) // D*A function (rowscale): GB (_DxB__times_int8) // C+=B function (dense accum): GB (_Cdense_accumB__times_int8) // C+=b function (dense accum): GB (_Cdense_accumb__times_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int8) // C=scalar+B GB (_bind1st__times_int8) // C=scalar+B' GB (_bind1st_tran__times_int8) // C=A+scalar GB (_bind2nd__times_int8) // C=A'+scalar GB (_bind2nd_tran__times_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_INT8 || GxB_NO_TIMES_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__times_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__times_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB (_bind1st_tran__times_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB (_bind2nd_tran__times_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
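All four bind1st/bind2nd variants above share one shape: bind one scalar operand, then run a flat OpenMP loop over the entries the bitmap marks as present. Here is a standalone sketch of the bind2nd case; the names are illustrative, not the GraphBLAS API.

#include <stdint.h>

// Sketch of GB (_bind2nd__times_int8): Cx [p] = Ax [p] * y for every
// entry present in the bitmap Ab (Ab == NULL means all present).
static void bind2nd_times_int8_sketch (int8_t *Cx, const int8_t *Ax,
    int8_t y, const int8_t *Ab, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;
        Cx [p] = (int8_t) (Ax [p] * y) ;    // z = (x * y) with x = aij
    }
}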
convolution_3x3_pack8to4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack8to4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to4, int inch, int outch) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 4b-8a-inch/8a-64-outch/4b kernel_tm_pack8to4.create(2 * inch / 8, 64, outch / 8 + (outch % 8) / 4, (size_t)2u * 32, 32); int p = 0; for (; p + 7 < outch; p += 8) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); const Mat k4 = kernel_tm.channel(p + 4); const Mat k5 = kernel_tm.channel(p + 5); const Mat k6 = kernel_tm.channel(p + 6); const Mat k7 = kernel_tm.channel(p + 7); Mat g0 = kernel_tm_pack8to4.channel(p / 8); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00[1] = (__fp16)k1.row(q + i)[k]; g00[2] = (__fp16)k2.row(q + i)[k]; g00[3] = (__fp16)k3.row(q + i)[k]; g00[4] = (__fp16)k4.row(q + i)[k]; g00[5] = (__fp16)k5.row(q + i)[k]; g00[6] = (__fp16)k6.row(q + i)[k]; g00[7] = (__fp16)k7.row(q + i)[k]; g00 += 8; } } } } for (; p + 3 < outch; p += 4) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); Mat g0 = kernel_tm_pack8to4.channel(p / 8 + (p % 8) / 4); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00[1] = (__fp16)k1.row(q + i)[k]; g00[2] = (__fp16)k2.row(q + i)[k]; g00[3] = (__fp16)k3.row(q + i)[k]; g00 += 4; } } } } } 
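/* What the transform above computes, in scalar form: the Winograd
 * F(6,3) kernel transform U = G * g * G^T, where g is one 3x3 kernel
 * slice and G is the 8x3 matrix ktm. The NEON routine produces the same
 * 8x8 tile (stored transposed) per (outch, inch) pair before repacking.
 * This sketch is illustrative and assumes nothing beyond the ktm values. */
static void winograd63_transform_kernel_sketch(const float g[3][3], float U[8][8])
{
    static const float G[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };
    float tmp[8][3]; // tmp = G * g (8x3)
    for (int i = 0; i < 8; i++)
        for (int j = 0; j < 3; j++)
            tmp[i][j] = G[i][0] * g[0][j] + G[i][1] * g[1][j] + G[i][2] * g[2][j];
    // U = tmp * G^T (8x8)
    for (int i = 0; i < 8; i++)
        for (int j = 0; j < 8; j++)
            U[i][j] = tmp[i][0] * G[j][0] + tmp[i][1] * G[j][1] + tmp[i][2] * G[j][2];
}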
static void conv3x3s1_winograd64_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const __fp16* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[8][8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8; for (int m = 0; m < 8; m++) { float16x8_t _r00 = vld1q_f16(r0); float16x8_t _r01 = vld1q_f16(r0 + 8); float16x8_t _r02 = vld1q_f16(r0 + 16); float16x8_t _r03 = vld1q_f16(r0 + 24); float16x8_t _r04 = vld1q_f16(r0 + 32); float16x8_t _r05 = vld1q_f16(r0 + 40); float16x8_t _r06 = vld1q_f16(r0 + 48); float16x8_t _r07 = vld1q_f16(r0 + 56); float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f); float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float16x8_t _tmp34a = 
vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b); vst1q_f16(tmp[3][m], _tmp3m); vst1q_f16(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b); vst1q_f16(tmp[5][m], _tmp5m); vst1q_f16(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 8; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 8; __fp16* r0_tm_1 = r0_tm_0 + tiles * 8; __fp16* r0_tm_2 = r0_tm_0 + tiles * 16; __fp16* r0_tm_3 = r0_tm_0 + tiles * 24; __fp16* r0_tm_4 = r0_tm_0 + tiles * 32; __fp16* r0_tm_5 = r0_tm_0 + tiles * 40; __fp16* r0_tm_6 = r0_tm_0 + tiles * 48; __fp16* r0_tm_7 = r0_tm_0 + tiles * 56; for (int m = 0; m < 8; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _tmp06 = vld1q_f16(tmp[m][6]); float16x8_t _tmp07 = vld1q_f16(tmp[m][7]); float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f); float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f16(r0_tm_0, _r0tm0); vst1q_f16(r0_tm_1, _r0tm1); vst1q_f16(r0_tm_2, _r0tm2); vst1q_f16(r0_tm_3, _r0tm3); 
vst1q_f16(r0_tm_4, _r0tm4); vst1q_f16(r0_tm_5, _r0tm5); vst1q_f16(r0_tm_6, _r0tm6); vst1q_f16(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 64; r0_tm_1 += tiles * 64; r0_tm_2 += tiles * 64; r0_tm_3 += tiles * 64; r0_tm_4 += tiles * 64; r0_tm_5 += tiles * 64; r0_tm_6 += tiles * 64; r0_tm_7 += tiles * 64; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tm2p = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u * 4, 4, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* output0_tm = top_blob_tm.channel(p); __fp16* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(p / 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel01_tm.row<const 
__fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[5] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v0.h[7] \n" "fmla v24.8h, v17.8h, v1.h[0] \n" "fmla v25.8h, v17.8h, v1.h[1] \n" "fmla v26.8h, v17.8h, v1.h[2] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[4] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[6] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v2.h[0] \n" "fmla v25.8h, v18.8h, v2.h[1] \n" "fmla v26.8h, v18.8h, v2.h[2] \n" "fmla v27.8h, v18.8h, v2.h[3] \n" "fmla v28.8h, v18.8h, v2.h[4] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[6] \n" "fmla v31.8h, v18.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" "fmla v24.8h, v19.8h, v3.h[0] \n" "fmla v25.8h, v19.8h, v3.h[1] \n" "fmla v26.8h, v19.8h, v3.h[2] \n" "fmla v27.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v3.h[4] \n" "fmla v29.8h, v19.8h, v3.h[5] \n" "fmla v30.8h, v19.8h, v3.h[6] \n" "fmla v31.8h, v19.8h, v3.h[7] \n" "fmla v24.8h, v20.8h, v4.h[0] \n" "fmla v25.8h, v20.8h, v4.h[1] \n" "fmla v26.8h, v20.8h, v4.h[2] \n" "fmla v27.8h, v20.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v4.h[5] \n" "fmla v30.8h, v20.8h, v4.h[6] \n" "fmla v31.8h, v20.8h, v4.h[7] \n" "fmla v24.8h, v21.8h, v5.h[0] \n" "fmla v25.8h, v21.8h, v5.h[1] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[3] \n" "fmla v28.8h, v21.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v5.h[7] \n" "fmla v24.8h, v22.8h, v6.h[0] \n" "fmla v25.8h, v22.8h, v6.h[1] \n" "fmla v26.8h, v22.8h, v6.h[2] \n" "fmla v27.8h, v22.8h, v6.h[3] \n" "fmla v28.8h, v22.8h, v6.h[4] \n" "fmla v29.8h, v22.8h, v6.h[5] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v23.8h, v7.h[0] \n" "fmla v25.8h, v23.8h, v7.h[1] \n" "fmla v26.8h, v23.8h, v7.h[2] \n" "fmla v27.8h, v23.8h, v7.h[3] \n" "fmla v28.8h, v23.8h, v7.h[4] \n" "fmla v29.8h, v23.8h, v7.h[5] \n" "fmla v30.8h, v23.8h, v7.h[6] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), 
"2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v24.8h, v17.8h, v0.h[4] \n" "fmla v25.8h, v17.8h, v0.h[5] \n" "fmla v26.8h, v17.8h, v0.h[6] \n" "fmla v27.8h, v17.8h, v0.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v1.h[0] \n" "fmla v25.8h, v18.8h, v1.h[1] \n" "fmla v26.8h, v18.8h, v1.h[2] \n" "fmla v27.8h, v18.8h, v1.h[3] \n" "fmla v24.8h, v19.8h, v1.h[4] \n" "fmla v25.8h, v19.8h, v1.h[5] \n" "fmla v26.8h, v19.8h, v1.h[6] \n" "fmla v27.8h, v19.8h, v1.h[7] \n" "fmla v24.8h, v20.8h, v2.h[0] \n" "fmla v25.8h, v20.8h, v2.h[1] \n" "fmla v26.8h, v20.8h, v2.h[2] \n" "fmla v27.8h, v20.8h, v2.h[3] \n" "fmla v24.8h, v21.8h, v2.h[4] \n" "fmla v25.8h, v21.8h, v2.h[5] \n" "fmla v26.8h, v21.8h, v2.h[6] \n" "fmla v27.8h, v21.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v22.8h, v3.h[0] \n" "fmla v25.8h, v22.8h, v3.h[1] \n" "fmla v26.8h, v22.8h, v3.h[2] \n" "fmla v27.8h, v22.8h, v3.h[3] \n" "fmla v24.8h, v23.8h, v3.h[4] \n" "fmla v25.8h, v23.8h, v3.h[5] \n" "fmla v26.8h, v23.8h, v3.h[6] \n" "fmla v27.8h, v23.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); float16x8_t _k4 = vld1q_f16(kptr + 32); float16x8_t _k5 = vld1q_f16(kptr + 40); float16x8_t _k6 = vld1q_f16(kptr + 48); float16x8_t _k7 = vld1q_f16(kptr + 56); _sum0 = vfmaq_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfmaq_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfmaq_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfmaq_laneq_f16(_sum0, _k6, _r0, 6); 
_sum0 = vfmaq_laneq_f16(_sum0, _k7, _r0, 7); kptr += 64; r0 += 8; } vst1_f16(output0_tm, vget_low_f16(_sum0)); vst1_f16(output1_tm, vget_high_f16(_sum0)); output0_tm += 4; output1_tm += 4; } } } remain_outch_start += nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v28.4h, v16.4h, v0.h[4] \n" "fmla v29.4h, v16.4h, v0.h[5] \n" "fmla v30.4h, v16.4h, v0.h[6] \n" "fmla v31.4h, v16.4h, v0.h[7] \n" "fmla v24.4h, v17.4h, v1.h[0] \n" "fmla v25.4h, v17.4h, v1.h[1] \n" "fmla v26.4h, v17.4h, v1.h[2] \n" "fmla v27.4h, v17.4h, v1.h[3] \n" "fmla v28.4h, v17.4h, v1.h[4] \n" "fmla v29.4h, v17.4h, v1.h[5] \n" "fmla v30.4h, v17.4h, v1.h[6] \n" "fmla v31.4h, v17.4h, v1.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v2.h[0] \n" "fmla v25.4h, v18.4h, v2.h[1] \n" "fmla v26.4h, v18.4h, v2.h[2] \n" "fmla v27.4h, v18.4h, v2.h[3] \n" "fmla v28.4h, v18.4h, v2.h[4] \n" "fmla v29.4h, v18.4h, v2.h[5] \n" "fmla v30.4h, v18.4h, v2.h[6] \n" "fmla v31.4h, v18.4h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" "fmla v24.4h, v19.4h, v3.h[0] \n" "fmla v25.4h, v19.4h, v3.h[1] \n" "fmla v26.4h, v19.4h, v3.h[2] \n" "fmla v27.4h, v19.4h, v3.h[3] \n" "fmla v28.4h, v19.4h, v3.h[4] \n" "fmla v29.4h, v19.4h, v3.h[5] \n" "fmla v30.4h, v19.4h, v3.h[6] \n" "fmla v31.4h, v19.4h, v3.h[7] \n" "fmla v24.4h, v20.4h, v4.h[0] \n" "fmla v25.4h, v20.4h, v4.h[1] \n" "fmla v26.4h, v20.4h, v4.h[2] \n" "fmla v27.4h, v20.4h, v4.h[3] \n" "fmla v28.4h, v20.4h, v4.h[4] \n" "fmla v29.4h, v20.4h, v4.h[5] \n" "fmla v30.4h, v20.4h, v4.h[6] \n" "fmla v31.4h, v20.4h, v4.h[7] \n" "fmla v24.4h, v21.4h, v5.h[0] \n" "fmla v25.4h, v21.4h, v5.h[1] \n" "fmla v26.4h, v21.4h, v5.h[2] \n" "fmla v27.4h, v21.4h, v5.h[3] \n" "fmla v28.4h, v21.4h, v5.h[4] \n" "fmla v29.4h, v21.4h, v5.h[5] \n" "fmla v30.4h, v21.4h, v5.h[6] \n" "fmla v31.4h, v21.4h, v5.h[7] \n" "fmla v24.4h, v22.4h, v6.h[0] \n" "fmla v25.4h, v22.4h, v6.h[1] \n" "fmla v26.4h, v22.4h, v6.h[2] \n" "fmla v27.4h, v22.4h, v6.h[3] \n" "fmla v28.4h, v22.4h, v6.h[4] \n" "fmla v29.4h, v22.4h, v6.h[5] \n" "fmla v30.4h, v22.4h, v6.h[6] \n" "fmla v31.4h, v22.4h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v23.4h, v7.h[0] \n" "fmla v25.4h, v23.4h, v7.h[1] \n" "fmla v26.4h, v23.4h, v7.h[2] \n" "fmla v27.4h, v23.4h, v7.h[3] \n" "fmla v28.4h, v23.4h, v7.h[4] \n" "fmla v29.4h, v23.4h, v7.h[5] \n" "fmla v30.4h, v23.4h, v7.h[6] \n" "fmla v31.4h, v23.4h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, 
v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v24.4h, v17.4h, v0.h[4] \n" "fmla v25.4h, v17.4h, v0.h[5] \n" "fmla v26.4h, v17.4h, v0.h[6] \n" "fmla v27.4h, v17.4h, v0.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v1.h[0] \n" "fmla v25.4h, v18.4h, v1.h[1] \n" "fmla v26.4h, v18.4h, v1.h[2] \n" "fmla v27.4h, v18.4h, v1.h[3] \n" "fmla v24.4h, v19.4h, v1.h[4] \n" "fmla v25.4h, v19.4h, v1.h[5] \n" "fmla v26.4h, v19.4h, v1.h[6] \n" "fmla v27.4h, v19.4h, v1.h[7] \n" "fmla v24.4h, v20.4h, v2.h[0] \n" "fmla v25.4h, v20.4h, v2.h[1] \n" "fmla v26.4h, v20.4h, v2.h[2] \n" "fmla v27.4h, v20.4h, v2.h[3] \n" "fmla v24.4h, v21.4h, v2.h[4] \n" "fmla v25.4h, v21.4h, v2.h[5] \n" "fmla v26.4h, v21.4h, v2.h[6] \n" "fmla v27.4h, v21.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v22.4h, v3.h[0] \n" "fmla v25.4h, v22.4h, v3.h[1] \n" "fmla v26.4h, v22.4h, v3.h[2] \n" "fmla v27.4h, v22.4h, v3.h[3] \n" "fmla v24.4h, v23.4h, v3.h[4] \n" "fmla v25.4h, v23.4h, v3.h[5] \n" "fmla v26.4h, v23.4h, v3.h[6] \n" "fmla v27.4h, v23.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); float16x4_t _sum0 = vdup_n_f16((__fp16)0.f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); float16x4_t _k4 = vld1_f16(kptr + 16); float16x4_t _k5 = vld1_f16(kptr + 20); float16x4_t _k6 = vld1_f16(kptr + 24); float16x4_t _k7 = vld1_f16(kptr + 28); _sum0 = vfma_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfma_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfma_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfma_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfma_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfma_laneq_f16(_sum0, _k7, _r0, 7); kptr += 32; r0 += 8; } vst1_f16(output0_tm, _sum0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == 
top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float16x4_t _bias0 = bias ? vld1_f16((const __fp16*)bias + p * 4) : vdup_n_f16(0.f); __fp16 tmp[6][8][4]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 4; const __fp16* output0_tm_1 = output0_tm_0 + tiles * 4; const __fp16* output0_tm_2 = output0_tm_0 + tiles * 8; const __fp16* output0_tm_3 = output0_tm_0 + tiles * 12; const __fp16* output0_tm_4 = output0_tm_0 + tiles * 16; const __fp16* output0_tm_5 = output0_tm_0 + tiles * 20; const __fp16* output0_tm_6 = output0_tm_0 + tiles * 24; const __fp16* output0_tm_7 = output0_tm_0 + tiles * 28; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 4; // TODO neon optimize for (int m = 0; m < 8; m++) { float16x4_t _out0tm0 = vld1_f16(output0_tm_0); float16x4_t _out0tm1 = vld1_f16(output0_tm_1); float16x4_t _out0tm2 = vld1_f16(output0_tm_2); float16x4_t _out0tm3 = vld1_f16(output0_tm_3); float16x4_t _out0tm4 = vld1_f16(output0_tm_4); float16x4_t _out0tm5 = vld1_f16(output0_tm_5); float16x4_t _out0tm6 = vld1_f16(output0_tm_6); float16x4_t _out0tm7 = vld1_f16(output0_tm_7); float16x4_t _tmp024a = vadd_f16(_out0tm1, _out0tm2); float16x4_t _tmp135a = vsub_f16(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; float16x4_t _tmp024b = vadd_f16(_out0tm3, _out0tm4); float16x4_t _tmp135b = vsub_f16(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; float16x4_t _tmp024c = vadd_f16(_out0tm5, _out0tm6); float16x4_t _tmp135c = vsub_f16(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; float16x4_t _tmp0m = vadd_f16(vadd_f16(_out0tm0, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f)); float16x4_t _tmp2m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float16x4_t _tmp4m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1_f16(tmp[0][m], _tmp0m); vst1_f16(tmp[2][m], _tmp2m); vst1_f16(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _tmp1m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); 
float16x4_t _tmp3m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float16x4_t _tmp5m = vadd_f16(vadd_f16(_out0tm7, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f)); vst1_f16(tmp[1][m], _tmp1m); vst1_f16(tmp[3][m], _tmp3m); vst1_f16(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m = 0; m < 6; m++) { float16x4_t _tmp00 = vld1_f16(tmp[m][0]); float16x4_t _tmp01 = vld1_f16(tmp[m][1]); float16x4_t _tmp02 = vld1_f16(tmp[m][2]); float16x4_t _tmp03 = vld1_f16(tmp[m][3]); float16x4_t _tmp04 = vld1_f16(tmp[m][4]); float16x4_t _tmp05 = vld1_f16(tmp[m][5]); float16x4_t _tmp06 = vld1_f16(tmp[m][6]); float16x4_t _tmp07 = vld1_f16(tmp[m][7]); float16x4_t _tmp024a = vadd_f16(_tmp01, _tmp02); float16x4_t _tmp135a = vsub_f16(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; float16x4_t _tmp024b = vadd_f16(_tmp03, _tmp04); float16x4_t _tmp135b = vsub_f16(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; float16x4_t _tmp024c = vadd_f16(_tmp05, _tmp06); float16x4_t _tmp135c = vsub_f16(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float tmp135c = tmp0[5] - tmp0[6]; float16x4_t _out00 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp00, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f))); float16x4_t _out02 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float16x4_t _out04 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); vst1_f16(output0, _out00); vst1_f16(output0 + 8, _out02); vst1_f16(output0 + 16, _out04); // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _out01 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float16x4_t _out03 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float16x4_t _out05 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp07, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f))); vst1_f16(output0 + 4, _out01); vst1_f16(output0 + 12, _out03); vst1_f16(output0 + 20, _out05); // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
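/* --- illustrative sketch (not part of ncnn) --------------------------------
 * Scalar reference for the F(6x6, 3x3) Winograd output transform that the
 * NEON code above evaluates four fp16 lanes at a time.  The helper names are
 * hypothetical; the coefficients are exactly the otm[6][8] rows quoted in
 * the comments (bias handling omitted for brevity). */
static void winograd63_otm_1d(const float r[8], float out[6])
{
    float a0 = r[1] + r[2], s0 = r[1] - r[2]; /* tmp024a / tmp135a */
    float a1 = r[3] + r[4], s1 = r[3] - r[4]; /* tmp024b / tmp135b */
    float a2 = r[5] + r[6], s2 = r[5] - r[6]; /* tmp024c / tmp135c */
    out[0] = r[0] + a0 + a1 + a2 * 32;
    out[1] = s0 + s1 * 2 + s2 * 16;
    out[2] = a0 + a1 * 4 + a2 * 8;
    out[3] = s0 + s1 * 8 + s2 * 4;
    out[4] = a0 + a1 * 16 + a2 * 2;
    out[5] = r[7] + s0 + s1 * 32 + s2;
}
/* out6x6 = otm * m8x8 * otm^T: apply the 1-D transform along one axis of the
 * 8x8 tile, then along the other, mirroring the two m-loops above. */
static void winograd63_output_tile(const float m[8][8], float out[6][6])
{
    float tmp[6][8];
    for (int k = 0; k < 8; k++) {
        float col[8], t[6];
        for (int i = 0; i < 8; i++) col[i] = m[i][k];
        winograd63_otm_1d(col, t);
        for (int i = 0; i < 6; i++) tmp[i][k] = t[i];
    }
    for (int i = 0; i < 6; i++)
        winograd63_otm_1d(tmp[i], out[i]);
}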
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack8to4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to4, int inch, int outch) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 4b-8a-inch/8a-64-outch/4b kernel_tm_pack8to4.create(2 * inch / 8, 64, outch / 8 + (outch % 8) / 4, (size_t)2u * 32, 32); int p = 0; for (; p + 7 < outch; p += 8) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); const Mat k4 = kernel_tm.channel(p + 4); const Mat k5 = kernel_tm.channel(p + 5); const Mat k6 = kernel_tm.channel(p + 6); const Mat k7 = kernel_tm.channel(p + 7); Mat g0 = kernel_tm_pack8to4.channel(p / 8); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00[1] = (__fp16)k1.row(q + i)[k]; g00[2] = (__fp16)k2.row(q + i)[k]; g00[3] = (__fp16)k3.row(q + i)[k]; g00[4] = (__fp16)k4.row(q + i)[k]; g00[5] = (__fp16)k5.row(q + i)[k]; g00[6] = (__fp16)k6.row(q + i)[k]; g00[7] = (__fp16)k7.row(q + i)[k]; g00 += 8; } } } } for (; p + 3 < outch; p += 4) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); Mat g0 = kernel_tm_pack8to4.channel(p / 8 + (p % 8) / 4); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00[1] = (__fp16)k1.row(q + i)[k]; g00[2] = (__fp16)k2.row(q + i)[k]; g00[3] = (__fp16)k3.row(q + i)[k]; g00 += 4; } } } } } static void 
conv3x3s1_winograd64_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const __fp16* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[8][8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8; for (int m = 0; m < 8; m++) { float16x8_t _r00 = vld1q_f16(r0); float16x8_t _r01 = vld1q_f16(r0 + 8); float16x8_t _r02 = vld1q_f16(r0 + 16); float16x8_t _r03 = vld1q_f16(r0 + 24); float16x8_t _r04 = vld1q_f16(r0 + 32); float16x8_t _r05 = vld1q_f16(r0 + 40); float16x8_t _r06 = vld1q_f16(r0 + 48); float16x8_t _r07 = vld1q_f16(r0 + 56); float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f); float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f); float16x8_t _tmp34b = 
vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b); vst1q_f16(tmp[3][m], _tmp3m); vst1q_f16(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b); vst1q_f16(tmp[5][m], _tmp5m); vst1q_f16(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 8; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 8; __fp16* r0_tm_1 = r0_tm_0 + tiles * 8; __fp16* r0_tm_2 = r0_tm_0 + tiles * 16; __fp16* r0_tm_3 = r0_tm_0 + tiles * 24; __fp16* r0_tm_4 = r0_tm_0 + tiles * 32; __fp16* r0_tm_5 = r0_tm_0 + tiles * 40; __fp16* r0_tm_6 = r0_tm_0 + tiles * 48; __fp16* r0_tm_7 = r0_tm_0 + tiles * 56; for (int m = 0; m < 8; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _tmp06 = vld1q_f16(tmp[m][6]); float16x8_t _tmp07 = vld1q_f16(tmp[m][7]); float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f); float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f16(r0_tm_0, _r0tm0); vst1q_f16(r0_tm_1, _r0tm1); vst1q_f16(r0_tm_2, _r0tm2); vst1q_f16(r0_tm_3, _r0tm3); vst1q_f16(r0_tm_4, _r0tm4); vst1q_f16(r0_tm_5, _r0tm5); vst1q_f16(r0_tm_6, _r0tm6); 
vst1q_f16(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 64; r0_tm_1 += tiles * 64; r0_tm_2 += tiles * 64; r0_tm_3 += tiles * 64; r0_tm_4 += tiles * 64; r0_tm_5 += tiles * 64; r0_tm_6 += tiles * 64; r0_tm_7 += tiles * 64; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tm2p = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u * 4, 4, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* output0_tm = top_blob_tm.channel(p); __fp16* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(p / 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" 
"eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[5] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v0.h[7] \n" "fmla v24.8h, v17.8h, v1.h[0] \n" "fmla v25.8h, v17.8h, v1.h[1] \n" "fmla v26.8h, v17.8h, v1.h[2] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[4] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[6] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v2.h[0] \n" "fmla v25.8h, v18.8h, v2.h[1] \n" "fmla v26.8h, v18.8h, v2.h[2] \n" "fmla v27.8h, v18.8h, v2.h[3] \n" "fmla v28.8h, v18.8h, v2.h[4] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[6] \n" "fmla v31.8h, v18.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" "fmla v24.8h, v19.8h, v3.h[0] \n" "fmla v25.8h, v19.8h, v3.h[1] \n" "fmla v26.8h, v19.8h, v3.h[2] \n" "fmla v27.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v3.h[4] \n" "fmla v29.8h, v19.8h, v3.h[5] \n" "fmla v30.8h, v19.8h, v3.h[6] \n" "fmla v31.8h, v19.8h, v3.h[7] \n" "fmla v24.8h, v20.8h, v4.h[0] \n" "fmla v25.8h, v20.8h, v4.h[1] \n" "fmla v26.8h, v20.8h, v4.h[2] \n" "fmla v27.8h, v20.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v4.h[5] \n" "fmla v30.8h, v20.8h, v4.h[6] \n" "fmla v31.8h, v20.8h, v4.h[7] \n" "fmla v24.8h, v21.8h, v5.h[0] \n" "fmla v25.8h, v21.8h, v5.h[1] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[3] \n" "fmla v28.8h, v21.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v5.h[7] \n" "fmla v24.8h, v22.8h, v6.h[0] \n" "fmla v25.8h, v22.8h, v6.h[1] \n" "fmla v26.8h, v22.8h, v6.h[2] \n" "fmla v27.8h, v22.8h, v6.h[3] \n" "fmla v28.8h, v22.8h, v6.h[4] \n" "fmla v29.8h, v22.8h, v6.h[5] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v23.8h, v7.h[0] \n" "fmla v25.8h, v23.8h, v7.h[1] \n" "fmla v26.8h, v23.8h, v7.h[2] \n" "fmla v27.8h, v23.8h, v7.h[3] \n" "fmla v28.8h, v23.8h, v7.h[4] \n" "fmla v29.8h, v23.8h, v7.h[5] \n" "fmla v30.8h, v23.8h, v7.h[6] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", 
"v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v24.8h, v17.8h, v0.h[4] \n" "fmla v25.8h, v17.8h, v0.h[5] \n" "fmla v26.8h, v17.8h, v0.h[6] \n" "fmla v27.8h, v17.8h, v0.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v1.h[0] \n" "fmla v25.8h, v18.8h, v1.h[1] \n" "fmla v26.8h, v18.8h, v1.h[2] \n" "fmla v27.8h, v18.8h, v1.h[3] \n" "fmla v24.8h, v19.8h, v1.h[4] \n" "fmla v25.8h, v19.8h, v1.h[5] \n" "fmla v26.8h, v19.8h, v1.h[6] \n" "fmla v27.8h, v19.8h, v1.h[7] \n" "fmla v24.8h, v20.8h, v2.h[0] \n" "fmla v25.8h, v20.8h, v2.h[1] \n" "fmla v26.8h, v20.8h, v2.h[2] \n" "fmla v27.8h, v20.8h, v2.h[3] \n" "fmla v24.8h, v21.8h, v2.h[4] \n" "fmla v25.8h, v21.8h, v2.h[5] \n" "fmla v26.8h, v21.8h, v2.h[6] \n" "fmla v27.8h, v21.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v22.8h, v3.h[0] \n" "fmla v25.8h, v22.8h, v3.h[1] \n" "fmla v26.8h, v22.8h, v3.h[2] \n" "fmla v27.8h, v22.8h, v3.h[3] \n" "fmla v24.8h, v23.8h, v3.h[4] \n" "fmla v25.8h, v23.8h, v3.h[5] \n" "fmla v26.8h, v23.8h, v3.h[6] \n" "fmla v27.8h, v23.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); float16x8_t _k4 = vld1q_f16(kptr + 32); float16x8_t _k5 = vld1q_f16(kptr + 40); float16x8_t _k6 = vld1q_f16(kptr + 48); float16x8_t _k7 = vld1q_f16(kptr + 56); _sum0 = vfmaq_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfmaq_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfmaq_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfmaq_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfmaq_laneq_f16(_sum0, _k7, _r0, 7); kptr += 64; r0 += 8; } vst1_f16(output0_tm, vget_low_f16(_sum0)); vst1_f16(output1_tm, vget_high_f16(_sum0)); output0_tm += 4; output1_tm += 4; } } } 
remain_outch_start += nn_outch << 1; for (int p = remain_outch_start; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v28.4h, v16.4h, v0.h[4] \n" "fmla v29.4h, v16.4h, v0.h[5] \n" "fmla v30.4h, v16.4h, v0.h[6] \n" "fmla v31.4h, v16.4h, v0.h[7] \n" "fmla v24.4h, v17.4h, v1.h[0] \n" "fmla v25.4h, v17.4h, v1.h[1] \n" "fmla v26.4h, v17.4h, v1.h[2] \n" "fmla v27.4h, v17.4h, v1.h[3] \n" "fmla v28.4h, v17.4h, v1.h[4] \n" "fmla v29.4h, v17.4h, v1.h[5] \n" "fmla v30.4h, v17.4h, v1.h[6] \n" "fmla v31.4h, v17.4h, v1.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v2.h[0] \n" "fmla v25.4h, v18.4h, v2.h[1] \n" "fmla v26.4h, v18.4h, v2.h[2] \n" "fmla v27.4h, v18.4h, v2.h[3] \n" "fmla v28.4h, v18.4h, v2.h[4] \n" "fmla v29.4h, v18.4h, v2.h[5] \n" "fmla v30.4h, v18.4h, v2.h[6] \n" "fmla v31.4h, v18.4h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" "fmla v24.4h, v19.4h, v3.h[0] \n" "fmla v25.4h, v19.4h, v3.h[1] \n" "fmla v26.4h, v19.4h, v3.h[2] \n" "fmla v27.4h, v19.4h, v3.h[3] \n" "fmla v28.4h, v19.4h, v3.h[4] \n" "fmla v29.4h, v19.4h, v3.h[5] \n" "fmla v30.4h, v19.4h, v3.h[6] \n" "fmla v31.4h, v19.4h, v3.h[7] \n" "fmla v24.4h, v20.4h, v4.h[0] \n" "fmla v25.4h, v20.4h, v4.h[1] \n" "fmla v26.4h, v20.4h, v4.h[2] \n" "fmla v27.4h, v20.4h, v4.h[3] \n" "fmla v28.4h, v20.4h, v4.h[4] \n" "fmla v29.4h, v20.4h, v4.h[5] \n" "fmla v30.4h, v20.4h, v4.h[6] \n" "fmla v31.4h, v20.4h, v4.h[7] \n" "fmla v24.4h, v21.4h, v5.h[0] \n" "fmla v25.4h, v21.4h, v5.h[1] \n" "fmla v26.4h, v21.4h, v5.h[2] \n" "fmla v27.4h, v21.4h, v5.h[3] \n" "fmla v28.4h, v21.4h, v5.h[4] \n" "fmla v29.4h, v21.4h, v5.h[5] \n" "fmla v30.4h, v21.4h, v5.h[6] \n" "fmla v31.4h, v21.4h, v5.h[7] \n" "fmla v24.4h, v22.4h, v6.h[0] \n" "fmla v25.4h, v22.4h, v6.h[1] \n" "fmla v26.4h, v22.4h, v6.h[2] \n" "fmla v27.4h, v22.4h, v6.h[3] \n" "fmla v28.4h, v22.4h, v6.h[4] \n" "fmla v29.4h, v22.4h, v6.h[5] \n" "fmla v30.4h, v22.4h, v6.h[6] \n" "fmla v31.4h, v22.4h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v23.4h, v7.h[0] \n" "fmla v25.4h, v23.4h, v7.h[1] \n" "fmla v26.4h, v23.4h, v7.h[2] \n" "fmla v27.4h, v23.4h, v7.h[3] \n" "fmla v28.4h, v23.4h, v7.h[4] \n" "fmla v29.4h, v23.4h, v7.h[5] \n" "fmla v30.4h, v23.4h, v7.h[6] \n" "fmla v31.4h, v23.4h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", 
"v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v24.4h, v17.4h, v0.h[4] \n" "fmla v25.4h, v17.4h, v0.h[5] \n" "fmla v26.4h, v17.4h, v0.h[6] \n" "fmla v27.4h, v17.4h, v0.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v1.h[0] \n" "fmla v25.4h, v18.4h, v1.h[1] \n" "fmla v26.4h, v18.4h, v1.h[2] \n" "fmla v27.4h, v18.4h, v1.h[3] \n" "fmla v24.4h, v19.4h, v1.h[4] \n" "fmla v25.4h, v19.4h, v1.h[5] \n" "fmla v26.4h, v19.4h, v1.h[6] \n" "fmla v27.4h, v19.4h, v1.h[7] \n" "fmla v24.4h, v20.4h, v2.h[0] \n" "fmla v25.4h, v20.4h, v2.h[1] \n" "fmla v26.4h, v20.4h, v2.h[2] \n" "fmla v27.4h, v20.4h, v2.h[3] \n" "fmla v24.4h, v21.4h, v2.h[4] \n" "fmla v25.4h, v21.4h, v2.h[5] \n" "fmla v26.4h, v21.4h, v2.h[6] \n" "fmla v27.4h, v21.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v22.4h, v3.h[0] \n" "fmla v25.4h, v22.4h, v3.h[1] \n" "fmla v26.4h, v22.4h, v3.h[2] \n" "fmla v27.4h, v22.4h, v3.h[3] \n" "fmla v24.4h, v23.4h, v3.h[4] \n" "fmla v25.4h, v23.4h, v3.h[5] \n" "fmla v26.4h, v23.4h, v3.h[6] \n" "fmla v27.4h, v23.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); float16x4_t _sum0 = vdup_n_f16((__fp16)0.f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); float16x4_t _k4 = vld1_f16(kptr + 16); float16x4_t _k5 = vld1_f16(kptr + 20); float16x4_t _k6 = vld1_f16(kptr + 24); float16x4_t _k7 = vld1_f16(kptr + 28); _sum0 = vfma_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfma_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfma_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfma_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfma_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfma_laneq_f16(_sum0, _k7, _r0, 7); kptr += 32; r0 += 8; } vst1_f16(output0_tm, _sum0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 
2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float16x4_t _bias0 = bias ? vld1_f16((const __fp16*)bias + p * 4) : vdup_n_f16(0.f); __fp16 tmp[6][8][4]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 4; const __fp16* output0_tm_1 = output0_tm_0 + tiles * 4; const __fp16* output0_tm_2 = output0_tm_0 + tiles * 8; const __fp16* output0_tm_3 = output0_tm_0 + tiles * 12; const __fp16* output0_tm_4 = output0_tm_0 + tiles * 16; const __fp16* output0_tm_5 = output0_tm_0 + tiles * 20; const __fp16* output0_tm_6 = output0_tm_0 + tiles * 24; const __fp16* output0_tm_7 = output0_tm_0 + tiles * 28; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 4; // TODO neon optimize for (int m = 0; m < 8; m++) { float16x4_t _out0tm0 = vld1_f16(output0_tm_0); float16x4_t _out0tm1 = vld1_f16(output0_tm_1); float16x4_t _out0tm2 = vld1_f16(output0_tm_2); float16x4_t _out0tm3 = vld1_f16(output0_tm_3); float16x4_t _out0tm4 = vld1_f16(output0_tm_4); float16x4_t _out0tm5 = vld1_f16(output0_tm_5); float16x4_t _out0tm6 = vld1_f16(output0_tm_6); float16x4_t _out0tm7 = vld1_f16(output0_tm_7); float16x4_t _tmp024a = vadd_f16(_out0tm1, _out0tm2); float16x4_t _tmp135a = vsub_f16(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; float16x4_t _tmp024b = vadd_f16(_out0tm3, _out0tm4); float16x4_t _tmp135b = vsub_f16(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; float16x4_t _tmp024c = vadd_f16(_out0tm5, _out0tm6); float16x4_t _tmp135c = vsub_f16(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; float16x4_t _tmp0m = vadd_f16(vadd_f16(_out0tm0, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f)); float16x4_t _tmp2m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float16x4_t _tmp4m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1_f16(tmp[0][m], _tmp0m); vst1_f16(tmp[2][m], _tmp2m); vst1_f16(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _tmp1m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); float16x4_t _tmp3m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float16x4_t _tmp5m = vadd_f16(vadd_f16(_out0tm7, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f)); vst1_f16(tmp[1][m], _tmp1m); vst1_f16(tmp[3][m], _tmp3m); vst1_f16(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b 
+ tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m = 0; m < 6; m++) { float16x4_t _tmp00 = vld1_f16(tmp[m][0]); float16x4_t _tmp01 = vld1_f16(tmp[m][1]); float16x4_t _tmp02 = vld1_f16(tmp[m][2]); float16x4_t _tmp03 = vld1_f16(tmp[m][3]); float16x4_t _tmp04 = vld1_f16(tmp[m][4]); float16x4_t _tmp05 = vld1_f16(tmp[m][5]); float16x4_t _tmp06 = vld1_f16(tmp[m][6]); float16x4_t _tmp07 = vld1_f16(tmp[m][7]); float16x4_t _tmp024a = vadd_f16(_tmp01, _tmp02); float16x4_t _tmp135a = vsub_f16(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; float16x4_t _tmp024b = vadd_f16(_tmp03, _tmp04); float16x4_t _tmp135b = vsub_f16(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; float16x4_t _tmp024c = vadd_f16(_tmp05, _tmp06); float16x4_t _tmp135c = vsub_f16(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float tmp135c = tmp0[5] - tmp0[6]; float16x4_t _out00 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp00, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f))); float16x4_t _out02 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float16x4_t _out04 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); vst1_f16(output0, _out00); vst1_f16(output0 + 8, _out02); vst1_f16(output0 + 16, _out04); // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _out01 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float16x4_t _out03 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float16x4_t _out05 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp07, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f))); vst1_f16(output0 + 4, _out01); vst1_f16(output0 + 12, _out03); vst1_f16(output0 + 20, _out05); // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
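/* --- illustrative sketch (not part of ncnn) --------------------------------
 * bottom_blob_tm2 packs tiles in blocks of 8, then 4, then singles, and the
 * dot loops above recover a tile's row with the i/8 + (i%8)/4 + i%4 pattern.
 * tm2_row_of_tile() is a hypothetical helper spelling the mapping out; each
 * branch assumes i is the first tile of a block at that granularity. */
static int tm2_row_of_tile(int i, int block /* 8, 4 or 1 */)
{
    if (block == 8) return i / 8;               /* i is a multiple of 8 */
    if (block == 4) return i / 8 + (i % 8) / 4; /* i is a multiple of 4 */
    return i / 8 + (i % 8) / 4 + i % 4;         /* remaining single tiles */
}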
/****************************************************************************** * INCLUDES *****************************************************************************/ #include "base.h" #include "thd_info.h" #include "util.h" /****************************************************************************** * PUBLIC FUNCTIONS *****************************************************************************/ val_t rand_val(void) { /* TODO: modify this to work based on the size of idx_t */ val_t v = 3.0 * ((val_t) rand() / (val_t) RAND_MAX); if (rand() % 2 == 0) { v *= -1; } return v; } idx_t rand_idx(void) { /* TODO: modify this to work based on the size of idx_t */ return ((idx_t) rand() << 16) | (idx_t) rand(); } void fill_rand( val_t * const restrict vals, idx_t const nelems) { for (idx_t i = 0; i < nelems; ++i) { vals[i] = rand_val(); } } char * bytes_str( size_t const bytes) { double size = (double)bytes; int suff = 0; const char *suffix[5] = {"B", "KB", "MB", "GB", "TB"}; while (size > 1024 && suff < 4) { size /= 1024.; ++suff; } char *ret = NULL; if (asprintf(&ret, "%0.2f%s", size, suffix[suff]) == -1) { fprintf(stderr, "SPLATT: asprintf failed with %zu bytes.\n", bytes); ret = NULL; } return ret; } idx_t argmax_elem( idx_t const *const arr, idx_t const N) { idx_t mkr = 0; for (idx_t i = 1; i < N; ++i) { if (arr[i] > arr[mkr]) { mkr = i; } } return mkr; } idx_t argmin_elem( idx_t const *const arr, idx_t const N) { idx_t mkr = 0; for (idx_t i = 1; i < N; ++i) { if (arr[i] < arr[mkr]) { mkr = i; } } return mkr; } int * get_primes( int N, int *nprimes) { int size = 10; int *p = (int *)splatt_malloc(size * sizeof(int)); int np = 0; while (N != 1) { int i; for (i = 2; i <= N; ++i) { if (N % i == 0) { /* found the next prime */ break; } } /* realloc if necessary */ if (size == np) { size *= 2; p = (int *)realloc(p, size * sizeof(int)); } p[np++] = i; N /= i; } *nprimes = np; return p; } void par_memcpy( void *const restrict dst, void const *const restrict src, size_t const bytes) { int nthreads = splatt_omp_get_num_threads(); int tid = splatt_omp_get_thread_num(); size_t n_per_thread = (bytes + nthreads - 1) / nthreads; size_t n_begin = SS_MIN(n_per_thread * tid, bytes); size_t n_end = SS_MIN(n_begin + n_per_thread, bytes); memcpy((char *)dst + n_begin, (char *)src + n_begin, n_end - n_begin); }
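/* --- usage sketch (not part of splatt) -------------------------------------
 * par_memcpy() splits one copy across the threads of the enclosing parallel
 * region, so every thread of that region must call it with the same
 * arguments.  This assumes the splatt_omp_* wrappers mirror the usual omp_*
 * runtime calls; outside a parallel region it degenerates to a single
 * memcpy on thread 0. */
void par_memcpy_example( val_t * const restrict dst, val_t const * const restrict src, idx_t const nelems)
{
  #pragma omp parallel
  {
    /* every thread participates; each copies its own contiguous slice */
    par_memcpy(dst, src, nelems * sizeof(val_t));
  }
}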
XT_OffsetError.c
#include <stdio.h> #include "nrutil.h" #include "XT_Constants.h" #include "XT_Structures.h" #include <mpi.h> #include <math.h> #include "XT_IOMisc.h" #include "invert.h" #include "allocate.h" void gen_offset_constraint_windows (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr) { int32_t r_size, t_size, num = 0, i, j, k, l, dim[4], N_t_node, N_r, N_t, node_rank, node_num, node_idx, k_idx, l_idx; char constraint_file[100] = "proj_constraint"; node_rank = TomoInputsPtr->node_rank; node_num = TomoInputsPtr->node_num; N_r = SinogramPtr->N_r; N_t_node = SinogramPtr->N_t; N_t = N_t_node*node_num; r_size = 2*N_r/((int32_t)(sqrt(N_r) + 0.5)); t_size = 2*N_t/((int32_t)(sqrt(N_t) + 0.5)); for (i = 0; i <= N_r - r_size/2; i = i + r_size/2) for (j = 0; j <= N_t - t_size/2; j = j + t_size/2) num++; SinogramPtr->off_constraint = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, num, N_r, N_t_node); memset(&(SinogramPtr->off_constraint[0][0][0]), 0, num*N_r*N_t_node*sizeof(Real_arr_t)); for (num = 0, i = 0; i <= N_r - r_size/2; i = i + r_size/2) for (j = 0; j <= N_t - t_size/2; j = j + t_size/2) { for (k = i; k < i + r_size; k++) for (l = j; l < j + t_size; l++) { node_idx = node_rank*N_t_node; k_idx = k % N_r; l_idx = l % N_t; if (l_idx >= node_idx && l_idx < node_idx + N_t_node) { SinogramPtr->off_constraint[num][k_idx][l_idx-node_idx] = (k-i) < r_size/2 ? (k-i+1): r_size-(k-i); SinogramPtr->off_constraint[num][k_idx][l_idx-node_idx] *= (l-j) < t_size/2 ? (l-j+1): t_size-(l-j); } } num++; } SinogramPtr->off_constraint_num = num; dim[0] = 1; dim[1] = num; dim[2] = SinogramPtr->N_r; dim[3] = SinogramPtr->N_t; sprintf(constraint_file, "%s_n%d", constraint_file, node_rank); if (TomoInputsPtr->Write2Tiff == 1) WriteMultiDimArray2Tiff (constraint_file, dim, 0, 1, 2, 3, &(SinogramPtr->off_constraint[0][0][0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr); fprintf(TomoInputsPtr->debug_file_ptr, "gen_offset_constraint_windows: r_size = %d, t_size = %d, number of constraints = %d\n", r_size, t_size, SinogramPtr->off_constraint_num); /* SinogramPtr->off_constraint_size = SinogramPtr->N_r; SinogramPtr->off_constraint = (Real_t**)multialloc(sizeof(Real_t), 2, 1, SinogramPtr->N_r); for (j = 0; j < SinogramPtr->N_r; j++) SinogramPtr->off_constraint[0][j] = 1; SinogramPtr->off_constraint_num = 1;*/ } void constrained_quad_opt (Real_t** Lambda, Real_t** b, Real_arr_t*** A, Real_arr_t** x, int32_t Nr, int32_t Nt, int32_t M, TomoInputs* TomoInputsPtr) { Real_t **D, **Dinv; Real_t *temp, *temp2; int32_t i, j, k, l; D = (Real_t**)multialloc(sizeof(Real_t), 2, M, M); Dinv = (Real_t**)multialloc(sizeof(Real_t), 2, M, M); temp = (Real_t*)get_spc(M, sizeof(Real_t)); temp2 = (Real_t*)get_spc(M, sizeof(Real_t)); memset(&(D[0][0]), 0, M*M*sizeof(Real_t)); memset(&(Dinv[0][0]), 0, M*M*sizeof(Real_t)); #pragma omp parallel for collapse(2) private(k, l) for (i = 0; i < M; i++) for (j = 0; j < M; j++) for (k = 0; k < Nr; k++) for (l = 0; l < Nt; l++) { D[i][j] += A[i][k][l]*A[j][k][l]/Lambda[k][l]; /*sum += A[i][k]*A[j][k]/Lambda[k];*/ } /* TomoInputsPtr->t0_mpired1 = time(NULL); */ MPI_Allreduce(&(D[0][0]), &(Dinv[0][0]), M*M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); /* TomoInputsPtr->time_mpired1 += difftime(time(NULL), TomoInputsPtr->t0_mpired1);*/ /* printf("Checksum is %f\n", sum);*/ invert2(Dinv, M); #pragma omp parallel for private(j, k) for (i = 0; i < M; i++) { temp[i] = 0; for (j = 0; j < Nr; j++) for (k = 0; k < Nt; k++) temp[i] += A[i][j][k]*b[j][k]/Lambda[j][k]; } /* TomoInputsPtr->t0_mpired1 = time(NULL);*/ 
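/* temp[] holds this rank's partial A * Lambda^{-1} * b over its local slice of the t axis; the Allreduce below sums the partials so every rank sees the full right-hand side before applying Dinv. */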
MPI_Allreduce(&(temp[0]), &(temp2[0]), M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); /* TomoInputsPtr->time_mpired1 += difftime(time(NULL), TomoInputsPtr->t0_mpired1);*/ #pragma omp parallel for private(j) for (i = 0; i < M; i++) { temp[i] = 0; for (j = 0; j < M; j++) temp[i] += Dinv[i][j]*temp2[j]; } /* TomoInputsPtr->t0_mpired1 = time(NULL); MPI_Allreduce(&(temp[0]), &(temp2[0]), M, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); TomoInputsPtr->time_mpired1 += difftime(time(NULL), TomoInputsPtr->t0_mpired1);*/ #pragma omp parallel for collapse(2) private(k) for (i = 0; i < Nr; i++) for (j = 0; j < Nt; j++) { x[i][j] = 0; for (k = 0; k < M; k++) x[i][j] += A[k][i][j]*temp[k]; } #pragma omp parallel for collapse(2) for (i = 0; i < Nr; i++) for (j = 0; j < Nt; j++) x[i][j] = (b[i][j] - x[i][j])/Lambda[i][j]; free(temp); free(temp2); multifree(D, 2); multifree(Dinv, 2); } void compute_d_constraint (Real_arr_t*** A, Real_arr_t **d, int32_t Nr, int32_t Nt, int32_t M, FILE* debug_file_ptr) { int32_t i, j, k; Real_t *temp, *val; temp = (Real_t*)get_spc(M, sizeof(Real_t)); val = (Real_t*)get_spc(M, sizeof(Real_t)); #pragma omp parallel for private(j, k) for (i = 0; i < M; i++) { temp[i] = 0; for (j = 0; j < Nr; j++) for (k = 0; k < Nt; k++) temp[i] += A[i][j][k]*d[j][k]; } MPI_Allreduce(&(temp[0]), &(val[0]), M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); for (i = 0; i < M; i++) fprintf(debug_file_ptr, "compute_d_constraint: The i th constraint on offset error is %f\n", val[i]); free(temp); free(val); }
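A note on the window weights built by gen_offset_constraint_windows above: along each axis the expression (k - i) < r_size/2 ? (k - i + 1) : r_size - (k - i) is a triangular (tent) ramp, and the stored 2-D weight is the product of the r-axis and t-axis ramps. Because the windows advance by half their own width, neighboring tents overlap and the weighting tapers smoothly across the sinogram. A minimal sketch of the same weight factored into helpers (the helper names are ours, not from the source):

/* 1-D tent: ramps 1, 2, ..., size/2, then back down to 1. */
static int tent_weight(int off, int size)
{
    return off < size / 2 ? off + 1 : size - off;
}

/* Separable 2-D tent for offsets (dr, dt) in an r_size x t_size window. */
static int window_weight(int dr, int dt, int r_size, int t_size)
{
    return tent_weight(dr, r_size) * tent_weight(dt, t_size);
}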
#include <stdio.h> #include "nrutil.h" #include "XT_Constants.h" #include "XT_Structures.h" #include <mpi.h> #include <math.h> #include "XT_IOMisc.h" #include "invert.h" #include "allocate.h" void gen_offset_constraint_windows(Sinogram * SinogramPtr, TomoInputs * TomoInputsPtr) { int32_t r_size, t_size, num = 0, i, j, k, l, dim[4], N_t_node, N_r, N_t, node_rank, node_num, node_idx, k_idx, l_idx; char constraint_file[100] = "proj_constraint"; node_rank = TomoInputsPtr->node_rank; node_num = TomoInputsPtr->node_num; N_r = SinogramPtr->N_r; N_t_node = SinogramPtr->N_t; N_t = N_t_node * node_num; r_size = 2 * N_r / ((int32_t) (sqrt(N_r) + 0.5)); t_size = 2 * N_t / ((int32_t) (sqrt(N_t) + 0.5)); for (i = 0; i <= N_r - r_size / 2; i = i + r_size / 2) for (j = 0; j <= N_t - t_size / 2; j = j + t_size / 2) num++; SinogramPtr->off_constraint = (Real_arr_t ***) multialloc(sizeof(Real_arr_t), 3, num, N_r, N_t_node); memset(&(SinogramPtr->off_constraint[0][0][0]), 0, num * N_r * N_t_node * sizeof(Real_arr_t)); for (num = 0, i = 0; i <= N_r - r_size / 2; i = i + r_size / 2) for (j = 0; j <= N_t - t_size / 2; j = j + t_size / 2) { for (k = i; k < i + r_size; k++) for (l = j; l < j + t_size; l++) { node_idx = node_rank * N_t_node; k_idx = k % N_r; l_idx = l % N_t; if (l_idx >= node_idx && l_idx < node_idx + N_t_node) { SinogramPtr->off_constraint[num][k_idx][l_idx - node_idx] = (k - i) < r_size / 2 ? (k - i + 1) : r_size - (k - i); SinogramPtr->off_constraint[num][k_idx][l_idx - node_idx] *= (l - j) < t_size / 2 ? (l - j + 1) : t_size - (l - j); } } num++; } SinogramPtr->off_constraint_num = num; dim[0] = 1; dim[1] = num; dim[2] = SinogramPtr->N_r; dim[3] = SinogramPtr->N_t; sprintf(constraint_file, "%s_n%d", constraint_file, node_rank); if (TomoInputsPtr->Write2Tiff == 1) WriteMultiDimArray2Tiff(constraint_file, dim, 0, 1, 2, 3, &(SinogramPtr->off_constraint[0][0][0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr); fprintf(TomoInputsPtr->debug_file_ptr, "gen_offset_constraint_windows: r_size = %d, t_size = %d, number of constraints = %d\n", r_size, t_size, SinogramPtr->off_constraint_num); /* * SinogramPtr->off_constraint_size = SinogramPtr->N_r; * SinogramPtr->off_constraint = (Real_t**)multialloc(sizeof(Real_t), 2, * 1, SinogramPtr->N_r); for (j = 0; j < SinogramPtr->N_r; j++) * SinogramPtr->off_constraint[0][j] = 1; SinogramPtr->off_constraint_num * = 1; */ } void constrained_quad_opt(Real_t ** Lambda, Real_t ** b, Real_arr_t *** A, Real_arr_t ** x, int32_t Nr, int32_t Nt, int32_t M, TomoInputs * TomoInputsPtr) { Real_t **D, **Dinv; Real_t *temp, *temp2; int32_t i, j, k, l; D = (Real_t **) multialloc(sizeof(Real_t), 2, M, M); Dinv = (Real_t **) multialloc(sizeof(Real_t), 2, M, M); temp = (Real_t *) get_spc(M, sizeof(Real_t)); temp2 = (Real_t *) get_spc(M, sizeof(Real_t)); memset(&(D[0][0]), 0, M * M * sizeof(Real_t)); memset(&(Dinv[0][0]), 0, M * M * sizeof(Real_t)); for (i = 0; i < M; i++) for (j = 0; j < M; j++) for (k = 0; k < Nr; k++) for (l = 0; l < Nt; l++) { D[i][j] += A[i][k][l] * A[j][k][l] / Lambda[k][l]; /* sum += A[i][k]*A[j][k]/Lambda[k]; */ } /* TomoInputsPtr->t0_mpired1 = time(NULL); */ MPI_Allreduce(&(D[0][0]), &(Dinv[0][0]), M * M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); /* * TomoInputsPtr->time_mpired1 += difftime(time(NULL), * TomoInputsPtr->t0_mpired1); */ /* printf("Checksum is %f\n", sum); */ invert2(Dinv, M); for (i = 0; i < M; i++) { temp[i] = 0; for (j = 0; j < Nr; j++) for (k = 0; k < Nt; k++) temp[i] += A[i][j][k] * b[j][k] / Lambda[j][k]; } /* TomoInputsPtr->t0_mpired1 = 
time(NULL); */ MPI_Allreduce(&(temp[0]), &(temp2[0]), M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); /* * TomoInputsPtr->time_mpired1 += difftime(time(NULL), * TomoInputsPtr->t0_mpired1); */ for (i = 0; i < M; i++) { temp[i] = 0; for (j = 0; j < M; j++) temp[i] += Dinv[i][j] * temp2[j]; } /* * TomoInputsPtr->t0_mpired1 = time(NULL); MPI_Allreduce(&(temp[0]), * &(temp2[0]), M, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); * TomoInputsPtr->time_mpired1 += difftime(time(NULL), * TomoInputsPtr->t0_mpired1); */ for (i = 0; i < Nr; i++) for (j = 0; j < Nt; j++) { x[i][j] = 0; for (k = 0; k < M; k++) x[i][j] += A[k][i][j] * temp[k]; } for (i = 0; i < Nr; i++) for (j = 0; j < Nt; j++) x[i][j] = (b[i][j] - x[i][j]) / Lambda[i][j]; free(temp); free(temp2); multifree(D, 2); multifree(Dinv, 2); } void compute_d_constraint(Real_arr_t *** A, Real_arr_t ** d, int32_t Nr, int32_t Nt, int32_t M, FILE * debug_file_ptr) { int32_t i, j, k; Real_t *temp, *val; temp = (Real_t *) get_spc(M, sizeof(Real_t)); val = (Real_t *) get_spc(M, sizeof(Real_t)); for (i = 0; i < M; i++) { temp[i] = 0; for (j = 0; j < Nr; j++) for (k = 0; k < Nt; k++) temp[i] += A[i][j][k] * d[j][k]; } MPI_Allreduce(&(temp[0]), &(val[0]), M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); for (i = 0; i < M; i++) fprintf(debug_file_ptr, "compute_d_constraint: The i th constraint on offset error is %f\n", val[i]); free(temp); free(val); }
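Reading the listing, constrained_quad_opt appears to implement the closed-form solution of an equality-constrained quadratic program: with the diagonal weights Lambda, data b, and constraint matrix A (the off_constraint windows), it forms the M x M reduced normal matrix D = A Lambda^-1 A^T, sums it across MPI ranks, inverts it with invert2, and back-substitutes. In matrix form (our reconstruction from the code, not a formula stated in the source):

\[
\min_{x}\ \tfrac{1}{2}\,x^{\top}\Lambda\,x - b^{\top}x
\quad\text{s.t.}\quad Ax = 0,
\qquad
\mu = \bigl(A\Lambda^{-1}A^{\top}\bigr)^{-1}A\Lambda^{-1}b,
\qquad
x = \Lambda^{-1}\bigl(b - A^{\top}\mu\bigr),
\]

which matches the final elementwise update x[i][j] = (b[i][j] - x[i][j]) / Lambda[i][j].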
#include <stdio.h> #include "nrutil.h" #include "XT_Constants.h" #include "XT_Structures.h" #include <mpi.h> #include <math.h> #include "XT_IOMisc.h" #include "invert.h" #include "allocate.h" void gen_offset_constraint_windows(Sinogram * SinogramPtr, TomoInputs * TomoInputsPtr) { int32_t r_size, t_size, num = 0, i, j, k, l, dim[4], N_t_node, N_r, N_t, node_rank, node_num, node_idx, k_idx, l_idx; char constraint_file[100] = "proj_constraint"; node_rank = TomoInputsPtr->node_rank; node_num = TomoInputsPtr->node_num; N_r = SinogramPtr->N_r; N_t_node = SinogramPtr->N_t; N_t = N_t_node * node_num; r_size = 2 * N_r / ((int32_t) (sqrt(N_r) + 0.5)); t_size = 2 * N_t / ((int32_t) (sqrt(N_t) + 0.5)); for (i = 0; i <= N_r - r_size / 2; i = i + r_size / 2) for (j = 0; j <= N_t - t_size / 2; j = j + t_size / 2) num++; SinogramPtr->off_constraint = (Real_arr_t ***) multialloc(sizeof(Real_arr_t), 3, num, N_r, N_t_node); memset(&(SinogramPtr->off_constraint[0][0][0]), 0, num * N_r * N_t_node * sizeof(Real_arr_t)); for (num = 0, i = 0; i <= N_r - r_size / 2; i = i + r_size / 2) for (j = 0; j <= N_t - t_size / 2; j = j + t_size / 2) { for (k = i; k < i + r_size; k++) for (l = j; l < j + t_size; l++) { node_idx = node_rank * N_t_node; k_idx = k % N_r; l_idx = l % N_t; if (l_idx >= node_idx && l_idx < node_idx + N_t_node) { SinogramPtr->off_constraint[num][k_idx][l_idx - node_idx] = (k - i) < r_size / 2 ? (k - i + 1) : r_size - (k - i); SinogramPtr->off_constraint[num][k_idx][l_idx - node_idx] *= (l - j) < t_size / 2 ? (l - j + 1) : t_size - (l - j); } } num++; } SinogramPtr->off_constraint_num = num; dim[0] = 1; dim[1] = num; dim[2] = SinogramPtr->N_r; dim[3] = SinogramPtr->N_t; sprintf(constraint_file, "%s_n%d", constraint_file, node_rank); if (TomoInputsPtr->Write2Tiff == 1) WriteMultiDimArray2Tiff(constraint_file, dim, 0, 1, 2, 3, &(SinogramPtr->off_constraint[0][0][0]), 0, 0, 1, TomoInputsPtr->debug_file_ptr); fprintf(TomoInputsPtr->debug_file_ptr, "gen_offset_constraint_windows: r_size = %d, t_size = %d, number of constraints = %d\n", r_size, t_size, SinogramPtr->off_constraint_num); /* * SinogramPtr->off_constraint_size = SinogramPtr->N_r; * SinogramPtr->off_constraint = (Real_t**)multialloc(sizeof(Real_t), 2, * 1, SinogramPtr->N_r); for (j = 0; j < SinogramPtr->N_r; j++) * SinogramPtr->off_constraint[0][j] = 1; SinogramPtr->off_constraint_num * = 1; */ } void constrained_quad_opt(Real_t ** Lambda, Real_t ** b, Real_arr_t *** A, Real_arr_t ** x, int32_t Nr, int32_t Nt, int32_t M, TomoInputs * TomoInputsPtr) { Real_t **D, **Dinv; Real_t *temp, *temp2; int32_t i, j, k, l; D = (Real_t **) multialloc(sizeof(Real_t), 2, M, M); Dinv = (Real_t **) multialloc(sizeof(Real_t), 2, M, M); temp = (Real_t *) get_spc(M, sizeof(Real_t)); temp2 = (Real_t *) get_spc(M, sizeof(Real_t)); memset(&(D[0][0]), 0, M * M * sizeof(Real_t)); memset(&(Dinv[0][0]), 0, M * M * sizeof(Real_t)); #pragma omp parallel for collapse(2) private(k, l) for (i = 0; i < M; i++) for (j = 0; j < M; j++) for (k = 0; k < Nr; k++) for (l = 0; l < Nt; l++) { D[i][j] += A[i][k][l] * A[j][k][l] / Lambda[k][l]; /* sum += A[i][k]*A[j][k]/Lambda[k]; */ } /* TomoInputsPtr->t0_mpired1 = time(NULL); */ MPI_Allreduce(&(D[0][0]), &(Dinv[0][0]), M * M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); /* * TomoInputsPtr->time_mpired1 += difftime(time(NULL), * TomoInputsPtr->t0_mpired1); */ /* printf("Checksum is %f\n", sum); */ invert2(Dinv, M); #pragma omp parallel for private(j, k) for (i = 0; i < M; i++) { temp[i] = 0; for (j = 0; j < Nr; j++) for (k = 0; k < 
Nt; k++) temp[i] += A[i][j][k] * b[j][k] / Lambda[j][k]; } /* TomoInputsPtr->t0_mpired1 = time(NULL); */ MPI_Allreduce(&(temp[0]), &(temp2[0]), M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); /* * TomoInputsPtr->time_mpired1 += difftime(time(NULL), * TomoInputsPtr->t0_mpired1); */ #pragma omp parallel for private(j) for (i = 0; i < M; i++) { temp[i] = 0; for (j = 0; j < M; j++) temp[i] += Dinv[i][j] * temp2[j]; } /* * TomoInputsPtr->t0_mpired1 = time(NULL); MPI_Allreduce(&(temp[0]), * &(temp2[0]), M, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); * TomoInputsPtr->time_mpired1 += difftime(time(NULL), * TomoInputsPtr->t0_mpired1); */ #pragma omp parallel for collapse(2) private(k) for (i = 0; i < Nr; i++) for (j = 0; j < Nt; j++) { x[i][j] = 0; for (k = 0; k < M; k++) x[i][j] += A[k][i][j] * temp[k]; } #pragma omp parallel for collapse(2) for (i = 0; i < Nr; i++) for (j = 0; j < Nt; j++) x[i][j] = (b[i][j] - x[i][j]) / Lambda[i][j]; free(temp); free(temp2); multifree(D, 2); multifree(Dinv, 2); } void compute_d_constraint(Real_arr_t *** A, Real_arr_t ** d, int32_t Nr, int32_t Nt, int32_t M, FILE * debug_file_ptr) { int32_t i, j, k; Real_t *temp, *val; temp = (Real_t *) get_spc(M, sizeof(Real_t)); val = (Real_t *) get_spc(M, sizeof(Real_t)); #pragma omp parallel for private(j, k) for (i = 0; i < M; i++) { temp[i] = 0; for (j = 0; j < Nr; j++) for (k = 0; k < Nt; k++) temp[i] += A[i][j][k] * d[j][k]; } MPI_Allreduce(&(temp[0]), &(val[0]), M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD); for (i = 0; i < M; i++) fprintf(debug_file_ptr, "compute_d_constraint: The i th constraint on offset error is %f\n", val[i]); free(temp); free(val); }
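The OpenMP-annotated variant above illustrates a common hybrid MPI+OpenMP reduction pattern: each rank accumulates its local partial sums in a threaded loop, and MPI_Allreduce then combines the per-rank partials so that every rank holds the global result. A minimal, self-contained sketch of the pattern with illustrative names (the listing uses the project's MPI_REAL_DATATYPE macro; plain MPI_DOUBLE stands in for it here):

#include <mpi.h>

/* Sum a local slice with OpenMP threads, then combine across ranks. */
static double global_sum(const double *local, int n)
{
    double partial = 0.0, total = 0.0;
#pragma omp parallel for reduction(+ : partial)
    for (int i = 0; i < n; i++)
        partial += local[i];
    MPI_Allreduce(&partial, &total, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    return total;
}

Note that the D[i][j] loops in the listing need no reduction clause: collapse(2) assigns each (i, j) output cell to a single thread, so no two threads update the same element. Separately, one latent bug is worth flagging: all three variants call sprintf(constraint_file, "%s_n%d", constraint_file, node_rank), passing the destination buffer as a source argument as well, which is undefined behavior for sprintf. A safe in-place alternative (a sketch; the helper is ours):

#include <stdio.h>
#include <string.h>

/* Append "_n<rank>" without aliasing the sprintf destination. */
static void append_rank_suffix(char *file, size_t cap, int rank)
{
    size_t len = strlen(file);
    snprintf(file + len, cap - len, "_n%d", rank);
}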
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/DarwinSDKInfo.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class 
CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. 
SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Tracks expected type during expression parsing, for use in code completion. /// The type is tied to a particular token; all functions that update or consume /// the type take a start location of the token they are looking at as a /// parameter. This avoids updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Handles e.g. BaseType{ .D = Tok... void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType, const Designation &D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while the function_ref is alive. /// /// The callback should also emit signature help as a side-effect, but only /// if the completion point has been reached. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); /// Get the expected type associated with this location, if any. /// /// If the location is a function argument, determining the expected type /// involves considering all function overloads and the arguments so far. /// In this case, signature help for these function overloads will be reported /// as a side-effect (only if the completion point has been reached). QualType get(SourceLocation Tok) const { if (!Enabled || Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: bool Enabled; /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. 
QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must to since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 32; static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. 
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When an AlignPackInfo itself cannot be used, this returns a 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding; it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attribute on a decl. return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber; /// \brief True if it is an XL #pragma align/pack stack. 
bool XLStack; /// \brief Uninitialized pack value. static constexpr unsigned char UninitPackVal = -1; // Masks to encode and decode an AlignPackInfo. static constexpr uint32_t IsXLMask{0x0000'0001}; static constexpr uint32_t AlignModeMask{0x0000'0006}; static constexpr uint32_t PackAttrMask{0x0000'0008}; static constexpr uint32_t PackNumMask{0x0000'01F0}; }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label, pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label; just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; PragmaStack<AlignPackInfo> AlignPackStack; // The current #pragma align/pack values and locations at each #include. 
struct AlignPackIncludeState { AlignPackInfo CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This is an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A pushed group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursion. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. 
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list of all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of the translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with the new-expression /// used in the initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations for which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encountered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. 
UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). 
void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. 
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The namespace where coroutine components are defined. In standard, /// they are defined in std namespace. And in the previous implementation, /// they are defined in std::experimental namespace. NamespaceDecl *CoroTraitsNamespaceCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. 
bool GlobalNewDeleteDeclared; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// In addition to being constant evaluated, the current expression /// occurs in an immediate function context - either a consteval function /// or a consteval if function. ImmediateFunctionContext, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. 
SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. We produce a warning for these when popping the context if /// they are neither discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; // A context can be nested in both a discarded statement context and // an immediate function context, so they need to be tracked independently. bool InDiscardedStatement; bool InImmediateFunctionContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext), InDiscardedStatement(false), InImmediateFunctionContext(false) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated || Context == ExpressionEvaluationContext::ImmediateFunctionContext; } bool isImmediateFunctionContext() const { return Context == ExpressionEvaluationContext::ImmediateFunctionContext || (Context == ExpressionEvaluationContext::DiscardedStatement && InImmediateFunctionContext); } bool isDiscardedStatementContext() const { return Context == ExpressionEvaluationContext::DiscardedStatement || (Context == ExpressionEvaluationContext::ImmediateFunctionContext && InDiscardedStatement); } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. std::tuple<MangleNumberingContext *, Decl *> getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. 
class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl *, 2> Pair; public: SpecialMemberOverloadResult() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. const TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // arguments. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedButUsed - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit.
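/// For example, the following mismatched pair is the kind of use that gets
/// recorded as suspicious (a minimal sketch):
/// \code
///   int *p = new int[4];
///   delete p; // should be 'delete[] p'
/// \endcode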
const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; class GlobalMethodPool { public: using Lists = std::pair<ObjCMethodList, ObjCMethodList>; using iterator = llvm::DenseMap<Selector, Lists>::iterator; iterator begin() { return Methods.begin(); } iterator end() { return Methods.end(); } iterator find(Selector Sel) { return Methods.find(Sel); } std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) { return Methods.insert(Val); } int count(Selector Sel) const { return Methods.count(Sel); } bool empty() const { return Methods.empty(); } private: llvm::DenseMap<Selector, Lists> Methods; }; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the CurFPFeatures state on entry/exit of compound /// statements. class FPFeaturesStateRAII { public: FPFeaturesStateRAII(Sema &S); ~FPFeaturesStateRAII(); FPOptionsOverride getOverrides() { return OldOverrides; } private: Sema& S; FPOptions OldFPFeaturesState; FPOptionsOverride OldOverrides; LangOptions::FPEvalMethodKind OldEvalMethod; SourceLocation OldFPPragmaLocation; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; /// Increment when we find a reference; decrement when we find an ignored /// assignment. Ultimately the value is 0 if every reference is an ignored /// assignment. llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments; private: Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo; bool WarnedDarwinSDKInfoMissing = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); /// This virtual key function only exists to limit the emission of debug info /// describing the Sema class. GCC and Clang only emit debug info for a class /// with a vtable when the vtable is emitted. Sema is final and not /// polymorphic, but the debug info size savings are so significant that it is /// worth adding a vtable just to take advantage of this optimization. virtual void anchor(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getCurFPFeatures() { return CurFPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc, StringRef Platform); DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(); ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. ImmediateDiagBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. 
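/// In practice these builders are obtained through Sema::Diag (declared
/// further below); a minimal sketch, where the diagnostic ID and the
/// streamed arguments are hypothetical:
/// \code
///   S.Diag(Loc, diag::err_some_condition) << SomeType << SomeRange;
/// \endcode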
class ImmediateDiagBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op // in that case. ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default; ~ImmediateDiagBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First clear the diagnostic // builder itself so it won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template <typename T> friend const ImmediateDiagBuilder & operator<<(const ImmediateDiagBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const ImmediateDiagBuilder &operator<<(T &&V) const { const DiagnosticBuilder &BaseDiag = *this; BaseDiag << std::move(V); return *this; } }; /// A generic diagnostic builder for errors which may or may not be deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class SemaDiagnosticBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack.
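/// For instance, per the class comment above, a VLA in a host+device
/// function is only an error if the function is codegen'ed for the device,
/// so the diagnostic must be deferred (a CUDA sketch; names are
/// hypothetical):
/// \code
///   __host__ __device__ void hd(int n) {
///     int vla[n]; // deferred: diagnosed only if 'hd' is emitted for device
///   }
/// \endcode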
K_Deferred }; SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D); SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default; ~SemaDiagnosticBuilder(); bool isImmediate() const { return ImmediateDiag.hasValue(); } /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (SemaDiagnosticBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a SemaDiagnosticBuilder yourself. operator bool() const { return isImmediate(); } template <typename T> friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const SemaDiagnosticBuilder &operator<<(T &&V) const { if (ImmediateDiag.hasValue()) *ImmediateDiag << std::move(V); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V); return *this; } friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) { if (Diag.ImmediateDiag.hasValue()) PD.Emit(*Diag.ImmediateDiag); else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD; return Diag; } void AddFixItHint(const FixItHint &Hint) const { if (ImmediateDiag.hasValue()) ImmediateDiag->AddFixItHint(Hint); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint); } friend ExprResult ExprError(const SemaDiagnosticBuilder &) { return ExprError(); } friend StmtResult StmtError(const SemaDiagnosticBuilder &) { return StmtError(); } operator ExprResult() const { return ExprError(); } operator StmtResult() const { return StmtError(); } operator TypeResult() const { return TypeError(); } operator DeclResult() const { return DeclResult(true); } operator MemInitResult() const { return MemInitResult(true); } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<ImmediateDiagBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Is the last error level diagnostic immediate. This is used to determined /// whether the next info diagnostic should be immediate. bool IsLastErrorImmediate = true; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID, bool DeferHint = false); /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint = false); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h /// Whether deferrable diagnostics should be deferred. bool DeferDiags = false; /// RAII class to control scope of DeferDiags. 
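/// A minimal usage sketch (the enclosing Sema reference is hypothetical):
/// \code
///   {
///     DeferDiagsRAII DeferScope(SemaRef, /*DeferDiags=*/true);
///     // deferrable diagnostics emitted here are deferred
///   } // the previous DeferDiags value is restored on scope exit
/// \endcode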
class DeferDiagsRAII { Sema &S; bool SavedDeferDiags = false; public: DeferDiagsRAII(Sema &S, bool DeferDiags) : S(S), SavedDeferDiags(S.DeferDiags) { S.DeferDiags = DeferDiags; } ~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; } }; /// Whether an uncompilable error has occurred. This includes errors that /// happen in deferred diagnostics. bool hasUncompilableErrorOccurred() const; bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; /// Invent a new identifier for parameters of abbreviated templates. IdentifierInfo * InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, unsigned Index); void emitAndClearUnusedLocalTypedefWarnings(); private: /// Function or variable declarations to be checked for whether the deferred /// diagnostics should be emitted. llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags; public: // Emit all deferred diagnostics. void emitDeferredDiags(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ?
nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void setFunctionHasMustTail(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// Retrieve the current function, if any, that should be analyzed for /// potential availability violations. sema::FunctionScopeInfo *getCurFunctionAvailabilityContext(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. 
It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); QualType BuildBitIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); /// Determine whether the callee of a particular function call can throw. /// E, D and Loc are all optional. 
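/// A minimal sketch (\c CE is a hypothetical \c CallExpr*):
/// \code
///   CanThrowResult CT = Sema::canCalleeThrow(S, CE, CE->getCalleeDecl());
///   if (CT == CT_Cannot) {
///     // the callee is known not to throw
///   }
/// \endcode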
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D, SourceLocation Loc = SourceLocation()); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { protected: unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal argument for the /// swift_name attribute applied to decl \p D. 
Raise a diagnostic if the name /// is invalid for the given declaration. /// /// \p AL is used to provide caret diagnostics in case of a malformed name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc, const ParsedAttr &AL, bool IsAsync); /// A derivative of BoundTypeDiagnoser for which the diagnostic's type /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless. /// For example, a diagnostic with no other parameters would generally have /// the form "...%select{incomplete|sizeless}0 type %1...". template <typename... Ts> class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> { public: SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args) : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID); this->emit(DB, std::index_sequence_for<Ts...>()); DB << T->isSizelessType() << T; } }; enum class CompleteTypeKind { /// Apply the normal rules for complete types. In particular, /// treat all sizeless types as incomplete. Normal, /// Relax the normal rules for complete types so that they include /// sizeless built-in types. AcceptSizeless, // FIXME: Eventually we should flip the default to Normal and opt in // to AcceptSizeless rather than opt out of it. Default = AcceptSizeless }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool IsPartition = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// The global module fragment of the current translation unit. clang::Module *GlobalModuleFragment = nullptr; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } /// Helper function to judge if we are in module purview. /// Return false if we are not in a module. bool isCurrentModulePurview() const { return getCurrentModule() ? getCurrentModule()->isModulePurview() : false; } /// Enter the scope of the global module. Module *PushGlobalModuleFragment(SourceLocation BeginLoc, bool IsImplicit); /// Leave the scope of the global module. void PopGlobalModuleFragment(); VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. 
Module *getOwningModule(const Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); // When loading non-modular PCH files, this is used to restore module // visibility. void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) { VisibleModules.setVisible(Mod, ImportLoc); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return D->isUnconditionallyVisible() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind = CompleteTypeKind::Default) { return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, unsigned DiagID); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser); } bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID); } template <typename...
Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser); } /// Get the type of expression E, triggering instantiation to complete the /// type if necessary -- that is, if the expression refers to a templated /// static data member of incomplete array type. /// /// May still return an incomplete type if instantiation was not possible or /// if the type is incomplete for a different reason. Use /// RequireCompleteExprType instead if a diagnostic is expected for an /// incomplete expression type. QualType getCompletedType(Expr *E); void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); // Returns the underlying type of a decltype with the given expression. QualType getDecltypeForExpr(Expr *E); QualType BuildTypeofExprType(Expr *E); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as an overload set, and an expression /// representing that overload set has been formed. /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable /// expression referencing the overload set. NC_OverloadSet, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. 
NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification OverloadSet(ExprResult E) { NameClassification Result(NC_OverloadSet); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_OverloadSet); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. 
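/// A minimal sketch of how a caller might dispatch on the result (the
/// surrounding parser state, including \c Actions, is hypothetical):
/// \code
///   Sema::NameClassification NC =
///       Actions.ClassifyName(S, SS, Name, NameLoc, NextToken);
///   switch (NC.getKind()) {
///   case Sema::NC_Type:    /* use NC.getType() */        break;
///   case Sema::NC_NonType: /* use NC.getNonTypeDecl() */ break;
///   default: break;
///   }
/// \endcode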
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); void warnOnReservedIdentifier(const NamedDecl *D); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo, QualType &T, SourceLocation Loc, unsigned FailedFoldDiagID); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const BindingDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
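/// For example (a minimal sketch of the pattern being tracked):
/// \code
///   int Limit = 10;
///   void setLimit(int Limit) {
///     Limit = 20; // modifies the shadowing parameter, not the outer Limit
///   }
/// \endcode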
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions).
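/// For example, this definition breaks the formal constexpr rules of
/// pre-C++23 language modes (a minimal sketch):
/// \code
///   constexpr int bump() {
///     static int counter = 0; // static variable: invalid before C++23
///     return ++counter;
///   }
/// \endcode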
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
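/// Under ARC, for example, a union holding a __strong Objective-C pointer
/// is non-trivial to copy and to default-initialize (a minimal sketch):
/// \code
///   union U { __strong id Obj; int Plain; };
///   void use(union U u); // function parameter: one such invalid context
/// \endcode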
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. 
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// An enumeration to represent the transition of states in parsing module /// fragments and imports. If we are not parsing a C++20 TU, or we find /// an error in state transition, the state is set to NotACXX20Module. enum class ModuleImportState { FirstDecl, ///< Parsing the first decl in a TU. GlobalFragment, ///< after 'module;' but before 'module X;' ImportAllowed, ///< after 'module X;' but before any non-import decl. ImportFinished, ///< after any non-import decl. PrivateFragment, ///< after 'module :private;'. NotACXX20Module ///< Not a C++20 TU, or an invalid state was found. }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, ModuleIdPath Partition, ModuleImportState &ImportState); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module top-level name as an access path. /// \param Partition The module partition name as an access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path, ModuleIdPath Partition = {}); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule.
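/// A sketch of the expected pairing with ActOnModuleBegin (\c S,
/// \c DirectiveLoc, and \c Mod assumed from the caller's context):
/// \code
///   S.ActOnModuleBegin(DirectiveLoc, Mod);
///   // ... act on the declarations inside the submodule ...
///   S.ActOnModuleEnd(DirectiveLoc, Mod);
/// \endcode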
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
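/// A minimal usage sketch (assuming \c S and a non-tag type declaration
/// \c D from the caller; the result typically feeds a %select diagnostic):
/// \code
///   Sema::NonTagKind NTK = S.getNonTagTypeDeclKind(D, TTK_Struct);
/// \endcode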
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
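/// A minimal usage sketch (assuming \c S, a FunctionDecl \c FD, and
/// hypothetical handlers for the two cases):
/// \code
///   if (Sema::DefaultedFunctionKind DFK = S.getDefaultedFunctionKind(FD)) {
///     if (DFK.isComparison())
///       handleComparison(DFK.asComparison());       // hypothetical helper
///     else
///       handleSpecialMember(DFK.asSpecialMember()); // hypothetical helper
///   }
/// \endcode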
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Unlike for C++, we actually parse the body and reject or error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, bool IsAbstract, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs.
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. 
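/// A minimal usage sketch (assuming \c CurScope and \c DC from the caller):
/// \code
///   Scope *Enclosing = Sema::getScopeForDeclContext(CurScope, DC);
///   if (!Enclosing) { /* DC is not an enclosing scope */ }
/// \endcode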
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, /// Merge availability attributes for an implementation of /// an optional protocol requirement. AMK_OptionalProtocolImplementation }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return a new attribute if one was added, /// otherwise null.
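/// The usual calling pattern for these merge methods, sketched here for
/// mergeDLLImportAttr (\c S, \c D, and an AttributeCommonInfo \c CI assumed
/// from the caller's context):
/// \code
///   if (DLLImportAttr *NewAttr = S.mergeDLLImportAttr(D, CI))
///     D->addAttr(NewAttr);
/// \endcode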
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI, StringRef NewUserDiagnostic); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA, StringRef Name); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL); EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D, const EnforceTCBLeafAttr &AL); BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. 
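/// For example, a sketch of classifying a new function declaration against
/// a lookup result with CheckOverload, declared below (\c S, \c CurScope,
/// \c NewFD, and \c Previous assumed from the caller's context):
/// \code
///   NamedDecl *OldD = nullptr;
///   switch (S.CheckOverload(CurScope, NewFD, Previous, OldD,
///                           /*IsForUsingDecl=*/false)) {
///   case Sema::Ovl_Overload:    /* NewFD extends the overload set */ break;
///   case Sema::Ovl_Match:       /* NewFD redeclares OldD */ break;
///   case Sema::Ovl_NonFunction: /* NewFD conflicts with a non-function */ break;
///   }
/// \endcode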
enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. 
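/// A minimal usage sketch (assuming \c S, a VarDecl \c VD, and its
/// initializer \c Init; InitializedEntity comes from Initialization.h):
/// \code
///   S.checkInitializerLifetime(InitializedEntity::InitializeVariable(VD),
///                              Init);
/// \endcode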
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier. CCEK_Noexcept ///< Condition in a noexcept(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE, NamedDecl *Dest = nullptr); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
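/// A minimal usage sketch (assuming \c S, \c Loc, \c FromE, and a
/// caller-defined ContextualImplicitConverter subclass, here the
/// hypothetical \c MyDiagnoser):
/// \code
///   MyDiagnoser Diagnoser;
///   ExprResult Converted =
///       S.PerformContextualImplicitConversion(Loc, FromE, Diagnoser);
/// \endcode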
ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From,
QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
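/// A minimal usage sketch (assuming \c S, \c FD, and \c OpLoc from the
/// caller's context):
/// \code
///   if (!S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, OpLoc))
///     return ExprError();
/// \endcode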
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); void AddOverloadedCallCandidates( LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass, NestedNameSpecifierLoc NNSLoc, DeclarationNameInfo DNI, const UnresolvedSetImpl &Fns, bool PerformADL = true); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, MultiExprArg Args); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation
LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false, bool AllowRecovery = false); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). 
LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
LOLR_StringTemplatePack, }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, SourceLocation TypoLoc); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading.
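/// A minimal usage sketch (assuming \c S, \c CurScope, \c Name, and
/// \c NameLoc from the caller's context):
/// \code
///   NamedDecl *Found =
///       S.LookupSingleName(CurScope, Name, NameLoc, Sema::LookupOrdinaryName);
///   if (!Found) { /* absent, ambiguous, or overloaded */ }
/// \endcode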
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id, bool IsUDSuffix); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing, StringLiteral *StringLit = nullptr); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); bool CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old); bool CheckRedeclarationInModule(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. 
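/// A minimal usage sketch (assuming \c S and a subexpression \c E that
/// could not be folded into a valid AST node):
/// \code
///   ExprResult Recovery =
///       S.CreateRecoveryExpr(E->getBeginLoc(), E->getEndLoc(), {E});
/// \endcode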
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef<Expr *> SubExprs, QualType T = QualType()); ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID, SourceLocation Loc); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( FunctionDecl *FD); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Handles semantic checking for features that are common to all attributes, /// such as checking whether a parameter was properly specified, or the /// correct number of arguments were passed, etc. Returns true if the /// attribute has been diagnosed. bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A, bool SkipArgCountCheck = false); bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A, bool SkipArgCountCheck = false); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for the nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const AttributeCommonInfo &CI, const Expr *E, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); llvm::Error isValidSectionSpecifier(StringRef Str); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str, const StringLiteral *Literal, bool &HasDefault, bool &HasCommas, SmallVectorImpl<StringRef> &Strings); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments.
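/// A minimal usage sketch (assuming \c S, a method type \c MethodTy, and
/// \c Loc from the caller's context):
/// \code
///   S.adjustMemberFunctionCC(MethodTy, /*IsStatic=*/false,
///                            /*IsCtorOrDtor=*/false, Loc);
/// \endcode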
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type through some means not written in source (e.g. API notes). /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param diagLoc The location to use for diagnostics. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \param overrideExisting Whether to override an existing, locally-specified /// nullability specifier rather than complaining about the conflict. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkImplicitNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation diagLoc, bool allowArrayTypes, bool overrideExisting); /// Process the attributes before creating an attributed statement. Returns /// the semantic attributes that have been processed. void ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesWithRange &InAttrs, SmallVectorImpl<const Attr *> &OutAttrs); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation.
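///
/// A rough usage sketch (not part of the original header; `S`, `ImpD`,
/// `IFaceD` and `AtEndLoc` are illustrative):
/// \code
///   // At the end of an @implementation, synthesize whatever is still
///   // missing:
///   DefaultSynthesizeProperties(S, ImpD, IFaceD, AtEndLoc);
/// \endcode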
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar /// which backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If the method is a property setter/getter /// and its property has a backing ivar, returns this ivar; otherwise, returns /// NULL. It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjCPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' types match and /// returns true or false accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface or /// protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a /// category match those implemented in its primary class, and /// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); /// Returns the default address space for method qualifiers. LangAS getDefaultCXXMethodAddrSpace() const; private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in the global method pool for the /// given selector. It checks the desired kind first; if none is found and the /// parameter checkTheOther is set, it then checks the other kind. If no such /// method or only one method is found, the function returns false; otherwise, /// it returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns the method which best matches the given argument list, or /// nullptr if none could be found. ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the /// global pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures.
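///
/// A rough usage sketch (not part of the original header; `Sel` and `R` are
/// illustrative):
/// \code
///   if (ObjCMethodDecl *MD = LookupFactoryMethodInGlobalPool(Sel, R))
///     (void)MD; // e.g. compare against the method found by regular lookup
/// \endcode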
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnAfterCompoundStatementLeadingPragmas(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// An RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops a function scope on exit.
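///
/// A rough usage sketch (not part of the original header; `SemaRef` and the
/// success flag are illustrative):
/// \code
///   FunctionScopeRAII FuncScope(SemaRef);
///   // ... build the function body ...
///   if (Succeeded)
///     FuncScope.disable(); // suppress the automatic PopFunctionScopeInfo()
/// \endcode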
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult BuildAttributedStmt(SourceLocation AttrsLoc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt); StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc, ConditionResult Cond, SourceLocation RParenLoc, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); struct NamedReturnInfo { const VarDecl *Candidate; enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable }; Status S; bool isMoveEligible() const { return S != None; }; bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; } }; enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn }; NamedReturnInfo getNamedReturnInfo( Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal); NamedReturnInfo getNamedReturnInfo(const VarDecl *VD); const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info, QualType ReturnType); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const NamedReturnInfo &NRInfo, Expr *Value, bool SupressSimplerImplicitMoves = false); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, bool AllowRecovery = false); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, NamedReturnInfo &NRInfo, bool SupressSimplerImplicitMoves); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl 
*BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// If VD is set but not otherwise used, diagnose, for a parameter or a /// variable. void DiagnoseUnusedButSetDecl(const VarDecl *VD); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
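///
/// A rough sketch of a call site (not part of the original header; `E` is
/// illustrative):
/// \code
///   // After forming an implicit null-to-pointer conversion from a
///   // literal 0:
///   diagnoseZeroToNullptrConversion(CK_NullToPointer, E);
/// \endcode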
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); TypeSourceInfo *TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
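//
// A rough sketch (not part of the original header; Loc, FD and VD are
// illustrative): a function named in a call is usually an odr-use, while a
// variable's marking may be delayed until we know how it is used:
//
//   MarkFunctionReferenced(Loc, FD); // MightBeOdrUse defaults to true
//   MarkVariableReferenced(Loc, VD);
//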
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false, ArrayRef<const Expr *> StopAt = None); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid.
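///
/// A rough usage sketch (not part of the original header; `E` and `PD` are
/// illustrative):
/// \code
///   if (tryToRecoverWithCall(E, PD))
///     return E; // recovery was attempted or a diagnostic was emitted
/// \endcode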
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Try to convert an expression \p E to type \p Ty. Returns the result of the /// conversion. ExprResult tryConvertExprToType(Expr *E, QualType Ty); /// Conditionally issue a diagnostic based on the statement's reachability /// analysis. /// /// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until /// the function body is parsed, and then do a basic reachability analysis to /// determine if the statement is reachable. If it is unreachable, the /// diagnostic will not be emitted. bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts, const PartialDiagnostic &PD); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but the diagnostic is only produced if all the specified /// statements are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseDependentMemberLookup(LookupResult &R); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None.
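///
/// A rough usage sketch (not part of the original header; `VD` is
/// illustrative): the result is typically threaded into DeclRefExpr
/// creation so the AST records non-odr-uses:
/// \code
///   NonOdrUseReason NOUR = getNonOdrUseReasonInCurrentContext(VD);
/// \endcode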
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, TypeSourceInfo *TSI); ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, ParsedType ParsedTy); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
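///
/// A rough usage sketch (not part of the original header; `StringToks` is
/// illustrative and holds the adjacent string-literal tokens collected by
/// the parser):
/// \code
///   ExprResult Lit = ActOnStringLiteral(StringToks, getCurScope());
/// \endcode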
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, MultiExprArg ArgExprs, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
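///
/// A rough usage sketch (not part of the original header; `Fn`, `ArgA`,
/// `ArgB` and the locations are illustrative), turning `f(a, b)` into a
/// call expression:
/// \code
///   SmallVector<Expr *, 4> Args = {ArgA, ArgB};
///   ExprResult Call =
///       ActOnCallExpr(getCurScope(), Fn, LParenLoc, Args, RParenLoc);
/// \endcode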
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false, bool AllowRecovery = false); Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id, MultiExprArg CallArgs); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an AltiVec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, UnresolvedSetImpl &Functions); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc, unsigned TemplateDepth); // Handle the final expression in a statement expression.
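//
// Source-level example of what flows through here (GNU statement
// expression; the names are illustrative):
//
//   int y = ({ int x = compute(); x + 1; }); // `x + 1` is the result
//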
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
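///
/// Source-level example handled by this entry point (the typedef names are
/// illustrative):
/// \code
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   typedef int   int4   __attribute__((ext_vector_type(4)));
///   int4 r = __builtin_convertvector(v, int4); // v has type float4
/// \endcode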
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; } CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing whether we've fully checked the various comparison // category types stored in ASTContext. The bit-index corresponds to the // integer value of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: enum class ComparisonCategoryUsage { /// The '<=>' operator was used in an expression and a builtin operator /// was selected. OperatorInExpression, /// A defaulted 'operator<=>' needed the comparison category. This /// typically only applies to 'std::strong_ordering', due to the implicit /// fallback return value. DefaultedOperator, }; /// Look up the specified comparison category types in the standard /// library, and check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs. QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc, ComparisonCategoryUsage Usage); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2.
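///
/// Source-level example of such a constructor:
/// \code
///   struct S { S(std::initializer_list<int>); }; // initializer-list ctor
/// \endcode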
bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void FilterUsingLookup(Scope *S, LookupResult &lookup); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc, const LookupResult *R = nullptr, const UsingDecl *UD = nullptr); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation, bool IsUsingIfExists); NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation EnumLoc, SourceLocation NameLoc, EnumDecl *ED); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation EnumLoc, const DeclSpec &); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor.
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. 
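///
/// A rough usage sketch (not part of the original header; `Loc`, `BaseCtor`
/// and `EPI` are illustrative):
/// \code
///   ImplicitExceptionSpecification Spec(*this);
///   Spec.CalledDecl(Loc, BaseCtor); // fold in each called special member
///   EPI.ExceptionSpec = Spec.getExceptionSpec();
/// \endcode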
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. 
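// Standalone illustration (editor's sketch) of the destructor rule the
// comment above describes: a user-defined destructor written without an
// exception specification behaves as if implicitly declared, i.e. it is
// noexcept when no base or member destructor may throw.
#if 0
#include <utility>

struct Widget {
  ~Widget() {} // no spec written; treated as if implicitly declared
};

static_assert(noexcept(std::declval<Widget &>().~Widget()),
              "implicitly noexcept in C++11 and later");
#endif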
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression.
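// Standalone illustration (editor's sketch) of the implicit-special-member
// rules the Declare/DefineImplicit* entry points above implement: declaring
// a move constructor suppresses the implicit copy constructor (it is
// defined as deleted), which is also isImplicitlyDeleted territory.
#if 0
#include <type_traits>

struct MoveOnly {
  MoveOnly() = default;
  MoveOnly(MoveOnly &&) = default; // user-declared move constructor...
};                                 // ...so the implicit copy ctor is deleted

static_assert(std::is_move_constructible<MoveOnly>::value, "");
static_assert(!std::is_copy_constructible<MoveOnly>::value, "");
#endif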
ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, QualType DeclInitType, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr *> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); // Checks that the vector type should be initialized from a scalar // by splatting the value rather than populating a single element. // This is the case for AltiVecVector types as well as with // AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified. bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy); // Checks if the -faltivec-src-compat=gcc option is specified. // If so, AltiVecVector, AltiVecBool and AltiVecPixel types are // treated the same way as they are when trying to initialize // these vectors on gcc (an error is emitted). bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy, QualType SrcTy); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). 
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee, SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. 
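// Editor's illustrative sketch of the save/restore idiom that
// CXXThisScopeRAII above applies to CXXThisTypeOverride: record the old
// value on entry, install the override, and restore the old value on scope
// exit. ScopedOverrideSketch is a hypothetical stand-in, not a Clang type.
#if 0
template <typename T> class ScopedOverrideSketch {
  T &Slot;  // the variable being temporarily overridden
  T Saved;  // its previous value
public:
  ScopedOverrideSketch(T &Target, T NewValue) : Slot(Target), Saved(Target) {
    Slot = NewValue;                          // install the override
  }
  ~ScopedOverrideSketch() { Slot = Saved; }   // restore on scope exit
};
#endif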
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
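// Editor's illustrative sketch (plain C++, hypothetical names) of the lookup
// policy the AllocationFunctionScope enum above encodes: for a
// new-expression of class type, a class-scope operator new is found before
// the global one, and '::new' restricts lookup to the global scope.
#if 0
#include <cstddef>
#include <new>

struct Pooled {
  static void *operator new(std::size_t n) { return ::operator new(n); } // AFS_Class
  static void operator delete(void *p) { ::operator delete(p); }
};

void demo() {
  Pooled *p = new Pooled;  // AFS_Both: the class-scope overload wins
  int *q = ::new int(0);   // AFS_Global: '::' bypasses any class overload
  delete p;
  ::delete q;
}
#endif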
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); // Complete an enum decl, maybe without a scope spec. bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L, CXXScopeSpec *SS = nullptr); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. 
/// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. 
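// Standalone illustration (editor's sketch) of the init-capture forms that
// buildLambdaInitCaptureInitialization above analyzes: '=' is
// copy-initialization, while parenthesized or braced initializers are the
// direct-initialization cases (InitKind != CopyInit).
#if 0
#include <memory>

void demo() {
  auto p = std::make_unique<int>(7);
  auto byCopy   = [v = *p] { return v; };            // copy-initialization
  auto byDirect = [v{*p * 2}] { return v; };         // direct (braced) init
  auto byMove   = [q = std::move(p)] { return *q; }; // move into the closure
  byCopy(); byDirect(); byMove();
}
#endif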
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc, ExprResult RequiresClause); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
/// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache; llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &> SatisfactionCache; public: const NormalizedConstraint * getNormalizedAssociatedConstraints( NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints); /// \brief Check whether the given declaration's associated constraints are /// at least as constrained as another declaration's according to the /// partial ordering of constraints. /// /// \param Result If no error occurred, receives the result of true if D1 is /// at least as constrained as D2, and false otherwise. /// /// \returns true if an error occurred, false otherwise. bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2, bool &Result); /// If D1 was not at least as constrained as D2, but would have been if a pair /// of the atomic constraints involved had been declared in a concept rather than /// repeated in two separate places in code, emit a diagnostic pointing this out. /// \returns true if such a diagnostic was emitted, false otherwise. bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2); /// \brief Check whether the given list of constraint expressions is /// satisfied (as if in a 'conjunction') given template arguments. /// \param Template the template-like entity that triggered the constraints /// check (either a concept or a constrained entity). /// \param ConstraintExprs a list of constraint expressions, treated as if /// they were 'AND'ed together. /// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if false is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. /// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise.
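// Standalone C++20 illustration (editor's sketch) of the partial ordering
// that IsAtLeastAsConstrained above decides: when two candidates are
// otherwise tied, overload resolution picks the one whose associated
// constraints subsume the other's.
#if 0
template <typename T>
concept HasValueType = requires { typename T::value_type; };

template <typename T> constexpr int pick(T) { return 0; }     // unconstrained
template <HasValueType T> constexpr int pick(T) { return 1; } // more constrained

struct WithValueType { using value_type = int; };

static_assert(pick(WithValueType{}) == 1, "more-constrained overload wins");
static_assert(pick(42) == 0, "only the unconstrained candidate is viable");
#endif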
bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constraints are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool 
SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref<Scope *()> EnterScope); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl
*RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
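// Standalone illustration (editor's sketch) of two of the override rules
// enforced above: covariant return types (C++ [class.virtual]p5,
// CheckOverridingFunctionReturnType) and overriding a function marked
// 'final' ([class.virtual]p4).
#if 0
struct Shape {
  virtual Shape *clone() const { return new Shape(*this); }
  virtual ~Shape() {}
};
struct Circle : Shape {
  // Covariant: the override may narrow Shape* to Circle*; 'final' then
  // forbids any further override.
  Circle *clone() const final { return new Circle(*this); }
};
struct Oval : Circle {
  // Oval *clone() const override; // error: Circle::clone is marked 'final'
};
#endif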
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, const SourceRange &, DeclAccessPair FoundDecl); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, ArrayRef<Expr *> ArgExprs, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. static NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. RequiredTemplateKind(TemplateNameIsRequiredTag) {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. 
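// Standalone illustration (editor's sketch) of when a template name must be
// announced with the 'template' keyword, the situation RequiredTemplateKind
// above tracks via the keyword's location: inside a dependent context the
// parser cannot otherwise know that 'get' names a template.
#if 0
struct Bag {
  template <int I> int get() const { return I; }
};

template <typename T> int probe(const T &t) {
  return t.template get<0>(); // 'template' keyword required: T is dependent
}

int use() { return probe(Bag{}); }
#endif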
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool BuildTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc, bool AllowUnexpandedPack); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
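/// /// As a rough sketch, two of these contexts: /// \code /// template<typename T> struct S; // TPC_ClassTemplate /// template<typename T> using A = S<T>; // TPC_TypeAliasTemplate /// \endcode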
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
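/// /// A minimal sketch of the kind of reference this forms (names illustrative): /// \code /// template<typename T> constexpr T pi = T(3.1415926535897932385L); /// double d = pi<double>; // reference to the specialization pi<double> /// \endcode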
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
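/// /// An illustrative sketch covering the three kinds (names illustrative): /// \code /// template<typename T, unsigned N> void f(T (&)[N]); /// int a[3]; /// void g() { /// f<int>(a); // T is specified; N is deduced from the array bound /// f(a); // both T and N are deduced /// } /// \endcode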
enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \param ConstraintsNotSatisfied If provided, and an error occurred, will /// receive true if the cause for the error is the associated constraints of /// the template not being satisfied by the template arguments. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations.
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
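/// /// For illustration, a minimal sketch of such a specifier in use (names illustrative): /// \code /// template<typename MetaFun, typename T1, typename T2> /// using apply_t = typename MetaFun::template apply<T1, T2>::type; /// \endcode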
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack 
might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block, /// A type constraint. UPPC_TypeConstraint, // A requirement in a requires-expression. UPPC_Requirement, // A requires-clause. UPPC_RequiresClause, }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given requires-expression contains an unexpanded reference to one /// of its own parameter packs, diagnose the error. /// /// \param RE The requires-expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise.
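/// /// A minimal sketch of a nested-name-specifier that triggers this diagnostic: /// \code /// template<typename ...Ts> struct X { /// void f() { typename Ts::type t; } // error: 'Ts' is not expanded /// }; /// \endcode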
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. 
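/// /// A minimal sketch, e.g. a pack expansion in a base-specifier list (names illustrative): /// \code /// template<typename ...Mixins> struct X : Mixins... {}; /// \endcode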
/// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
/// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// The deduced arguments did not satisfy the constraints associated /// with the template. TDK_ConstraintsNotSatisfied, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
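/// /// As a rough sketch, a call such as the following records one such entry /// per argument used for deduction (names illustrative): /// \code /// template<typename T> void f(T, T); /// void g() { f(1, 2); } // two OriginalCallArg entries, both with T = int /// \endcode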
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); // Substitute auto in TypeWithAuto for a Dependent auto type QualType SubstAutoTypeDependent(QualType TypeWithAuto); // Substitute auto in TypeWithAuto for a Dependent auto type TypeSourceInfo * SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
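/// /// A minimal sketch of class template argument deduction through an /// implicitly declared guide (names illustrative): /// \code /// template<typename T> struct Pair { Pair(T, T); }; /// Pair p(1, 2); // implicit guide deduces Pair<int> /// \endcode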
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are instantiating a requirement of a requires expression. RequirementInstantiation, /// We are checking the satisfaction of a nested requirement of a requires /// expression. NestedRequirementConstraintsCheck, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, // We are normalizing a constraint expression. ConstraintNormalization, // We are substituting into the parameter mapping of an atomic constraint // during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. MarkingClassDllexported, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity.
const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema?
It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callback is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates to true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction.
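/// /// For illustration, both cases in a sketch (names illustrative): /// \code /// template<typename T> T twice(T v) { return v + v; } /// int a = twice(2); // T deduced from the call argument /// int b = twice<int>(2); // T explicitly specified /// \endcode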
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
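/// /// A minimal sketch of a constraint that is normalized when checked (names illustrative): /// \code /// template<typename T> concept Small = sizeof(T) <= 4; /// template<typename T> requires Small<T> && (sizeof(T) > 1) void h(T); /// \endcode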
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintNormalization, NamedDecl *Template, SourceRange InstantiationRange); struct ParameterMappingSubstitution {}; /// \brief Note that we are substituting into the parameter mapping of an /// atomic constraint during constraint normalization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParameterMappingSubstitution, NamedDecl *Template, SourceRange InstantiationRange); /// \brief Note that we are substituting template arguments into a part of /// a requirement of a requires expression. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::Requirement *Req, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are checking the satisfaction of the constraint /// expression inside of a nested requirement. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::NestedRequirement *Req, ConstraintsCheck, SourceRange InstantiationRange = SourceRange()); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum number of /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5.
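/// /// For example, both operands below are unevaluated (names illustrative): /// \code /// int f(); /// auto n = sizeof(f()); // f() is never evaluated /// decltype(f()) m = 0; // likewise here /// \endcode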
bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } bool isImmediateFunctionContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isImmediateFunctionContext(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) { assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } else { // Template instantiations in the PCH may be delayed until the TU. S.PendingInstantiations.swap(SavedPendingInstantiations); S.PendingInstantiations.insert(S.PendingInstantiations.end(), SavedPendingInstantiations.begin(), SavedPendingInstantiations.end()); } } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos.
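/// Typical use (a sketch, not from the original header; `NumParams` and the
/// FunctionProtoType::ExtProtoInfo `EPI` are assumed):
/// \code
///   Sema::ExtParameterInfoBuilder Builder;
///   Builder.set(2, FunctionProtoType::ExtParameterInfo().withIsNoEscape(true));
///   // getPointerOrNull returns a non-null array only if some entry was
///   // interesting, so trivial infos cost nothing.
///   EPI.ExtParameterInfos = Builder.getPointerOrNull(NumParams);
/// \endcode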
class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index, /// growing the array as needed. void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='.
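/// Illustrative call shape (a sketch; `S`, `RD`, and `Spaceship` are assumed
/// to be in scope):
/// \code
///   if (FunctionDecl *EqEq = S.SubstSpaceshipAsEqualEqual(RD, Spaceship))
///     ; // EqEq is the implicit 'operator==' synthesized for RD
/// \endcode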
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired 
= false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( 
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
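/// Illustrative call shape for a type-args-only case such as rebuilding
/// 'NSArray<NSString *>' (a sketch; `S`, `Base`, `Loc`, the angle-bracket
/// locations, and `TypeArgs` are assumed, and the protocol arguments are
/// left empty):
/// \code
///   QualType T = S.BuildObjCObjectType(Base, Loc, LAngleLoc, TypeArgs,
///                                      RAngleLoc, SourceLocation(), {}, {},
///                                      SourceLocation());
/// \endcode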
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
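/// A dispatch sketch over the result (illustrative; the surrounding parser
/// state such as `CurScope`, `Name`, and the flags is assumed):
/// \code
///   ParsedType ReceiverType;
///   switch (S.getObjCMessageKind(CurScope, Name, NameLoc, IsSuper,
///                                HasTrailingDot, ReceiverType)) {
///   case Sema::ObjCSuperMessage:    /* -> ActOnSuperMessage(...)    */ break;
///   case Sema::ObjCClassMessage:    /* -> ActOnClassMessage(...)    */ break;
///   case Sema::ObjCInstanceMessage: /* -> ActOnInstanceMessage(...) */ break;
///   }
/// \endcode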
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
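/// A sketch of how a caller might act on the result (illustrative; `Method`
/// and `CurrentClass` are assumed):
/// \code
///   switch (S.checkRelatedResultTypeCompatibility(Method, CurrentClass)) {
///   case Sema::RTC_Compatible:   break;              // nothing to do
///   case Sema::RTC_Incompatible: /* diagnose */ break;
///   case Sema::RTC_Unknown:      break;              // insufficient info
///   }
/// \endcode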
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, NamedDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? 
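/// For example (a sketch; `S` and `Loc` are assumed), enabling reassociation
/// through the pragma handler declared below makes this query return false:
/// \code
///   S.ActOnPragmaFPReassociate(Loc, /*IsEnabled=*/true);
///   assert(!S.isPreciseFPEnabled()); // reassociation is now permitted
/// \endcode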
bool isPreciseFPEnabled() { return !CurFPFeatures.getAllowFPReassociate() && !CurFPFeatures.getNoSignedZero() && !CurFPFeatures.getAllowReciprocal() && !CurFPFeatures.getAllowApproxFunc(); } void ActOnPragmaFPEvalMethod(SourceLocation Loc, LangOptions::FPEvalMethodKind Value); /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action, PragmaFloatControlKind Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D. void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Annot, MutableArrayRef<Expr *> Args); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
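/// Illustrative call shape for 'amdgpu_waves_per_eu(2, 4)' (a sketch; the
/// attribute info `CI` and the parsed expressions `MinExpr`/`MaxExpr` are
/// assumed; the single-argument form of the attribute would pass a null max):
/// \code
///   S.addAMDGPUWavesPerEUAttr(D, CI, MinExpr, MaxExpr);
/// \endcode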
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); /// Lookup 'coroutine_traits' in std namespace and std::experimental /// namespace. The namespace found is recorded in Namespace. ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc, NamespaceDecl *&Namespace); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; struct DeclareTargetContextInfo { struct MapInfo { OMPDeclareTargetDeclAttr::MapTypeTy MT; SourceLocation Loc; }; /// Explicitly listed variables and functions in a 'to' or 'link' clause. llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped; /// The 'device_type' as parsed from the clause. OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any; /// The directive kind, `begin declare target` or `declare target`. OpenMPDirectiveKind Kind; /// The directive with indirect clause. Optional<Expr *> Indirect; /// The directive location. SourceLocation Loc; DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc) : Kind(Kind), Loc(Loc) {} }; /// Number of nested '#pragma omp declare target' directives. SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true, bool SuppressExprDiags = false); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Analyzes and checks a loop nest for use by a loop transformation. /// /// \param Kind The loop transformation directive kind. 
/// \param NumLoops How many nested loops the directive is expecting. /// \param AStmt Associated statement of the transformation directive. /// \param LoopHelpers [out] The loop analysis result. /// \param Body [out] The body code nested in \p NumLoops loop. /// \param OriginalInits [out] Collection of statements and declarations that /// must have been executed/declared before entering the /// loop. /// /// \return Whether there was any error. bool checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers, Stmt *&Body, SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>> &OriginalInits); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment. bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle a `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle a `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. 
bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Tries to capture a lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for which /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); /// Called on well-formed '\#pragma omp metadirective' after parsing /// of the associated statement.
StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp [begin] assume[s]'. void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<std::string> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } /// Check if there is an active global `omp assumes` directive. bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } /// Called on well-formed '#pragma omp end assumes'. void ActOnOpenMPEndAssumesDirective(); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on the Requires directive. OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'.
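/// A sketch of the parser's calling sequence for
/// '#pragma omp declare mapper(id : T v) map(...)' (illustrative; `CurScope`,
/// `DC`, `Name`, `MapperType`, `VN`, `AS`, and `Clauses` are assumed):
/// \code
///   ExprResult MapperVarRef = S.ActOnOpenMPDeclareMapperDirectiveVarDecl(
///       CurScope, MapperType, StartLoc, VN);
///   // ... parse the map clauses into Clauses ...
///   DeclGroupPtrTy DG = S.ActOnOpenMPDeclareMapperDirective(
///       CurScope, DC, Name, MapperType, StartLoc, VN, AS, MapperVarRef.get(),
///       Clauses);
/// \endcode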
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Called at the end of target region i.e. '#pragma omp end declare target'. const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective(); /// Called once a target context is completed, that can be when a /// '#pragma omp end declare target' was encountered or when a /// '#pragma omp declare target' without declaration-definition-seq was /// encountered. void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, DeclareTargetContextInfo &DTCI); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true if currently in OpenMP task with untied clause context. bool isInOpenMPTaskUntiedContext() const; /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to /// an OpenMP loop directive. StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt); /// Process a canonical OpenMP loop nest that can either be a canonical /// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an /// OpenMP loop transformation construct. StmtResult ActOnOpenMPLoopnest(Stmt *AStmt); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. 
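/// The usual pairing with ActOnOpenMPRegionStart (a sketch; `CurScope` and
/// `Clauses` are assumed, and `parseAssociatedStatement()` is a hypothetical
/// stand-in for the parser producing the region body):
/// \code
///   S.ActOnOpenMPRegionStart(OMPD_parallel, CurScope);
///   StmtResult Body = parseAssociatedStatement();
///   StmtResult Region = S.ActOnOpenMPRegionEnd(Body, Clauses);
/// \endcode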
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '#pragma omp tile' after parsing of its clauses and /// the associated statement. StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '#pragma omp unroll' after parsing of its clauses /// and the associated statement. StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. 
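/// Illustrative call shape (a sketch; the clause list, associated statement,
/// and locations are assumed). The map collects variables whose data-sharing
/// attributes are inherited implicitly:
/// \code
///   Sema::VarsWithInheritedDSAType VarsWithImplicitDSA;
///   StmtResult R = S.ActOnOpenMPParallelForSimdDirective(
///       Clauses, AStmt, StartLoc, EndLoc, VarsWithImplicitDSA);
/// \endcode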
StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp interop'. StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp dispatch' after parsing of the /// associated statement. StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp masked' after parsing of the /// associated statement. StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp loop' after parsing of the /// associated statement. StmtResult ActOnOpenMPGenericLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type, bool IsDeclareSimd = false); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks the '\#pragma omp declare variant' variant function and the /// original function after parsing of the associated method/function. /// \param DG Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The trait info object representing the match clause. /// \param NumAppendArgs The number of omp_interop_t arguments to account for /// in checking. /// \returns None if the function/variant function are not compatible with /// the pragma; otherwise, the pair of the original function and the variant /// ref expression. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, unsigned NumAppendArgs, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p FD. /// \param TI The context traits associated with the function variant. /// \param AdjustArgsNothing The list of 'nothing' arguments. /// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments. /// \param AppendArgs The list of 'append_args' arguments. /// \param AdjustArgsLoc The Location of an 'adjust_args' clause. /// \param AppendArgsLoc The Location of an 'append_args' clause. /// \param SR The SourceRange of the 'declare variant' directive. void ActOnOpenMPDeclareVariantDirective( FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, ArrayRef<Expr *> AdjustArgsNothing, ArrayRef<Expr *> AdjustArgsNeedDevicePtr, ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs, SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'align' clause. OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'sizes' clause. OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'full' clause. OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'partial' clause. OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause.
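/// Example usage (an illustrative sketch; NumTasksExpr, Clauses, and the /// locations are assumptions): the single-expression clause actions return /// a freshly built clause, or nullptr if the argument failed to check: /// /// if (OMPClause *C = Actions.ActOnOpenMPNumTasksClause( /// NumTasksExpr, StartLoc, LParenLoc, EndLoc)) /// Clauses.push_back(C);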
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'detach' clause. OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'when' clause. OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'compare' clause. OMPClause *ActOnOpenMPCompareClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. 
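/// Example usage (an illustrative sketch): the atomic memory-order clauses /// carry no arguments, so only the clause's source range is passed: /// /// OMPClause *C = Actions.ActOnOpenMPAcqRelClause(StartLoc, EndLoc);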
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acquire' clause. OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'release' clause. OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'relaxed' clause. OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'init' clause. OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs, bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'use' clause. OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'destroy' clause. OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'novariants' clause. OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'nocontext' clause. OMPClause *ActOnOpenMPNocontextClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'filter' clause. OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc); /// Called on well-formed 'inclusive' clause.
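/// Example usage (an illustrative sketch; VarList is assumed to hold the /// parsed list items of the clause): /// /// OMPClause *C = Actions.ActOnOpenMPInclusiveClause(VarList, StartLoc, /// LParenLoc, EndLoc);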
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. 
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause *ActOnOpenMPMapClause( ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, bool NoDiagnose = false, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. 
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Data for list of allocators. struct UsesAllocatorsData { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; /// Called on well-formed 'uses_allocators' clause. OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<UsesAllocatorsData> Data); /// Called on well-formed 'affinity' clause. OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// Called on a well-formed 'bind' clause. OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_PRValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. 
Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check whether the given statement can have musttail applied to it, /// issuing a diagnostic and returning false if not. In the success case, /// the statement is rewritten to remove implicit nodes from the return /// value. bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA); private: /// Check whether the given statement can have musttail applied to it, /// issuing a diagnostic and returning false if not. bool checkMustTailAttr(const Stmt *St, const Attr &MTA); public: /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type.
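/// A typical consumer of these codes (an illustrative sketch; LHSType and /// RHS are assumptions) runs a constraint check and then lets /// DiagnoseAssignmentResult decide whether a diagnostic is needed: /// /// AssignConvertType ConvTy = /// S.CheckSingleAssignmentConstraints(LHSType, RHS); /// if (S.DiagnoseAssignmentResult(ConvTy, Loc, LHSType, /// RHS.get()->getType(), RHS.get(), /// AA_Assigning)) /// return ExprError();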
enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointer types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointer types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types, which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ, e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with a __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright; it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum.
If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
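/// For instance (an illustrative sketch; LHS and RHS are the operand /// ExprResults), a division is checked like this, with a null QualType /// signalling that a diagnostic was already emitted: /// /// QualType ResultTy = S.CheckMultiplyDivideOperands( /// LHS, RHS, OpLoc, /*IsCompAssign=*/false, /*IsDivide=*/true); /// if (ResultTy.isNull()) /// return ExprError();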
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
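/// For instance (an illustrative sketch), a binary operator on two vector /// operands can be checked as follows; a null result again means an error /// was already diagnosed: /// /// QualType VecTy = S.CheckVectorOperands(LHS, RHS, Loc, /// /*IsCompAssign=*/false, /// /*AllowBothBool=*/false, /// /*AllowBoolConversion=*/false);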
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool isValidSveBitcast(QualType srcType, QualType destType); bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy); bool areVectorTypesSameSize(QualType srcType, QualType destType); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckMatrixCast - Check type constraints for matrix casts. // We allow casting between matrices of the same dimensions, i.e. when they // have the same number of rows and columns. Returns true if the cast is // invalid. bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy, CastKind &Kind); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether a +1 expr is being assigned /// to a weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether a +1 expr is being assigned /// to a weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; QualType PreferredConditionType(ConditionKind K) const { return K == ConditionKind::Switch ? Context.IntTy : Context.BoolTy; } ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK, bool MissingOK = false); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return The checked and converted condition, or ExprError() if there /// were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as a condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns ExprError() if the conversion to bool /// is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic.
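/// Example usage (an illustrative sketch, mirroring how a 'case' value can /// be converted to the switch condition type; the exact diagnostic ID is an /// assumption here): /// /// llvm::APSInt CaseVal = /* evaluated case value */; /// S.ConvertIntegerToTypeWarnOnOverflow(CaseVal, CondWidth, CondIsSigned, /// CaseLoc, /// diag::warn_case_value_overflow);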
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual SemaDiagnosticBuilder diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T); virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S, SourceLocation Loc) = 0; virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc); virtual ~VerifyICEDiagnoser() {} }; enum AllowFoldKind { NoFold, AllowFold, }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, AllowFoldKind CanFold = NoFold) { return VerifyIntegerConstantExpression(E, nullptr, CanFold); } /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics /// unless \p EmitOnBothSides is true. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD = nullptr); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, const PartialDiagnostic &PD, FunctionDecl *FD = nullptr) { return targetDiag(Loc, PD.getDiagID(), FD) << PD; } /// Check if the type is allowed to be used for the current target. void checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D = nullptr); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null.
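/// Example (illustrative sketch, not from the original header), assuming a /// Sema instance `S` and a FunctionDecl `FD` in scope: /// /// if (S.IdentifyCUDATarget(FD) == Sema::CFT_Global) { /// // FD is a CUDA kernel; <<<...>>> launch syntax applies to it. /// }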
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); enum CUDAVariableTarget { CVT_Device, /// Emitted on device side with a shadow variable on host side CVT_Host, /// Emitted on host side only CVT_Both, /// Emitted on both sides with different addresses CVT_Unified, /// Emitted as a unified address, e.g. managed variables }; /// Determines whether the given variable is emitted on host or device side. CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas are by default host device functions unless they have an /// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p VD satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p VD. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure a kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration.
PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); enum class AttributeCompletion { Attribute, Scope, None, }; void CodeCompleteAttribute( AttributeCommonInfo::Syntax Syntax, AttributeCompletion Completion = AttributeCompletion::Attribute, const IdentifierInfo *Scope = nullptr); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. /// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc, bool Braced); QualType ProduceCtorInitMemberSignatureHelp( Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc, bool Braced); QualType ProduceTemplateArgumentSignatureHelp( TemplateTy, ArrayRef<ParsedTemplateArgument>, SourceLocation LAngleLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
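// Illustrative sketch (not part of the original header): a parser-side // client could use ProduceCallSignatureHelp while parsing call arguments to // obtain the expected type of the next argument; `Actions`, `Fn`, // `ArgExprs`, and `LParenLoc` are assumed to be in scope: // // QualType Expected = // Actions.ProduceCallSignatureHelp(Fn, ArgExprs, LParenLoc); // if (!Expected.isNull()) { // // Rank completion candidates convertible to `Expected` higher. // }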
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> 
ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinArithmeticFence(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); bool SemaBuiltinElementwiseMath(CallExpr *TheCall); bool PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall); bool PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall); // Matrix builtin handling. 
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. 
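/// Example (illustrative sketch, not from the original header): registering /// a magic value so that calls carrying argument_with_type_tag attributes /// can be checked; `S` is the Sema instance and `MPIKind` a hypothetical /// IdentifierInfo for the tag kind: /// /// S.RegisterTypeTagForDatatype(MPIKind, /*MagicValue=*/0x1, /// S.Context.IntTy, /// /*LayoutCompatible=*/true, /// /*MustBeNull=*/false);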
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Nullable_result = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing.
Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to a /// function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we allow for one extra in-progress argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the set of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so, it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding.
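/// Example usage (illustrative sketch, not from the original header): /// emitting a local diagnostic for a misaligned member access; the /// diagnostic ID is shown for illustration only: /// /// S.RefersToMemberWithReducedAlignment( /// E, [&](Expr *Sub, RecordDecl *RD, FieldDecl *FD, CharUnits A) { /// S.Diag(Sub->getExprLoc(), /// diag::warn_taking_address_of_packed_member) /// << FD << RD; /// });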
void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for the device yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); void deepTypeCheckForSYCLDevice(SourceLocation UsedAt, llvm::DenseSet<QualType> Visited, ValueDecl *DeclToCheck); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context.
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; template <> void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, AlignPackInfo Value); } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getHashValue()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
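// Usage sketch for the EnterExpressionEvaluationContext RAII class above // (illustrative, not from the original header); `Actions` is assumed to be // a Sema instance in scope: // // { // EnterExpressionEvaluationContext Unevaluated( // Actions, Sema::ExpressionEvaluationContext::Unevaluated); // // Expressions built here are in an unevaluated operand context and // // will not be marked odr-used. // } // PopExpressionEvaluationContext() runs here automatically.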
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/DarwinSDKInfo.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class 
CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. 
SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Tracks expected type during expression parsing, for use in code completion. /// The type is tied to a particular token, all functions that update or consume /// the type take a start location of the token they are looking at as a /// parameter. This avoids updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Handles e.g. BaseType{ .D = Tok... void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType, const Designation &D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. /// /// The callback should also emit signature help as a side-effect, but only /// if the completion point has been reached. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); /// Get the expected type associated with this location, if any. /// /// If the location is a function argument, determining the expected type /// involves considering all function overloads and the arguments so far. /// In this case, signature help for these function overloads will be reported /// as a side-effect (only if the completion point has been reached). QualType get(SourceLocation Tok) const { if (!Enabled || Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: bool Enabled; /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc.
QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// Source of additional semantic information. ExternalSemaSource *ExternalSource; /// Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we would otherwise have to do since we can't directly use the llvm /// constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 32; static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When an AlignPackInfo itself cannot be used, this returns a 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding, it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attribute on a decl. return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber; /// \brief True if it is an XL #pragma align/pack stack.
    bool XLStack;

    /// \brief Uninitialized pack value.
    static constexpr unsigned char UninitPackVal = -1;

    // Masks to encode and decode an AlignPackInfo.
    static constexpr uint32_t IsXLMask{0x0000'0001};
    static constexpr uint32_t AlignModeMask{0x0000'0006};
    static constexpr uint32_t PackAttrMask{0x0000'0008};
    static constexpr uint32_t PackNumMask{0x0000'01F0};
  };

  template<typename ValueType>
  struct PragmaStack {
    struct Slot {
      llvm::StringRef StackSlotLabel;
      ValueType Value;
      SourceLocation PragmaLocation;
      SourceLocation PragmaPushLocation;
      Slot(llvm::StringRef StackSlotLabel, ValueType Value,
           SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
          : StackSlotLabel(StackSlotLabel), Value(Value),
            PragmaLocation(PragmaLocation),
            PragmaPushLocation(PragmaPushLocation) {}
    };

    void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
             llvm::StringRef StackSlotLabel, ValueType Value) {
      if (Action == PSK_Reset) {
        CurrentValue = DefaultValue;
        CurrentPragmaLocation = PragmaLocation;
        return;
      }
      if (Action & PSK_Push)
        Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
                           PragmaLocation);
      else if (Action & PSK_Pop) {
        if (!StackSlotLabel.empty()) {
          // If we've got a label, try to find it and jump there.
          auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
            return x.StackSlotLabel == StackSlotLabel;
          });
          // If we found the label, pop from there.
          if (I != Stack.rend()) {
            CurrentValue = I->Value;
            CurrentPragmaLocation = I->PragmaLocation;
            Stack.erase(std::prev(I.base()), Stack.end());
          }
        } else if (!Stack.empty()) {
          // We do not have a label, just pop the last entry.
          CurrentValue = Stack.back().Value;
          CurrentPragmaLocation = Stack.back().PragmaLocation;
          Stack.pop_back();
        }
      }
      if (Action & PSK_Set) {
        CurrentValue = Value;
        CurrentPragmaLocation = PragmaLocation;
      }
    }

    // MSVC seems to add artificial slots to #pragma stacks on entering a C++
    // method body to restore the stacks on exit, so it works like this:
    //
    //   struct S {
    //     #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
    //     void Method {}
    //     #pragma <name>(pop, InternalPragmaSlot)
    //   };
    //
    // It works even with #pragma vtordisp, although MSVC doesn't support
    //   #pragma vtordisp(push [, id], n)
    // syntax.
    //
    // Push / pop a named sentinel slot.
    void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
      assert((Action == PSK_Push || Action == PSK_Pop) &&
             "Can only push / pop #pragma stack sentinels!");
      Act(CurrentPragmaLocation, Action, Label, CurrentValue);
    }

    // Constructors.
    explicit PragmaStack(const ValueType &Default)
        : DefaultValue(Default), CurrentValue(Default) {}

    bool hasValue() const { return CurrentValue != DefaultValue; }

    SmallVector<Slot, 2> Stack;
    ValueType DefaultValue; // Value used for PSK_Reset action.
    ValueType CurrentValue;
    SourceLocation CurrentPragmaLocation;
  };
  // FIXME: We should serialize / deserialize these if they occur in a PCH (but
  // we shouldn't do so if they're in a module).

  /// Whether to insert vtordisps prior to virtual bases in the Microsoft
  /// C++ ABI. Possible values are 0, 1, and 2, which mean:
  ///
  /// 0: Suppress all vtordisps
  /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
  ///    structors
  /// 2: Always insert vtordisps to support RTTI on partially constructed
  ///    objects
  PragmaStack<MSVtorDispMode> VtorDispStack;
  PragmaStack<AlignPackInfo> AlignPackStack;
  // The current #pragma align/pack values and locations at each #include.
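  // ------------------------------------------------------------------------
  // Expository sketch (added commentary, not part of the original header;
  // `Loc` is an assumed SourceLocation and the values are illustrative):
  // how the two pieces above compose.
  //
  // (1) AlignPackInfo survives serialization via its raw 32-bit encoding:
  //
  //   Sema::AlignPackInfo Info(Sema::AlignPackInfo::Packed, /*Num=*/4,
  //                            /*IsXL=*/false);
  //   uint32_t Raw = Sema::AlignPackInfo::getRawEncoding(Info);
  //   assert(Sema::AlignPackInfo::getFromRawEncoding(Raw) == Info);
  //
  // (2) PragmaStack models labeled push/pop, e.g. the effect of
  //     "#pragma pack(push, r1, 4)" followed by "#pragma pack(pop, r1)":
  //
  //   Sema::PragmaStack<int> Stack(/*Default=*/8);
  //   Stack.Act(Loc, Sema::PSK_Push_Set, "r1", 4); // save 8, current -> 4
  //   Stack.Act(Loc, Sema::PSK_Pop, "r1", 0);      // current -> 8 again
  //   assert(!Stack.hasValue());                   // back at the default
  // ------------------------------------------------------------------------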
struct AlignPackIncludeState { AlignPackInfo CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. 
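  // Expository sketch (added commentary, not part of the original header;
  // `S` is an assumed Sema instance and `IsCXXMethod` an assumed flag): the
  // PragmaStackSentinelRAII above applies the MSVC behavior documented in
  // PragmaStack around a method body.
  //
  //   {
  //     Sema::PragmaStackSentinelRAII
  //         Sentinel(S, "InternalPragmaState", /*ShouldAct=*/IsCXXMethod);
  //     // ... parse the method body; any #pragma state pushed inside it is
  //     // popped back to the sentinel when Sentinel is destroyed ...
  //   }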
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encoutered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. 
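  // Expository example (added commentary, not part of the original header):
  // the abbreviated-function-template case that InventedParameterInfos above
  // exists for. A C++20 declaration such as
  //
  //   void f(auto X, auto Y);
  //
  // behaves as if written
  //
  //   template <typename T0, typename T1> void f(T0 X, T1 Y);
  //
  // where T0 and T1 are the "invented" template parameters tracked here.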
UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). 
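  // Expository sketch (added commentary, not part of the original header;
  // `S` is an assumed Sema instance, and constructing a
  // sema::DelayedDiagnosticPool over the current pool is assumed from
  // DelayedDiagnostic.h): typical use of the pool machinery above while
  // parsing one declaration.
  //
  //   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
  //   Sema::ParsingDeclState State = S.DelayedDiagnostics.push(Pool);
  //   // ... parse the declarator; access/availability diagnostics are
  //   // recorded in Pool instead of being emitted immediately ...
  //   S.DelayedDiagnostics.popWithoutEmitting(State); // or emit from Pool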
void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. 
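  // Expository sketch (added commentary, not part of the original header;
  // `S` and `FD` are an assumed Sema instance and FunctionDecl*): ContextRAII
  // above is the usual way to temporarily enter another DeclContext.
  //
  //   {
  //     Sema::ContextRAII SavedContext(S, FD);
  //     // ... build and check code as if lexically inside FD ...
  //   } // original CurContext, 'this' type, and scope indices restored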
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The namespace where coroutine components are defined. In standard, /// they are defined in std namespace. And in the previous implementation, /// they are defined in std::experimental namespace. NamespaceDecl *CoroTraitsNamespaceCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. 
bool GlobalNewDeleteDeclared; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// In addition of being constant evaluated, the current expression /// occurs in an immediate function context - either a consteval function /// or a consteval if function. ImmediateFunctionContext, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. 
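  // Expository mapping (added commentary, not part of the original header)
  // from source constructs to the ExpressionEvaluationContext values above:
  //
  //   sizeof(E)                       // E: Unevaluated
  //   case E:                         // E: ConstantEvaluated
  //   consteval int f() { return E; } // E: ImmediateFunctionContext
  //   void g(int X = E);              // E: PotentiallyEvaluatedIfUsed
  //   X = E;                          // E: PotentiallyEvaluated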
SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval; /// \brief Describes whether we are in an expression constext which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; // A context can be nested in both a discarded statement context and // an immediate function context, so they need to be tracked independently. bool InDiscardedStatement; bool InImmediateFunctionContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext), InDiscardedStatement(false), InImmediateFunctionContext(false) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated || Context == ExpressionEvaluationContext::ImmediateFunctionContext; } bool isImmediateFunctionContext() const { return Context == ExpressionEvaluationContext::ImmediateFunctionContext || (Context == ExpressionEvaluationContext::DiscardedStatement && InImmediateFunctionContext); } bool isDiscardedStatementContext() const { return Context == ExpressionEvaluationContext::DiscardedStatement || (Context == ExpressionEvaluationContext::ImmediateFunctionContext && InDiscardedStatement); } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. std::tuple<MangleNumberingContext *, Decl *> getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. 
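  // Expository note (added commentary, not part of the original header): the
  // two nesting flags above interact. For example, in
  //
  //   consteval int f(bool B) {
  //     if constexpr (false) { /* discarded */ }
  //     return B;
  //   }
  //
  // the discarded branch is a DiscardedStatement record with
  // InImmediateFunctionContext set, so isImmediateFunctionContext() remains
  // true there while isDiscardedStatementContext() is also true.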
class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl *, 2> Pair; public: SpecialMemberOverloadResult() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. const TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. 
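  // Expository sketch (added commentary, not part of the original header;
  // `MD` is an assumed non-null CXXMethodDecl* and `use` a hypothetical
  // consumer): SpecialMemberOverloadResult above packs the chosen method and
  // an outcome kind into one pointer-int pair.
  //
  //   Sema::SpecialMemberOverloadResult R(MD); // deleted => NoMemberOrDeleted
  //   if (R.getKind() == Sema::SpecialMemberOverloadResult::Success)
  //     use(R.getMethod());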
const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; class GlobalMethodPool { public: using Lists = std::pair<ObjCMethodList, ObjCMethodList>; using iterator = llvm::DenseMap<Selector, Lists>::iterator; iterator begin() { return Methods.begin(); } iterator end() { return Methods.end(); } iterator find(Selector Sel) { return Methods.find(Sel); } std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) { return Methods.insert(Val); } int count(Selector Sel) const { return Methods.count(Sel); } bool empty() const { return Methods.empty(); } private: llvm::DenseMap<Selector, Lists> Methods; }; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. 
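  // Expository example (added commentary, not part of the original header;
  // assumes <compare> for std::strong_ordering): how the
  // DefaultedComparisonKind values above map onto C++20 defaulted
  // comparison operators.
  //
  //   struct S {
  //     int A, B;
  //     bool operator==(const S &) const = default;  // Equal
  //     bool operator!=(const S &) const = default;  // NotEqual (via ==)
  //     std::strong_ordering
  //     operator<=>(const S &) const = default;      // ThreeWay
  //     bool operator<(const S &) const = default;   // Relational (via <=>)
  //   };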
void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the CurFPFeatures state on entry/exit of compound /// statements. class FPFeaturesStateRAII { public: FPFeaturesStateRAII(Sema &S); ~FPFeaturesStateRAII(); FPOptionsOverride getOverrides() { return OldOverrides; } private: Sema& S; FPOptions OldFPFeaturesState; FPOptionsOverride OldOverrides; LangOptions::FPEvalMethodKind OldEvalMethod; SourceLocation OldFPPragmaLocation; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; /// Increment when we find a reference; decrement when we find an ignored /// assignment. Ultimately the value is 0 if every reference is an ignored /// assignment. llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments; private: Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo; bool WarnedDarwinSDKInfoMissing = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); /// This virtual key function only exists to limit the emission of debug info /// describing the Sema class. GCC and Clang only emit debug info for a class /// with a vtable when the vtable is emitted. Sema is final and not /// polymorphic, but the debug info size savings are so significant that it is /// worth adding a vtable just to take advantage of this optimization. virtual void anchor(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getCurFPFeatures() { return CurFPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc, StringRef Platform); DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(); ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. ImmediateDiagBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. 
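  // Expository sketch (added commentary, not part of the original header;
  // `S` and `Loc` are assumed): runWithSufficientStackSpace above wraps
  // deeply recursive work, warning and growing the stack when it runs low.
  //
  //   S.runWithSufficientStackSpace(Loc, [&] {
  //     // e.g. recursively instantiate a deeply nested template
  //   });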
class ImmediateDiagBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op // in that case anwyay. ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default; ~ImmediateDiagBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First clear the diagnostic // builder itself so it won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template <typename T> friend const ImmediateDiagBuilder & operator<<(const ImmediateDiagBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const ImmediateDiagBuilder &operator<<(T &&V) const { const DiagnosticBuilder &BaseDiag = *this; BaseDiag << std::move(V); return *this; } }; /// A generic diagnostic builder for errors which may or may not be deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class SemaDiagnosticBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. 
K_Deferred }; SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D); SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default; ~SemaDiagnosticBuilder(); bool isImmediate() const { return ImmediateDiag.hasValue(); } /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (SemaDiagnosticBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a SemaDiagnosticBuilder yourself. operator bool() const { return isImmediate(); } template <typename T> friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const SemaDiagnosticBuilder &operator<<(T &&V) const { if (ImmediateDiag.hasValue()) *ImmediateDiag << std::move(V); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V); return *this; } friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) { if (Diag.ImmediateDiag.hasValue()) PD.Emit(*Diag.ImmediateDiag); else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD; return Diag; } void AddFixItHint(const FixItHint &Hint) const { if (ImmediateDiag.hasValue()) ImmediateDiag->AddFixItHint(Hint); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint); } friend ExprResult ExprError(const SemaDiagnosticBuilder &) { return ExprError(); } friend StmtResult StmtError(const SemaDiagnosticBuilder &) { return StmtError(); } operator ExprResult() const { return ExprError(); } operator StmtResult() const { return StmtError(); } operator TypeResult() const { return TypeError(); } operator DeclResult() const { return DeclResult(true); } operator MemInitResult() const { return MemInitResult(true); } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<ImmediateDiagBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Is the last error level diagnostic immediate. This is used to determined /// whether the next info diagnostic should be immediate. bool IsLastErrorImmediate = true; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID, bool DeferHint = false); /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint = false); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h /// Whether deferrable diagnostics should be deferred. bool DeferDiags = false; /// RAII class to control scope of DeferDiags. 
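  // Expository sketch (added commentary, not part of the original header;
  // `S`, `Loc`, `DiagID`, and `SomeExpr` are assumed): the conversion
  // operators above support the common emit-and-bail pattern.
  //
  //   if (S.Diag(Loc, DiagID) << SomeExpr->getType())
  //     return ExprError(); // an immediate error was emitted
  //
  // For a K_Deferred builder the streamed operands are instead stored with
  // the attached FunctionDecl and only surface if that function is
  // codegen'ed.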
class DeferDiagsRAII { Sema &S; bool SavedDeferDiags = false; public: DeferDiagsRAII(Sema &S, bool DeferDiags) : S(S), SavedDeferDiags(S.DeferDiags) { S.DeferDiags = DeferDiags; } ~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; } }; /// Whether uncompilable error has occurred. This includes error happens /// in deferred diagnostics. bool hasUncompilableErrorOccurred() const; bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; /// Invent a new identifier for parameters of abbreviated templates. IdentifierInfo * InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, unsigned Index); void emitAndClearUnusedLocalTypedefWarnings(); private: /// Function or variable declarations to be checked for whether the deferred /// diagnostics should be emitted. llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags; public: // Emit all deferred diagnostics. void emitDeferredDiags(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? 
nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void setFunctionHasMustTail(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// Retrieve the current function, if any, that should be analyzed for /// potential availability violations. sema::FunctionScopeInfo *getCurFunctionAvailabilityContext(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. 
It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); QualType BuildBitIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); /// Determine whether the callee of a particular function call can throw. /// E, D and Loc are all optional. 
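  // Expository sketch (added commentary, not part of the original header;
  // `S` and `Loc` are assumed): building the type "void (int, float)" with
  // BuildFunctionType above.
  //
  //   FunctionProtoType::ExtProtoInfo EPI;
  //   SmallVector<QualType, 2> Params = {S.Context.IntTy, S.Context.FloatTy};
  //   QualType FnTy = S.BuildFunctionType(S.Context.VoidTy, Params, Loc,
  //                                       DeclarationName(), EPI);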
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D, SourceLocation Loc = SourceLocation()); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { protected: unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal argument for the /// swift_name attribute applied to decl \p D. 
Raise a diagnostic if the name /// is invalid for the given declaration. /// /// \p AL is used to provide caret diagnostics in case of a malformed name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc, const ParsedAttr &AL, bool IsAsync); /// A derivative of BoundTypeDiagnoser for which the diagnostic's type /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless. /// For example, a diagnostic with no other parameters would generally have /// the form "...%select{incomplete|sizeless}0 type %1...". template <typename... Ts> class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> { public: SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args) : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID); this->emit(DB, std::index_sequence_for<Ts...>()); DB << T->isSizelessType() << T; } }; enum class CompleteTypeKind { /// Apply the normal rules for complete types. In particular, /// treat all sizeless types as incomplete. Normal, /// Relax the normal rules for complete types so that they include /// sizeless built-in types. AcceptSizeless, // FIXME: Eventually we should flip the default to Normal and opt in // to AcceptSizeless rather than opt out of it. Default = AcceptSizeless }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool IsPartition = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// The global module fragment of the current translation unit. clang::Module *GlobalModuleFragment = nullptr; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } /// Helper function to judge if we are in module purview. /// Return false if we are not in a module. bool isCurrentModulePurview() const { return getCurrentModule() ? getCurrentModule()->isModulePurview() : false; } /// Enter the scope of the global module. Module *PushGlobalModuleFragment(SourceLocation BeginLoc, bool IsImplicit); /// Leave the scope of the global module. void PopGlobalModuleFragment(); VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. 
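/// Usage sketch (illustrative): this simply forwards to the AST node, so
/// callers can ask, e.g.
/// \code
///   if (Module *M = S.getOwningModule(D))
///     llvm::errs() << "owned by " << M->getFullModuleName() << "\n";
/// \endcode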
Module *getOwningModule(const Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); // When loading non-modular PCH files, this is used to restore module // visibility. void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) { VisibleModules.setVisible(Mod, ImportLoc); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return D->isUnconditionallyVisible() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind = CompleteTypeKind::Default) { return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, unsigned DiagID); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser); } bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID); } template <typename...
Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser); } /// Get the type of expression E, triggering instantiation to complete the /// type if necessary -- that is, if the expression refers to a templated /// static data member of incomplete array type. /// /// May still return an incomplete type if instantiation was not possible or /// if the type is incomplete for a different reason. Use /// RequireCompleteExprType instead if a diagnostic is expected for an /// incomplete expression type. QualType getCompletedType(Expr *E); void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); // Returns the underlying type of a decltype with the given expression. QualType getDecltypeForExpr(Expr *E); QualType BuildTypeofExprType(Expr *E); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as an overload set, and an expression /// representing that overload set has been formed. /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable /// expression referencing the overload set. NC_OverloadSet, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. 
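/// For example (sketch): 'C' in 'C auto x = f();', where 'C' names a
/// concept, is classified as NC_Concept.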
NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification OverloadSet(ExprResult E) { NameClassification Result(NC_OverloadSet); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_OverloadSet); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. 
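/// A typical parser-side use (sketch; \c Actions is the parser's Sema
/// reference and \c Tok the lookahead token):
/// \code
///   Sema::NameClassification NC =
///       Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Tok);
///   switch (NC.getKind()) {
///   case Sema::NC_Type:    /* continue parsing a declaration */ break;
///   case Sema::NC_NonType: /* convert via ActOnNameClassifiedAsNonType */
///     break;
///   default: break;
///   }
/// \endcode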
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
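// Illustrative outcomes (sketch): for "fn < 3" where 'fn' is a plain
// DeclRefExpr with no explicit template arguments, the DeclRefExpr case
// above returns true with Dependent == false; for "this->fn < 3" inside a
// template, the CXXDependentScopeMemberExpr case fires and Dependent has
// already been set to true.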
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); void warnOnReservedIdentifier(const NamedDecl *D); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo, QualType &T, SourceLocation Loc, unsigned FailedFoldDiagID); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const BindingDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
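/// (Typical case, sketched: a lambda capture that shadows a local variable
/// and is later assigned to, where the user may have intended to modify the
/// enclosing variable instead.)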
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions).
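/// (Sketch of the intended split: \c Diagnose is used for functions
/// explicitly declared 'constexpr'; \c CheckValid silently tests whether a
/// function, e.g. an implicitly constexpr candidate such as a defaulted
/// special member, would satisfy the rules.)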
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
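/// For example (sketch, under ARC, where a \c __strong member makes a C
/// union non-trivial to copy or default-initialize):
/// \code
///   union U { __strong id Obj; int I; };
///   union U V;   // flagged through one of the NTCUC_* contexts above
/// \endcode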
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. 
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// An enumeration to represent the transition of states in parsing module /// fragments and imports. If we are not parsing a C++20 TU, or we find /// an error in state transition, the state is set to NotACXX20Module. enum class ModuleImportState { FirstDecl, ///< Parsing the first decl in a TU. GlobalFragment, ///< after 'module;' but before 'module X;' ImportAllowed, ///< after 'module X;' but before any non-import decl. ImportFinished, ///< after any non-import decl. PrivateFragment, ///< after 'module :private;'. NotACXX20Module ///< Not a C++20 TU, or an invalid state was found. }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, ModuleIdPath Partition, ModuleImportState &ImportState); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module toplevel name as an access path. /// \param Partition The module partition name as an access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path, ModuleIdPath Partition = {}); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
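/// For example (sketch): if lookup of 'T' in 'struct T *p;' finds
/// 'typedef int T;', the result is NTK_Typedef, which selects the matching
/// wording in err_tag_reference_non_tag.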
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
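/// (Sketch: exactly one side is active at a time; getDefaultedFunctionKind()
/// yields e.g. CXXCopyConstructor for a defaulted copy constructor, or
/// DefaultedComparisonKind::Equal for a defaulted operator==.)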
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, bool IsAbstract, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. 
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, /// Merge availability attributes for an implementation of /// an optional protocol requirement. AMK_OptionalProtocolImplementation }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added.
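/// (Sketch of the convention: "returning true" here means returning the
/// newly created attribute to attach to the declaration; a nullptr result
/// means nothing was added, e.g. because an existing attribute takes
/// precedence or the two conflict.)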
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI, StringRef NewUserDiagnostic); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA, StringRef Name); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL); EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D, const EnforceTCBLeafAttr &AL); BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. 
enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. 
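/// For example (sketch):
/// \code
///   const int &R = 42;              // OK: the temporary's lifetime is
///                                   // extended to that of R
///   const int &F() { return 42; }   // diagnosed: the temporary dies at the
///                                   // end of the full-expression
/// \endcode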
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier. CCEK_Noexcept ///< Condition in a noexcept(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE, NamedDecl *Dest = nullptr); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
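/// Usage sketch (illustrative): a caller supplies a converter/diagnoser
/// subclass, e.g. an ICEConvertDiagnoser for contexts that require an
/// integral or enumeration value:
/// \code
///   ExprResult Converted =
///       S.PerformContextualImplicitConversion(Loc, CondExpr, Diagnoser);
/// \endcode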
ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From,
QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit, as a series of 'note's, all template and non-template candidates // identified by the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal.
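///
/// A minimal usage sketch (illustrative; SemaRef, FD, and Loc are
/// hypothetical caller-provided values):
/// \code
///   if (!SemaRef.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, Loc))
///     return ExprError();
/// \endcode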
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); void AddOverloadedCallCandidates( LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass, NestedNameSpecifierLoc NNSLoc, DeclarationNameInfo DNI, const UnresolvedSetImpl &Fns, bool PerformADL = true); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, MultiExprArg Args); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation
LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false, bool AllowRecovery = false); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria are specified via the LookupNameKind enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
LOLR_StringTemplatePack, }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, SourceLocation TypoLoc); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading.
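///
/// A minimal usage sketch (illustrative; CurScope, Name, and Loc are
/// hypothetical caller-provided values):
/// \code
///   NamedDecl *ND = SemaRef.LookupSingleName(CurScope, Name, Loc,
///                                            Sema::LookupOrdinaryName);
///   if (!ND)
///     return; // absent, ambiguous, or overloaded
/// \endcode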
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id, bool IsUDSuffix); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing, StringLiteral *StringLit = nullptr); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); bool CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old); bool CheckRedeclarationInModule(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. 
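///
/// A minimal usage sketch (illustrative; the locations and subexpressions
/// are hypothetical caller-provided values): keep the malformed node's
/// pieces in the AST so tooling and later diagnostics can still see them.
/// \code
///   ExprResult Recovery =
///       SemaRef.CreateRecoveryExpr(BeginLoc, EndLoc, {Callee, Arg});
/// \endcode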
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef<Expr *> SubExprs, QualType T = QualType()); ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID, SourceLocation Loc); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( FunctionDecl *FD); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Handles semantic checking for features that are common to all attributes, /// such as checking whether a parameter was properly specified, or the /// correct number of arguments were passed, etc. Returns true if the /// attribute has been diagnosed. bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A, bool SkipArgCountCheck = false); bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A, bool SkipArgCountCheck = false); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const AttributeCommonInfo &CI, const Expr *E, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); llvm::Error isValidSectionSpecifier(StringRef Str); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str, const StringLiteral *Literal, bool &HasDefault, bool &HasCommas, SmallVectorImpl<StringRef> &Strings); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. 
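///
/// A minimal usage sketch (illustrative; T is a hypothetical method type
/// formed from a typedef, and Loc a caller-provided location):
/// \code
///   SemaRef.adjustMemberFunctionCC(T, /*IsStatic=*/false,
///                                  /*IsCtorOrDtor=*/false, Loc);
/// \endcode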
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type through some means not written in source (e.g. API notes). /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param diagLoc The location to use for diagnostics. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \param overrideExisting Whether to override an existing, locally-specified /// nullability specifier rather than complaining about the conflict. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkImplicitNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation diagLoc, bool allowArrayTypes, bool overrideExisting); /// Process the attributes before creating an attributed statement. Returns /// the semantic attributes that have been processed. void ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesWithRange &InAttrs, SmallVectorImpl<const Attr *> &OutAttrs); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar /// which backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If a method is a property setter/getter and /// its property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjCPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' types match and returns /// true or false accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// a category match those implemented in its primary class, and /// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); /// Returns the default addr space for method qualifiers. LangAS getDefaultCXXMethodAddrSpace() const; private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// Returns instance or factory methods in the global method pool for the /// given selector. It checks the desired kind first; if none is found and the /// CheckTheOther parameter is set, it then checks the other kind. If no such /// method or only one method is found, the function returns false; otherwise, /// it returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// Returns the method which best matches the given argument list, or /// nullptr if none could be found. ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the /// global pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures.
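///
/// A minimal usage sketch (illustrative; Sel and R are hypothetical
/// caller-provided values):
/// \code
///   ObjCMethodDecl *Factory = SemaRef.LookupFactoryMethodInGlobalPool(Sel, R);
/// \endcode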
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnAfterCompoundStatementLeadingPragmas(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// An RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops a function scope on exit.
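///
/// A minimal usage sketch (illustrative; ParseFailed is a hypothetical
/// condition): the scope is popped on every exit path unless disable() has
/// been called first.
/// \code
///   FunctionScopeRAII FuncScope(SemaRef);
///   if (ParseFailed)
///     return StmtError();  // scope popped by the RAII destructor
///   FuncScope.disable();   // success path: leave the scope to its owner
/// \endcode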
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult BuildAttributedStmt(SourceLocation AttrsLoc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt); StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc, ConditionResult Cond, SourceLocation RParenLoc, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); struct NamedReturnInfo { const VarDecl *Candidate; enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable }; Status S; bool isMoveEligible() const { return S != None; }; bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; } }; enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn }; NamedReturnInfo getNamedReturnInfo( Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal); NamedReturnInfo getNamedReturnInfo(const VarDecl *VD); const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info, QualType ReturnType); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const NamedReturnInfo &NRInfo, Expr *Value, bool SuppressSimplerImplicitMoves = false); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, bool AllowRecovery = false); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, NamedReturnInfo &NRInfo, bool SuppressSimplerImplicitMoves); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl
*BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// If VD is set but not otherwise used, diagnose, for a parameter or a /// variable. void DiagnoseUnusedButSetDecl(const VarDecl *VD); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
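///
/// Illustrative only -- the pattern this diagnoses:
/// \code
///   void *p = 0;        // warns under -Wzero-as-null-pointer-constant
///   void *q = nullptr;  // preferred spelling
/// \endcode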
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD. void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); TypeSourceInfo *TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier).
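//
// A minimal marking sketch (illustrative; Loc, FD, and VD are hypothetical
// values from an enclosing expression):
//   S.MarkFunctionReferenced(Loc, FD); // odr-uses FD once resolution picks it
//   S.MarkVariableReferenced(Loc, VD); // may be deferred for constant VDs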
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false, ArrayRef<const Expr *> StopAt = None); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid.
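///
/// A minimal usage sketch (illustrative; E is a caller-provided ExprResult
/// and DiagID a hypothetical diagnostic ID):
/// \code
///   if (SemaRef.tryToRecoverWithCall(E, SemaRef.PDiag(DiagID)))
///     return E; // recovery attempted; E may now be invalid
/// \endcode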
  /// Try to recover by turning the given expression into a
  /// call. Returns true if recovery was attempted or an error was
  /// emitted; this may also leave the ExprResult invalid.
  bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                            bool ForceComplain = false,
                            bool (*IsPlausibleResult)(QualType) = nullptr);

  /// Figure out if an expression could be turned into a call.
  bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                     UnresolvedSetImpl &NonTemplateOverloads);

  /// Try to convert an expression \p E to type \p Ty. Returns the result of
  /// the conversion.
  ExprResult tryConvertExprToType(Expr *E, QualType Ty);

  /// Conditionally issue a diagnostic based on the statements'
  /// reachability analysis.
  ///
  /// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
  /// the function body is parsed, and then do a basic reachability analysis
  /// to determine if the statement is reachable. If it is unreachable, the
  /// diagnostic will not be emitted.
  bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
                       const PartialDiagnostic &PD);

  /// Conditionally issue a diagnostic based on the current
  /// evaluation context.
  ///
  /// \param Statement If Statement is non-null, delay reporting the
  /// diagnostic until the function body is parsed, and then do a basic
  /// reachability analysis to determine if the statement is reachable.
  /// If it is unreachable, the diagnostic will not be emitted.
  bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                           const PartialDiagnostic &PD);

  /// Similar, but diagnostic is only produced if all the specified statements
  /// are reachable.
  bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
                           const PartialDiagnostic &PD);

  // Primary Expressions.
  SourceRange getExprRange(Expr *E) const;

  ExprResult ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
                               SourceLocation TemplateKWLoc,
                               UnqualifiedId &Id, bool HasTrailingLParen,
                               bool IsAddressOfOperand,
                               CorrectionCandidateCallback *CCC = nullptr,
                               bool IsInlineAsmIdentifier = false,
                               Token *KeywordReplacement = nullptr);

  void DecomposeUnqualifiedId(const UnqualifiedId &Id,
                              TemplateArgumentListInfo &Buffer,
                              DeclarationNameInfo &NameInfo,
                              const TemplateArgumentListInfo *&TemplateArgs);

  bool DiagnoseDependentMemberLookup(LookupResult &R);

  bool
  DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
                      CorrectionCandidateCallback &CCC,
                      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                      ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);

  DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
                                    IdentifierInfo *II);
  ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);

  ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
                                IdentifierInfo *II,
                                bool AllowBuiltinCreation = false);

  ExprResult ActOnDependentIdExpression(
      const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      const DeclarationNameInfo &NameInfo, bool isAddressOfOperand,
      const TemplateArgumentListInfo *TemplateArgs);

  /// If \p D cannot be odr-used in the current expression evaluation context,
  /// return a reason explaining why. Otherwise, return NOUR_None.
  NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);

  DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                                SourceLocation Loc,
                                const CXXScopeSpec *SS = nullptr);
  DeclRefExpr *
  BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                   const DeclarationNameInfo &NameInfo,
                   const CXXScopeSpec *SS = nullptr,
                   NamedDecl *FoundD = nullptr,
                   SourceLocation TemplateKWLoc = SourceLocation(),
                   const TemplateArgumentListInfo *TemplateArgs = nullptr);
  DeclRefExpr *
  BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                   const DeclarationNameInfo &NameInfo,
                   NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr,
                   SourceLocation TemplateKWLoc = SourceLocation(),
                   const TemplateArgumentListInfo *TemplateArgs = nullptr);

  ExprResult BuildAnonymousStructUnionMemberReference(
      const CXXScopeSpec &SS, SourceLocation nameLoc,
      IndirectFieldDecl *indirectField,
      DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
      Expr *baseObjectExpr = nullptr,
      SourceLocation opLoc = SourceLocation());

  ExprResult BuildPossibleImplicitMemberExpr(
      const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
      const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
      UnresolvedLookupExpr *AsULE = nullptr);
  ExprResult
  BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
                          LookupResult &R,
                          const TemplateArgumentListInfo *TemplateArgs,
                          bool IsDefiniteInstance, const Scope *S);
  bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
                                  const LookupResult &R,
                                  bool HasTrailingLParen);

  ExprResult
  BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
                                    const DeclarationNameInfo &NameInfo,
                                    bool IsAddressOfOperand, const Scope *S,
                                    TypeSourceInfo **RecoveryTSI = nullptr);
  ExprResult BuildDependentDeclRefExpr(
      const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      const DeclarationNameInfo &NameInfo,
      const TemplateArgumentListInfo *TemplateArgs);

  ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R,
                                      bool NeedsADL,
                                      bool AcceptInvalidDecl = false);
  ExprResult BuildDeclarationNameExpr(
      const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
      NamedDecl *D, NamedDecl *FoundD = nullptr,
      const TemplateArgumentListInfo *TemplateArgs = nullptr,
      bool AcceptInvalidDecl = false);

  ExprResult BuildLiteralOperatorCall(
      LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args,
      SourceLocation LitEndLoc,
      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);

  ExprResult BuildPredefinedExpr(SourceLocation Loc,
                                 PredefinedExpr::IdentKind IK);
  ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
  ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);

  ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
                                           SourceLocation LParen,
                                           SourceLocation RParen,
                                           TypeSourceInfo *TSI);
  ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
                                           SourceLocation LParen,
                                           SourceLocation RParen,
                                           ParsedType ParsedTy);

  bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);

  ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
  ExprResult ActOnCharacterConstant(const Token &Tok,
                                    Scope *UDLScope = nullptr);
  ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
  ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R,
                                MultiExprArg Val);

  /// ActOnStringLiteral - The specified tokens were lexed as pasted string
  /// fragments (e.g. "foo" "bar" L"baz").
  ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
                                Scope *UDLScope = nullptr);

  ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
                                       SourceLocation DefaultLoc,
                                       SourceLocation RParenLoc,
                                       Expr *ControllingExpr,
                                       ArrayRef<ParsedType> ArgTypes,
                                       ArrayRef<Expr *> ArgExprs);
  ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
                                        SourceLocation DefaultLoc,
                                        SourceLocation RParenLoc,
                                        Expr *ControllingExpr,
                                        ArrayRef<TypeSourceInfo *> Types,
                                        ArrayRef<Expr *> Exprs);

  // Binary/Unary Operators. 'Tok' is the token for the operator.
  ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
                                  Expr *InputExpr);
  ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
                          UnaryOperatorKind Opc, Expr *Input);
  ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op,
                          Expr *Input);

  bool isQualifiedMemberAccess(Expr *E);
  QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);

  ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
                                            SourceLocation OpLoc,
                                            UnaryExprOrTypeTrait ExprKind,
                                            SourceRange R);
  ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
                                            UnaryExprOrTypeTrait ExprKind);
  ExprResult
  ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
                                UnaryExprOrTypeTrait ExprKind, bool IsType,
                                void *TyOrEx, SourceRange ArgRange);

  ExprResult CheckPlaceholderExpr(Expr *E);
  bool CheckVecStepExpr(Expr *E);

  bool CheckUnaryExprOrTypeTraitOperand(Expr *E,
                                        UnaryExprOrTypeTrait ExprKind);
  bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
                                        SourceLocation OpLoc,
                                        SourceRange ExprRange,
                                        UnaryExprOrTypeTrait ExprKind);
  ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc,
                                          IdentifierInfo &Name,
                                          SourceLocation NameLoc,
                                          SourceLocation RParenLoc);

  ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
                                 tok::TokenKind Kind, Expr *Input);

  ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base,
                                     SourceLocation LLoc,
                                     MultiExprArg ArgExprs,
                                     SourceLocation RLoc);
  ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
                                             Expr *Idx, SourceLocation RLoc);
  ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
                                              Expr *ColumnIdx,
                                              SourceLocation RBLoc);

  ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
                                      Expr *LowerBound,
                                      SourceLocation ColonLocFirst,
                                      SourceLocation ColonLocSecond,
                                      Expr *Length, Expr *Stride,
                                      SourceLocation RBLoc);
  ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
                                      SourceLocation RParenLoc,
                                      ArrayRef<Expr *> Dims,
                                      ArrayRef<SourceRange> Brackets);

  /// Data structure for iterator expression.
  struct OMPIteratorData {
    IdentifierInfo *DeclIdent = nullptr;
    SourceLocation DeclIdentLoc;
    ParsedType Type;
    OMPIteratorExpr::IteratorRange Range;
    SourceLocation AssignLoc;
    SourceLocation ColonLoc;
    SourceLocation SecColonLoc;
  };

  ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
                                  SourceLocation LLoc, SourceLocation RLoc,
                                  ArrayRef<OMPIteratorData> Data);
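  // Illustrative sketch (not part of Sema): the source forms that reach the
  // OpenMP expression callbacks above. For a clause operand written as
  // 'a[1:5]' (or, where the standard permits a stride, 'a[1:5:2]'),
  // ActOnOMPArraySectionExpr receives Base='a', LowerBound='1', Length='5',
  // and Stride='2' (null when omitted); a plain subscript 'a[i]' still goes
  // through ActOnArraySubscriptExpr. An array-shaping expression '([n][m])p'
  // reaches ActOnOMPArrayShapingExpr with Dims={'n','m'}.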
  // This struct is for use by ActOnMemberAccess to allow
  // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
  // changing the access operator from a '.' to a '->' (to see if that is the
  // change needed to fix an error about an unknown member, e.g. when the
  // class defines a custom operator->).
  struct ActOnMemberAccessExtraArgs {
    Scope *S;
    UnqualifiedId &Id;
    Decl *ObjCImpDecl;
  };

  ExprResult BuildMemberReferenceExpr(
      Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
      CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
      const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
      ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
  ExprResult
  BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
                           SourceLocation OpLoc, bool IsArrow,
                           const CXXScopeSpec &SS,
                           SourceLocation TemplateKWLoc,
                           NamedDecl *FirstQualifierInScope, LookupResult &R,
                           const TemplateArgumentListInfo *TemplateArgs,
                           const Scope *S,
                           bool SuppressQualifierCheck = false,
                           ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

  ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
                                     SourceLocation OpLoc,
                                     const CXXScopeSpec &SS, FieldDecl *Field,
                                     DeclAccessPair FoundDecl,
                                     const DeclarationNameInfo &MemberNameInfo);

  ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);

  bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
                                     const CXXScopeSpec &SS,
                                     const LookupResult &R);

  ExprResult ActOnDependentMemberExpr(
      Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc,
      const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
      const TemplateArgumentListInfo *TemplateArgs);

  ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
                                   tok::TokenKind OpKind, CXXScopeSpec &SS,
                                   SourceLocation TemplateKWLoc,
                                   UnqualifiedId &Member, Decl *ObjCImpDecl);

  MemberExpr *
  BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
                  const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
                  ValueDecl *Member, DeclAccessPair FoundDecl,
                  bool HadMultipleCandidates,
                  const DeclarationNameInfo &MemberNameInfo, QualType Ty,
                  ExprValueKind VK, ExprObjectKind OK,
                  const TemplateArgumentListInfo *TemplateArgs = nullptr);
  MemberExpr *
  BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
                  NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
                  ValueDecl *Member, DeclAccessPair FoundDecl,
                  bool HadMultipleCandidates,
                  const DeclarationNameInfo &MemberNameInfo, QualType Ty,
                  ExprValueKind VK, ExprObjectKind OK,
                  const TemplateArgumentListInfo *TemplateArgs = nullptr);

  void ActOnDefaultCtorInitializers(Decl *CDtorDecl);

  bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl,
                               const FunctionProtoType *Proto,
                               ArrayRef<Expr *> Args,
                               SourceLocation RParenLoc,
                               bool ExecConfig = false);
  void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param,
                                const Expr *ArgExpr);

  /// ActOnCallExpr - Handle a call to Fn with the specified array of
  /// arguments. This provides the location of the left/right parens and a
  /// list of comma locations.
  ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr);
  ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr,
                           bool IsExecConfig = false,
                           bool AllowRecovery = false);
  Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
                             MultiExprArg CallArgs);

  enum class AtomicArgumentOrder { API, AST };
  ExprResult
  BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                  SourceLocation RParenLoc, MultiExprArg Args,
                  AtomicExpr::AtomicOp Op,
                  AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);

  ExprResult
  BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
                        ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
                        Expr *Config = nullptr, bool IsExecConfig = false,
                        ADLCallKind UsesADL = ADLCallKind::NotADL);

  ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                     MultiExprArg ExecConfig,
                                     SourceLocation GGGLoc);

  ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D,
                           ParsedType &Ty, SourceLocation RParenLoc,
                           Expr *CastExpr);
  ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
                                 SourceLocation RParenLoc, Expr *Op);
  CastKind PrepareScalarCast(ExprResult &src, QualType destType);

  /// Build an altivec or OpenCL literal.
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);

  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
                                  SourceLocation RParenLoc, Expr *InitExpr);
  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);

  ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);
  ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                        SourceLocation EqualOrColonLoc,
                                        bool GNUSyntax, ExprResult Init);

private:
  static BinaryOperatorKind
  ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                        BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);
  void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                   UnresolvedSetImpl &Functions);

  void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

  /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc, Expr *CondExpr,
                                Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc);
  ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc, unsigned TemplateDepth);
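  // Illustrative sketch (not part of Sema): the GNU extensions handled by
  // the callbacks above, as they appear in source:
  //
  //   int y = x ?: fallback();           // ActOnConditionalOp, null LHS
  //   void *p = &&retry;                 // ActOnAddrLabel ("&&label")
  //   int z = ({ int t = f(); t * t; }); // ActOnStartStmtExpr/ActOnStmtExpr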
  // Handle the final expression in a statement expression.
  ExprResult ActOnStmtExprResult(ExprResult E);

  void ActOnStmtExprError();

  // __builtin_offsetof(type, identifier(.identifier|[expr])*)
  struct OffsetOfComponent {
    SourceLocation LocStart, LocEnd;
    bool isBrackets; // true if [expr], false if .ident
    union {
      IdentifierInfo *IdentInfo;
      Expr *E;
    } U;
  };

  /// __builtin_offsetof(type, a.b[123][456].c)
  ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                  TypeSourceInfo *TInfo,
                                  ArrayRef<OffsetOfComponent> Components,
                                  SourceLocation RParenLoc);
  ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc,
                                  SourceLocation TypeLoc,
                                  ParsedType ParsedArgTy,
                                  ArrayRef<OffsetOfComponent> Components,
                                  SourceLocation RParenLoc);

  // __builtin_choose_expr(constExpr, expr1, expr2)
  ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr,
                             Expr *LHSExpr, Expr *RHSExpr,
                             SourceLocation RPLoc);

  // __builtin_va_arg(expr, type)
  ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                        SourceLocation RPLoc);
  ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                            TypeSourceInfo *TInfo, SourceLocation RPLoc);

  // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
  // __builtin_COLUMN()
  ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
                                SourceLocation BuiltinLoc,
                                SourceLocation RPLoc);

  // Build a potentially resolved SourceLocExpr.
  ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
                                SourceLocation BuiltinLoc,
                                SourceLocation RPLoc,
                                DeclContext *ParentContext);

  // __null
  ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

  bool CheckCaseExpression(Expr *E);

  /// Describes the result of an "if-exists" condition check.
  enum IfExistsResult {
    /// The symbol exists.
    IER_Exists,

    /// The symbol does not exist.
    IER_DoesNotExist,

    /// The name is a dependent name, so the results will differ
    /// from one instantiation to the next.
    IER_Dependent,

    /// An error occurred.
    IER_Error
  };

  IfExistsResult
  CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
                               const DeclarationNameInfo &TargetNameInfo);
  IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S,
                                              SourceLocation KeywordLoc,
                                              bool IsIfExists,
                                              CXXScopeSpec &SS,
                                              UnqualifiedId &Name);

  StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                        bool IsIfExists,
                                        NestedNameSpecifierLoc QualifierLoc,
                                        DeclarationNameInfo NameInfo,
                                        Stmt *Nested);
  StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
                                        bool IsIfExists, CXXScopeSpec &SS,
                                        UnqualifiedId &Name, Stmt *Nested);

  //===------------------------- "Block" Extension ------------------------===//

  /// ActOnBlockStart - This callback is invoked when a block literal is
  /// started.
  void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);

  /// ActOnBlockArguments - This callback allows processing of block
  /// arguments. If there are no arguments, this is still invoked.
  void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
                           Scope *CurScope);

  /// ActOnBlockError - If there is an error parsing a block, this callback
  /// is invoked to pop the information about the block from the action impl.
  void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);

  /// ActOnBlockStmtExpr - This is called when the body of a block statement
  /// literal was successfully completed.  ^(int x){...}
  ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
                                Scope *CurScope);
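  // Illustrative sketch (not part of Sema): how the OffsetOfComponent list
  // above encodes '__builtin_offsetof(struct S, a.b[123][456].c)'. The
  // components are, in order:
  //
  //   {isBrackets=false, U.IdentInfo="a"}
  //   {isBrackets=false, U.IdentInfo="b"}
  //   {isBrackets=true,  U.E=<expr 123>}
  //   {isBrackets=true,  U.E=<expr 456>}
  //   {isBrackets=false, U.IdentInfo="c"}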
  //===---------------------------- Clang Extensions ----------------------===//

  /// __builtin_convertvector(...)
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);
  ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc,
                               IdentifierInfo *Ident, SourceLocation LBrace,
                               const ParsedAttributesView &AttrList,
                               UsingDirectiveDecl *&UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();
  NamespaceDecl *lookupStdExperimentalNamespace();
  NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; }

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

private:
  // A cache representing if we've fully checked the various comparison
  // category types stored in ASTContext. The bit-index corresponds to the
  // integer value of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
  enum class ComparisonCategoryUsage {
    /// The '<=>' operator was used in an expression and a builtin operator
    /// was selected.
    OperatorInExpression,

    /// A defaulted 'operator<=>' needed the comparison category. This
    /// typically only applies to 'std::strong_ordering', due to the implicit
    /// fallback return value.
    DefaultedOperator,
  };

  /// Lookup the specified comparison category types in the standard
  /// library, and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  ///
  /// \return The type of the comparison category type corresponding to the
  /// specified Kind, or a null type if an error occurs
  QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                       SourceLocation Loc,
                                       ComparisonCategoryUsage Usage);

  /// Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

  /// Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                            SourceLocation NamespcLoc, CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            const ParsedAttributesView &AttrList);
  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc,
                               IdentifierInfo *Alias, CXXScopeSpec &SS,
                               SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void FilterUsingLookup(Scope *S, LookupResult &lookup);
  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                               const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc,
                               const LookupResult *R = nullptr,
                               const UsingDecl *UD = nullptr);

  NamedDecl *BuildUsingDeclaration(
      Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
      bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
      DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
      const ParsedAttributesView &AttrList, bool IsInstantiation,
      bool IsUsingIfExists);
  NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
                                       SourceLocation UsingLoc,
                                       SourceLocation EnumLoc,
                                       SourceLocation NameLoc, EnumDecl *ED);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                                ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and
  /// the corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
  CXXConstructorDecl *
  findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                            ConstructorUsingShadowDecl *DerivedShadow);

  Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                              SourceLocation UsingLoc,
                              SourceLocation TypenameLoc, CXXScopeSpec &SS,
                              UnqualifiedId &Name,
                              SourceLocation EllipsisLoc,
                              const ParsedAttributesView &AttrList);
  Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
                                  SourceLocation UsingLoc,
                                  SourceLocation EnumLoc, const DeclSpec &);
  Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                              MultiTemplateParamsArg TemplateParams,
                              SourceLocation UsingLoc, UnqualifiedId &Name,
                              const ParsedAttributesView &AttrList,
                              TypeResult Type, Decl *DeclFromDeclSpec);

  /// BuildCXXConstructExpr - Creates a complete call to a constructor,
  /// including handling of its default argument expressions.
  ///
  /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
  ExprResult BuildCXXConstructExpr(
      SourceLocation ConstructLoc, QualType DeclInitType,
      NamedDecl *FoundDecl, CXXConstructorDecl *Constructor,
      MultiExprArg Exprs, bool HadMultipleCandidates,
      bool IsListInitialization, bool IsStdInitListInitialization,
      bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange);

  /// Build a CXXConstructExpr whose constructor has already been resolved if
  /// it denotes an inherited constructor.
  ExprResult BuildCXXConstructExpr(
      SourceLocation ConstructLoc, QualType DeclInitType,
      CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
      bool HadMultipleCandidates, bool IsListInitialization,
      bool IsStdInitListInitialization, bool RequiresZeroInit,
      unsigned ConstructKind, SourceRange ParenRange);

  // FIXME: Can we remove this and have the above BuildCXXConstructExpr check
  // if the constructor can be elidable?
  ExprResult BuildCXXConstructExpr(
      SourceLocation ConstructLoc, QualType DeclInitType,
      NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable,
      MultiExprArg Exprs, bool HadMultipleCandidates,
      bool IsListInitialization, bool IsStdInitListInitialization,
      bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange);

  ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

  /// Instantiate or parse a C++ default argument expression as necessary.
  /// Return true on error.
  bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                              ParmVarDecl *Param);

  /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
  /// the default expr if needed.
  ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                                    ParmVarDecl *Param);

  /// FinalizeVarWithDestructor - Prepare for calling destructor on the
  /// constructed variable.
  void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

  /// Helper class that collects exception specifications for
  /// implicitly-declared special member functions.
  class ImplicitExceptionSpecification {
    // Pointer to allow copying
    Sema *Self;
    // We order exception specifications thus:
    // noexcept is the most restrictive, but is only used in C++11.
    // throw() comes next.
    // Then a throw(collected exceptions)
    // Finally no specification, which is expressed as noexcept(false).
    // throw(...) is used instead if any called function uses it.
    ExceptionSpecificationType ComputedEST;
    llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
    SmallVector<QualType, 4> Exceptions;

    void ClearExceptions() {
      ExceptionsSeen.clear();
      Exceptions.clear();
    }

  public:
    explicit ImplicitExceptionSpecification(Sema &Self)
        : Self(&Self), ComputedEST(EST_BasicNoexcept) {
      if (!Self.getLangOpts().CPlusPlus11)
        ComputedEST = EST_DynamicNone;
    }

    /// Get the computed exception specification type.
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(!isComputedNoexcept(ComputedEST) &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E) { CalledStmt(E); }

    /// Integrate an invoked statement into the collected data.
    void CalledStmt(Stmt *S);

    /// Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        ///   The exception-specification is noexcept(false) if the set of
        ///   potential exceptions of the special member function contains
        ///   "any"
        ESI.Type = EST_NoexceptFalse;
        ESI.NoexceptExpr =
            Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
      }
      return ESI;
    }
  };
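  // Illustrative sketch (not part of Sema): the intended usage pattern of
  // ImplicitExceptionSpecification above when computing the exception
  // specification of an implicitly-declared special member. 'S', 'Loc', and
  // the set of subobject constructors are assumed to be in scope.
  //
  //   ImplicitExceptionSpecification ExceptSpec(S);
  //   for (const CXXMethodDecl *SubobjectCtor : SubobjectCtors)
  //     ExceptSpec.CalledDecl(Loc, SubobjectCtor);
  //   FunctionProtoType::ExceptionSpecInfo ESI =
  //       ExceptSpec.getExceptionSpec();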
  /// Evaluate the implicit exception specification for a defaulted
  /// special member function.
  void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);

  /// Check the given noexcept-specifier, convert its expression, and compute
  /// the appropriate ExceptionSpecificationType.
  ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr,
                               ExceptionSpecificationType &EST);

  /// Check the given exception-specification and update the
  /// exception specification information with the results.
  void checkExceptionSpecification(
      bool IsTopLevel, ExceptionSpecificationType EST,
      ArrayRef<ParsedType> DynamicExceptions,
      ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr,
      SmallVectorImpl<QualType> &Exceptions,
      FunctionProtoType::ExceptionSpecInfo &ESI);

  /// Determine if we're in a case where we need to (incorrectly) eagerly
  /// parse an exception specification to work around a libstdc++ bug.
  bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);

  /// Add an exception-specification to the given member function
  /// (or member function template). The exception-specification was parsed
  /// after the method itself was declared.
  void actOnDelayedExceptionSpecification(
      Decl *Method, ExceptionSpecificationType EST,
      SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions,
      ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr);

  class InheritedConstructorInfo;

  /// Determine if a special member function should have a deleted
  /// definition when it is defaulted.
  bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
                                 InheritedConstructorInfo *ICI = nullptr,
                                 bool Diagnose = false);

  /// Produce notes explaining why a defaulted function was defined as
  /// deleted.
  void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);

  /// Declare the implicit default constructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// default constructor will be added.
  ///
  /// \returns The implicitly-declared default constructor.
  CXXConstructorDecl *
  DeclareImplicitDefaultConstructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitDefaultConstructor - Checks for feasibility of
  /// defining this constructor as the default constructor.
  void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
                                        CXXConstructorDecl *Constructor);

  /// Declare the implicit destructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// destructor will be added.
  ///
  /// \returns The implicitly-declared destructor.
  CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitDestructor - Checks for feasibility of
  /// defining this destructor as the default destructor.
  void DefineImplicitDestructor(SourceLocation CurrentLocation,
                                CXXDestructorDecl *Destructor);

  /// Build an exception spec for destructors that don't have one.
  ///
  /// C++11 says that user-defined destructors with no exception spec get one
  /// that looks as if the destructor was implicitly declared.
  void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);

  /// Define the specified inheriting constructor.
  void DefineInheritingConstructor(SourceLocation UseLoc,
                                   CXXConstructorDecl *Constructor);

  /// Declare the implicit copy constructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// copy constructor will be added.
  ///
  /// \returns The implicitly-declared copy constructor.
  CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitCopyConstructor - Checks for feasibility of
  /// defining this constructor as the copy constructor.
  void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
                                     CXXConstructorDecl *Constructor);

  /// Declare the implicit move constructor for the given class.
  ///
  /// \param ClassDecl The Class declaration into which the implicit
  /// move constructor will be added.
  ///
  /// \returns The implicitly-declared move constructor, or NULL if it wasn't
  /// declared.
  CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitMoveConstructor - Checks for feasibility of
  /// defining this constructor as the move constructor.
  void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                     CXXConstructorDecl *Constructor);

  /// Declare the implicit copy assignment operator for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// copy assignment operator will be added.
  ///
  /// \returns The implicitly-declared copy assignment operator.
  CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);

  /// Defines an implicitly-declared copy assignment operator.
  void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                    CXXMethodDecl *MethodDecl);

  /// Declare the implicit move assignment operator for the given class.
  ///
  /// \param ClassDecl The Class declaration into which the implicit
  /// move assignment operator will be added.
  ///
  /// \returns The implicitly-declared move assignment operator, or NULL if
  /// it wasn't declared.
  CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

  /// Defines an implicitly-declared move assignment operator.
  void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                    CXXMethodDecl *MethodDecl);

  /// Force the declaration of any implicitly-declared members of this
  /// class.
  void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

  /// Check a completed declaration of an implicit special member.
  void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

  /// Determine whether the given function is an implicitly-deleted
  /// special member function.
  bool isImplicitlyDeleted(FunctionDecl *FD);

  /// Check whether 'this' shows up in the type of a static member
  /// function after the (naturally empty) cv-qualifier-seq would be.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

  /// Whether 'this' shows up in the exception specification of a static
  /// member function.
  bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

  /// Check whether 'this' shows up in the attributes of the given
  /// static member function.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

  /// MaybeBindToTemporary - If the passed in expression has a record type
  /// with a non-trivial destructor, this will return CXXBindTemporaryExpr.
  /// Otherwise it simply returns the passed in expression.
  ExprResult MaybeBindToTemporary(Expr *E);

  /// Wrap the expression in a ConstantExpr if it is a potential immediate
  /// invocation.
  ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);

  bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                               QualType DeclInitType, MultiExprArg ArgsPtr,
                               SourceLocation Loc,
                               SmallVectorImpl<Expr *> &ConvertedArgs,
                               bool AllowExplicit = false,
                               bool IsListInitialization = false);

  ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                          SourceLocation NameLoc,
                                          IdentifierInfo &Name);

  ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
                                Scope *S, CXXScopeSpec &SS,
                                bool EnteringContext);
  ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II,
                               SourceLocation NameLoc, Scope *S,
                               CXXScopeSpec &SS, ParsedType ObjectType,
                               bool EnteringContext);

  ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                          ParsedType ObjectType);

  // Checks that reinterpret casts don't have undefined behavior.
  void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                      bool IsDereference, SourceRange Range);

  // Checks that the vector type should be initialized from a scalar
  // by splatting the value rather than populating a single element.
  // This is the case for AltiVecVector types as well as with
  // AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified.
  bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy);

  // Checks if the -faltivec-src-compat=gcc option is specified.
  // If so, AltiVecVector, AltiVecBool and AltiVecPixel types are
  // treated the same way as they are when trying to initialize
  // these vectors on gcc (an error is emitted).
  bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy,
                                  QualType SrcTy);

  /// ActOnCXXNamedCast - Parse
  /// {dynamic,static,reinterpret,const,addrspace}_cast's.
  ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                               SourceLocation LAngleBracketLoc, Declarator &D,
                               SourceLocation RAngleBracketLoc,
                               SourceLocation LParenLoc, Expr *E,
                               SourceLocation RParenLoc);

  ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                               TypeSourceInfo *Ty, Expr *E,
                               SourceRange AngleBrackets, SourceRange Parens);

  ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
                                     ExprResult Operand,
                                     SourceLocation RParenLoc);
  ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc,
                                     TypeSourceInfo *TSI, Expr *Operand,
                                     SourceLocation RParenLoc);

  ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc,
                            Expr *Operand, SourceLocation RParenLoc);

  /// ActOnCXXTypeid - Parse typeid( something ).
  ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                            bool isType, void *TyOrExpr,
                            SourceLocation RParenLoc);

  ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                            Expr *Operand, SourceLocation RParenLoc);

  /// ActOnCXXUuidof - Parse __uuidof( something ).
  ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
                            bool isType, void *TyOrExpr,
                            SourceLocation RParenLoc);

  /// Handle a C++1z fold-expression: ( expr op ... op expr ).
  ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
                              tok::TokenKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc);
  ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
                              SourceLocation LParenLoc, Expr *LHS,
                              BinaryOperatorKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc,
                              Optional<unsigned> NumExpansions);
  ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                   BinaryOperatorKind Operator);

  /// ActOnCXXThis - Parse 'this' pointer.
  ExprResult ActOnCXXThis(SourceLocation loc);

  /// Build a CXXThisExpr and mark it referenced in the current context.
  Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
  void MarkThisReferenced(CXXThisExpr *This);

  /// Try to retrieve the type of the 'this' pointer.
  ///
  /// \returns The type of 'this', if possible. Otherwise, returns a NULL
  /// type.
  QualType getCurrentThisType();

  /// When non-NULL, the C++ 'this' expression is allowed despite the
  /// current context not being a non-static member function. In such cases,
  /// this provides the type used for 'this'.
  QualType CXXThisTypeOverride;

  /// RAII object used to temporarily allow the C++ 'this' expression
  /// to be used, with the given qualifiers on the current class type.
  class CXXThisScopeRAII {
    Sema &S;
    QualType OldCXXThisTypeOverride;
    bool Enabled;

  public:
    /// Introduce a new scope where 'this' may be allowed (when enabled),
    /// using the given declaration (which is either a class template or a
    /// class), along with the qualifiers placed on '*this'.
    CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                     bool Enabled = true);

    ~CXXThisScopeRAII();
  };

  /// Make sure the value of 'this' is actually available in the current
  /// context, if it is a potentially evaluated context.
  ///
  /// \param Loc The location at which the capture of 'this' occurs.
  ///
  /// \param Explicit Whether 'this' is explicitly captured in a lambda
  /// capture list.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to
  /// capture. This is useful when enclosing lambdas must speculatively
  /// capture 'this' that may or may not be used in certain specializations
  /// of a nested generic lambda (depending on whether the name resolves to
  /// a non-static member function or a static function).
  /// \return returns 'true' if failed, 'false' if success.
  bool CheckCXXThisCapture(
      SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true,
      const unsigned *const FunctionScopeIndexToStopAt = nullptr,
      bool ByCopy = false);

  /// Determine whether the given type is the type of *this that is used
  /// outside of the body of a member function for a type that is currently
  /// being defined.
  bool isThisOutsideMemberFunctionBody(QualType BaseType);

  /// ActOnCXXBoolLiteral - Parse {true,false} literals.
  ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
  ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  ExprResult ActOnObjCAvailabilityCheckExpr(
      llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc,
      SourceLocation RParen);

  /// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
  ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
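  // Illustrative sketch (not part of Sema): CXXThisScopeRAII above is used
  // to make 'this' temporarily valid while processing code that is lexically
  // outside a member function body (e.g. a delayed exception specification).
  // 'S' (a Sema&) and 'Record' (the context Decl*) are assumed:
  //
  //   {
  //     CXXThisScopeRAII ThisScope(S, Record,
  //                                /*CXXThisTypeQuals=*/Qualifiers(),
  //                                /*Enabled=*/true);
  //     // ... check an expression that may mention 'this' ...
  //   } // 'this' reverts to its previous validity here.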
  /// ActOnCXXThrow - Parse throw expressions.
  ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
  ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                           bool IsThrownVarInScope);
  bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy,
                            Expr *E);

  /// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
  /// Can be interpreted either as function-style casting ("int(x)")
  /// or class type construction ("ClassType(x,y,z)")
  /// or creation of a value-initialized type ("int()").
  ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
                                       SourceLocation LParenOrBraceLoc,
                                       MultiExprArg Exprs,
                                       SourceLocation RParenOrBraceLoc,
                                       bool ListInitialization);

  ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
                                       SourceLocation LParenLoc,
                                       MultiExprArg Exprs,
                                       SourceLocation RParenLoc,
                                       bool ListInitialization);

  /// ActOnCXXNew - Parsed a C++ 'new' expression.
  ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                         SourceLocation PlacementLParen,
                         MultiExprArg PlacementArgs,
                         SourceLocation PlacementRParen,
                         SourceRange TypeIdParens, Declarator &D,
                         Expr *Initializer);
  ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
                         SourceLocation PlacementLParen,
                         MultiExprArg PlacementArgs,
                         SourceLocation PlacementRParen,
                         SourceRange TypeIdParens, QualType AllocType,
                         TypeSourceInfo *AllocTypeInfo,
                         Optional<Expr *> ArraySize,
                         SourceRange DirectInitRange, Expr *Initializer);

  /// Determine whether \p FD is an aligned allocation or deallocation
  /// function that is unavailable.
  bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;

  /// Produce diagnostics if \p FD is an aligned allocation or deallocation
  /// function that is unavailable.
  void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
                                            SourceLocation Loc);

  bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
                          SourceRange R);

  /// The scope in which to find allocation functions.
  enum AllocationFunctionScope {
    /// Only look for allocation functions in the global scope.
    AFS_Global,

    /// Only look for allocation functions in the scope of the
    /// allocated class.
    AFS_Class,

    /// Look for allocation functions in both the global scope
    /// and in the scope of the allocated class.
    AFS_Both
  };

  /// Finds the overloads of operator new and delete that are appropriate
  /// for the allocation.
  bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
                               AllocationFunctionScope NewScope,
                               AllocationFunctionScope DeleteScope,
                               QualType AllocType, bool IsArray,
                               bool &PassAlignment, MultiExprArg PlaceArgs,
                               FunctionDecl *&OperatorNew,
                               FunctionDecl *&OperatorDelete,
                               bool Diagnose = true);
  void DeclareGlobalNewDelete();
  void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
                                       ArrayRef<QualType> Params);

  bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
                                DeclarationName Name,
                                FunctionDecl *&Operator,
                                bool Diagnose = true);
  FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
                                              bool CanProvideSize,
                                              bool Overaligned,
                                              DeclarationName Name);
  FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
                                                      CXXRecordDecl *RD);

  /// ActOnCXXDelete - Parsed a C++ 'delete' expression
  ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
                            bool ArrayForm, Expr *Operand);
  void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
                            bool IsDelete, bool CallCanBeVirtual,
                            bool WarnOnNonAbstractTypes,
                            SourceLocation DtorLoc);

  ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
                               Expr *Operand, SourceLocation RParen);
  ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
                                  SourceLocation RParen);

  /// Parsed one of the type trait support pseudo-functions.
  ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                            ArrayRef<ParsedType> Args,
                            SourceLocation RParenLoc);
  ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                            ArrayRef<TypeSourceInfo *> Args,
                            SourceLocation RParenLoc);

  /// ActOnArrayTypeTrait - Parsed one of the binary type trait support
  /// pseudo-functions.
  ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                                 ParsedType LhsTy, Expr *DimExpr,
                                 SourceLocation RParen);
  ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                                 TypeSourceInfo *TSInfo, Expr *DimExpr,
                                 SourceLocation RParen);

  /// ActOnExpressionTrait - Parsed one of the unary type trait support
  /// pseudo-functions.
  ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc,
                                  Expr *Queried, SourceLocation RParen);
  ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc,
                                  Expr *Queried, SourceLocation RParen);

  ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base,
                                          SourceLocation OpLoc,
                                          tok::TokenKind OpKind,
                                          ParsedType &ObjectType,
                                          bool &MayBePseudoDestructor);

  ExprResult BuildPseudoDestructorExpr(
      Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind,
      const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc,
      SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType);

  ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                       SourceLocation OpLoc,
                                       tok::TokenKind OpKind,
                                       CXXScopeSpec &SS,
                                       UnqualifiedId &FirstTypeName,
                                       SourceLocation CCLoc,
                                       SourceLocation TildeLoc,
                                       UnqualifiedId &SecondTypeName);
  ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                       SourceLocation OpLoc,
                                       tok::TokenKind OpKind,
                                       SourceLocation TildeLoc,
                                       const DeclSpec &DS);

  /// MaybeCreateExprWithCleanups - If the current full-expression
  /// requires any cleanups, surround it with a ExprWithCleanups node.
  /// Otherwise, just returns the passed-in expression.
  Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
  Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
  ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);

  MaterializeTemporaryExpr *
  CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
                                 bool BoundToLvalueReference);

  ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
    return ActOnFinishFullExpr(
        Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
  }
  ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
                                 bool DiscardedValue,
                                 bool IsConstexpr = false);
  StmtResult ActOnFinishFullStmt(Stmt *Stmt);

  // Marks SS invalid if it represents an incomplete type.
  bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
  // Complete an enum decl, maybe without a scope spec.
  bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L,
                               CXXScopeSpec *SS = nullptr);

  DeclContext *computeDeclContext(QualType T);
  DeclContext *computeDeclContext(const CXXScopeSpec &SS,
                                  bool EnteringContext = false);
  bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
  CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);

  /// The parser has parsed a global nested-name-specifier '::'.
  ///
  /// \param CCLoc The location of the '::'.
  ///
  /// \param SS The nested-name-specifier, which will be updated in-place
  /// to reflect the parsed nested-name-specifier.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);

  /// The parser has parsed a '__super' nested-name-specifier.
  ///
  /// \param SuperLoc The location of the '__super' keyword.
  ///
  /// \param ColonColonLoc The location of the '::'.
  ///
  /// \param SS The nested-name-specifier, which will be updated in-place
  /// to reflect the parsed nested-name-specifier.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
                                SourceLocation ColonColonLoc,
                                CXXScopeSpec &SS);

  bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
                                       bool *CanCorrect = nullptr);
  NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);

  /// Keeps information about an identifier in a nested-name-spec.
  ///
  struct NestedNameSpecInfo {
    /// The type of the object, if we're parsing nested-name-specifier in
    /// a member access expression.
    ParsedType ObjectType;

    /// The identifier preceding the '::'.
    IdentifierInfo *Identifier;

    /// The location of the identifier.
    SourceLocation IdentifierLoc;

    /// The location of the '::'.
    SourceLocation CCLoc;

    /// Creates info object for the most typical case.
    NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                       SourceLocation ColonColonLoc,
                       ParsedType ObjectType = ParsedType())
        : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
          CCLoc(ColonColonLoc) {}

    NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                       SourceLocation ColonColonLoc, QualType ObjectType)
        : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
          IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {}
  };

  bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
                                    NestedNameSpecInfo &IdInfo);

  bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
                                   bool EnteringContext, CXXScopeSpec &SS,
                                   NamedDecl *ScopeLookupResult,
                                   bool ErrorRecoveryLookup,
                                   bool *IsCorrectedToColon = nullptr,
                                   bool OnlyNamespace = false);
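  // Illustrative sketch (not part of Sema): when the parser has consumed
  // 'outer::' while parsing 'outer::inner::x', it packages the pieces in a
  // NestedNameSpecInfo before calling ActOnCXXNestedNameSpecifier (declared
  // below). 'SemaRef', 'OuterII', 'OuterLoc', and 'CCLoc' are assumed names:
  //
  //   NestedNameSpecInfo IdInfo(/*II=*/OuterII, /*IdLoc=*/OuterLoc,
  //                             /*ColonColonLoc=*/CCLoc);
  //   SemaRef.ActOnCXXNestedNameSpecifier(S, IdInfo, EnteringContext, SS);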
  /// The parser has parsed a nested-name-specifier 'identifier::'.
  ///
  /// \param S The scope in which this nested-name-specifier occurs.
  ///
  /// \param IdInfo Parser information about an identifier in the
  /// nested-name-spec.
  ///
  /// \param EnteringContext Whether we're entering the context nominated by
  /// this nested-name-specifier.
  ///
  /// \param SS The nested-name-specifier, which is both an input
  /// parameter (the nested-name-specifier before this type) and an
  /// output parameter (containing the full nested-name-specifier,
  /// including this new type).
  ///
  /// \param ErrorRecoveryLookup If true, then this method is called to
  /// improve error recovery. In this case do not emit error message.
  ///
  /// \param IsCorrectedToColon If not null, suggestions to replace '::' ->
  /// ':' are allowed. The bool value pointed by this parameter is set to
  /// 'true' if the identifier is treated as if it was followed by ':', not
  /// '::'.
  ///
  /// \param OnlyNamespace If true, only considers namespaces in lookup.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
                                   bool EnteringContext, CXXScopeSpec &SS,
                                   bool ErrorRecoveryLookup = false,
                                   bool *IsCorrectedToColon = nullptr,
                                   bool OnlyNamespace = false);

  ExprResult ActOnDecltypeExpression(Expr *E);

  bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
                                           const DeclSpec &DS,
                                           SourceLocation ColonColonLoc);

  bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
                                 NestedNameSpecInfo &IdInfo,
                                 bool EnteringContext);

  /// The parser has parsed a nested-name-specifier
  /// 'template[opt] template-name < template-args >::'.
  ///
  /// \param S The scope in which this nested-name-specifier occurs.
  ///
  /// \param SS The nested-name-specifier, which is both an input
  /// parameter (the nested-name-specifier before this type) and an
  /// output parameter (containing the full nested-name-specifier,
  /// including this new type).
  ///
  /// \param TemplateKWLoc the location of the 'template' keyword, if any.
  /// \param TemplateName the template name.
  /// \param TemplateNameLoc The location of the template name.
  /// \param LAngleLoc The location of the opening angle bracket ('<').
  /// \param TemplateArgs The template arguments.
  /// \param RAngleLoc The location of the closing angle bracket ('>').
  /// \param CCLoc The location of the '::'.
  ///
  /// \param EnteringContext Whether we're entering the context of the
  /// nested-name-specifier.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool ActOnCXXNestedNameSpecifier(
      Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      TemplateTy TemplateName, SourceLocation TemplateNameLoc,
      SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
      SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext);

  /// Given a C++ nested-name-specifier, produce an annotation value
  /// that the parser can use later to reconstruct the given
  /// nested-name-specifier.
  ///
  /// \param SS A nested-name-specifier.
  ///
  /// \returns A pointer containing all of the information in the
  /// nested-name-specifier \p SS.
  void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);

  /// Given an annotation pointer for a nested-name-specifier, restore
  /// the nested-name-specifier structure.
  ///
  /// \param Annotation The annotation pointer, produced by
  /// \c SaveNestedNameSpecifierAnnotation().
  ///
  /// \param AnnotationRange The source range corresponding to the
  /// annotation.
  ///
  /// \param SS The nested-name-specifier that will be updated with the
  /// contents of the annotation pointer.
  void RestoreNestedNameSpecifierAnnotation(void *Annotation,
                                            SourceRange AnnotationRange,
                                            CXXScopeSpec &SS);

  bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

  /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier
  /// (global scope or nested-name-specifier) is parsed, part of a
  /// declarator-id. After this method is called, according to
  /// [C++ 3.4.3p3], names should be looked up in the declarator-id's scope,
  /// until the declarator is parsed and ActOnCXXExitDeclaratorScope is
  /// called. The 'SS' should be a non-empty valid CXXScopeSpec.
  bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);

  /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
  /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
  /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
  /// Used to indicate that names should revert to being looked up in the
  /// defining scope.
  void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

  /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
  /// initializer for the declaration 'Dcl'.
  /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is
  /// a static data member of class X, names should be looked up in the
  /// scope of class X.
  void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);

  /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
  /// initializer for the declaration 'Dcl'.
  void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);

  /// Create a new lambda closure type.
  CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
                                         TypeSourceInfo *Info,
                                         bool KnownDependent,
                                         LambdaCaptureDefault CaptureDefault);

  /// Start the definition of a lambda expression.
  CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
                                       SourceRange IntroducerRange,
                                       TypeSourceInfo *MethodType,
                                       SourceLocation EndLoc,
                                       ArrayRef<ParmVarDecl *> Params,
                                       ConstexprSpecKind ConstexprKind,
                                       Expr *TrailingRequiresClause);

  /// Number lambda for linkage purposes if necessary.
  void handleLambdaNumbering(
      CXXRecordDecl *Class, CXXMethodDecl *Method,
      Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);

  /// Endow the lambda scope info with the relevant properties.
  void buildLambdaScope(sema::LambdaScopeInfo *LSI,
                        CXXMethodDecl *CallOperator,
                        SourceRange IntroducerRange,
                        LambdaCaptureDefault CaptureDefault,
                        SourceLocation CaptureDefaultLoc,
                        bool ExplicitParams, bool ExplicitResultType,
                        bool Mutable);

  /// Perform initialization analysis of the init-capture and perform
  /// any implicit conversions such as an lvalue-to-rvalue conversion if
  /// not being used to initialize a reference.
  ParsedType actOnLambdaInitCaptureInitialization(
      SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
      IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
    return ParsedType::make(buildLambdaInitCaptureInitialization(
        Loc, ByRef, EllipsisLoc, None, Id,
        InitKind != LambdaCaptureInitKind::CopyInit, Init));
  }
  QualType buildLambdaInitCaptureInitialization(
      SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
      Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
      Expr *&Init);
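  // Illustrative sketch (not part of Sema): for an init-capture such as
  //
  //   auto l = [n = compute()] { return n; };
  //
  // the parser would invoke actOnLambdaInitCaptureInitialization above with
  // Id='n', InitKind=LambdaCaptureInitKind::CopyInit (the '=' form, hence
  // DirectInit=false in the inline body), and Init pointing at the call to
  // compute(); the returned ParsedType carries the deduced type of the
  // capture variable.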
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc, ExprResult RequiresClause); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
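/// For illustration only (in Objective-C++ with blocks enabled), such a
/// conversion is exercised by code along these lines:
/// \code
/// void (^blk)(int) = [](int x) { /* ... */ }; // lambda -> block pointer
/// \endcode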
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
                                                  CXXConversionDecl *Conv);

ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
                                         SourceLocation ConvLocation,
                                         CXXConversionDecl *Conv, Expr *Src);

/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
                               bool *PossibleNonPrimary = nullptr,
                               bool IsTrailingRequiresClause = false);

private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache;

llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
    SatisfactionCache;

public:
const NormalizedConstraint *getNormalizedAssociatedConstraints(
    NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);

/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result of true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
                            NamedDecl *D2, ArrayRef<const Expr *> AC2,
                            bool &Result);

/// If D1 was not at least as constrained as D2, but would have been if a
/// pair of atomic constraints involved had been declared in a concept and
/// not repeated in two separate places in code, emit a diagnostic pointing
/// this out.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
    ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);

/// \brief Check whether the given list of constraint expressions is
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
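/// As an illustrative sketch, for a constrained template such as:
/// \code
/// template <typename T>
///   requires std::integral<T> && (sizeof(T) > 1)
/// void f(T);
/// \endcode
/// instantiating f with T = short checks both conjuncts against the
/// substituted arguments (std::integral is used here purely for
/// illustration).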
bool CheckConstraintSatisfaction(
    const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
    ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange,
    ConstraintSatisfaction &Satisfaction);

/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful; emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
                                 ConstraintSatisfaction &Satisfaction);

/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful; emits a diagnostic and returns true
/// if an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
                              ConstraintSatisfaction &Satisfaction,
                              SourceLocation UsageLoc = SourceLocation());

/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(
    TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs,
    SourceRange TemplateIDRange);

/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
                                   bool First = true);

/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void DiagnoseUnsatisfiedConstraint(
    const ASTConstraintSatisfaction &Satisfaction, bool First = true);

// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
                                  ArrayRef<Expr *> Strings);

ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);

/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);

ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
                                bool Value);

ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);

/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool 
SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
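/// A minimal, non-normative example of such a class-level DLL attribute:
/// \code
/// struct __declspec(dllexport) Widget { void draw(); }; // members exported
/// \endcode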
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
    CXXRecordDecl *Class, Attr *ClassAttr,
    ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc);

/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);

/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);

void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);

/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);

void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
                                       Decl *TagDecl, SourceLocation LBrac,
                                       SourceLocation RBrac,
                                       const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();

void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
                                   llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
                              CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();

Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr, Expr *AssertMessageExpr,
                                   SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr,
                                   StringLiteral *AssertMessageExpr,
                                   SourceLocation RParenLoc, bool Failed);

FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
                                SourceLocation FriendLoc,
                                TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
                          MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
                                   MultiTemplateParamsArg TemplateParams);

QualType CheckConstructorDeclarator(Declarator &D, QualType R,
                                    StorageClass &SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
                                   StorageClass &SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass &SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
                                   StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);

void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);

bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
                                           CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();

bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
                                        DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl
*RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
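/// Illustrative example of the case diagnosed here:
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f(); }; // error: overrides a 'final' function
/// \endcode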
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, const SourceRange &, DeclAccessPair FoundDecl); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, ArrayRef<Expr *> ArgExprs, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. static NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. RequiredTemplateKind(TemplateNameIsRequiredTag) {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. 
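/// For illustration (hypothetical names), a typo like the following can be
/// recovered from here by correcting the template name:
/// \code
/// template <typename T> struct vektor {};
/// vektro<int> v; // undeclared; typo-corrected to 'vektor'
/// \endcode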
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool BuildTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc, bool AllowUnexpandedPack); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
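/// As a non-normative example, parameter-list checking compares
/// redeclarations such as:
/// \code
/// template <typename T = int> struct X; // declares a default argument
/// template <typename T> struct X {};    // OK: the default is inherited
/// X<> x;                                // uses T = int
/// \endcode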
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
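/// Illustrative use of a variable template whose specialization is formed
/// here:
/// \code
/// template <typename T> constexpr bool is_ptr = false;
/// template <typename T> constexpr bool is_ptr<T*> = true;
/// static_assert(is_ptr<int *>, ""); // references the T* specialization
/// \endcode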
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
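/// For illustration, several of these kinds can arise in a single call:
/// \code
/// template <typename T, int N> void f(T (&)[N]);
/// int a[3];
/// // f<int>(a): T is specified; N is deduced from the array bound.
/// \endcode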
enum CheckTemplateArgumentKind {
  /// The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,

  /// The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,

  /// The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};

bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                           NamedDecl *Template, SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc, unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
                               SourceLocation TemplateLoc,
                               TemplateArgumentListInfo &TemplateArgs,
                               bool PartialTemplateArgs,
                               SmallVectorImpl<TemplateArgument> &Converted,
                               bool UpdateArgsWithConversions = true,
                               bool *ConstraintsNotSatisfied = nullptr);

bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
                               TemplateArgumentLoc &Arg,
                               SmallVectorImpl<TemplateArgument> &Converted);

bool CheckTemplateArgument(TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                 QualType InstantiatedParamType, Expr *Arg,
                                 TemplateArgument &Converted,
                                 CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
                                   TemplateParameterList *Params,
                                   TemplateArgumentLoc &Arg);

ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                                   QualType ParamType,
                                                   SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                            SourceLocation Loc);

/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
  /// We are matching the template parameter lists of two templates
  /// that might be redeclarations.
  ///
  /// \code
  /// template<typename T> struct X;
  /// template<typename T> struct X;
  /// \endcode
  TPL_TemplateMatch,

  /// We are matching the template parameter lists of two template
  /// template parameters as part of matching the template parameter lists
  /// of two templates that might be redeclarations.
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
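/// A non-normative usage example for this form:
/// \code
/// template <typename MetaFun, typename T1, typename T2>
/// using apply_t = typename MetaFun::template apply<T1, T2>::type;
/// \endcode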
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack 
/// might be permitted in this location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();

/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
  /// An arbitrary expression.
  UPPC_Expression = 0,
  /// The base type of a class type.
  UPPC_BaseType,
  /// The type of an arbitrary declaration.
  UPPC_DeclarationType,
  /// The type of a data member.
  UPPC_DataMemberType,
  /// The size of a bit-field.
  UPPC_BitFieldWidth,
  /// The expression in a static assertion.
  UPPC_StaticAssertExpression,
  /// The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,
  /// The enumerator value.
  UPPC_EnumeratorValue,
  /// A using declaration.
  UPPC_UsingDeclaration,
  /// A friend declaration.
  UPPC_FriendDeclaration,
  /// A declaration qualifier.
  UPPC_DeclarationQualifier,
  /// An initializer.
  UPPC_Initializer,
  /// A default argument.
  UPPC_DefaultArgument,
  /// The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,
  /// The type of an exception.
  UPPC_ExceptionType,
  /// Partial specialization.
  UPPC_PartialSpecialization,
  /// Microsoft __if_exists.
  UPPC_IfExists,
  /// Microsoft __if_not_exists.
  UPPC_IfNotExists,
  /// Lambda expression.
  UPPC_Lambda,
  /// Block expression.
  UPPC_Block,
  /// A type constraint.
  UPPC_TypeConstraint,
  // A requirement in a requires-expression.
  UPPC_Requirement,
  // A requires-clause.
  UPPC_RequiresClause,
};

/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(
    SourceLocation Loc, UnexpandedParameterPackContext UPPC,
    ArrayRef<UnexpandedParameterPack> Unexpanded);

/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                     UnexpandedParameterPackContext UPPC);

/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(
    Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression);

/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);

/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
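/// For illustration, this family of checks rejects code such as:
/// \code
/// template <typename... Ts> void g(Ts...);
/// template <typename... Ts> void f(Ts... ts) { g(ts); } // 'ts' unexpanded
/// \endcode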
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. 
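/// For example (illustrative only), 'Ts *...' below is such a type followed
/// by an ellipsis:
/// \code
/// template <typename... Ts> struct Tuple {};
/// template <typename... Ts> using PtrTuple = Tuple<Ts *...>;
/// \endcode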
/// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
/// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
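///
/// For example (an editor's illustration, not part of the original
/// header):
/// \code
///   template<typename T> void f(T, T);
///   f(1, 2.5); // deduces T=int from the first argument and T=double
///              // from the second, an inconsistent result
/// \endcode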
TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// The deduced arguments did not satisfy the constraints associated /// with the template. TDK_ConstraintsNotSatisfied, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
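///
/// For illustration (an editor's sketch, not part of the original
/// header), for a call such as:
/// \code
///   template<typename T> void f(T *p, int n);
///   int x;
///   f(&x, 2.5); // records ('T *', '&x') and ('int', '2.5') so that the
///               // argument conversions can be rechecked after deduction
/// \endcode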
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); // Substitute auto in TypeWithAuto for a Dependent auto type QualType SubstAutoTypeDependent(QualType TypeWithAuto); // Substitute auto in TypeWithAuto for a Dependent auto type TypeSourceInfo * SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
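///
/// For illustration (an editor's sketch, not part of the original
/// header), the implicit guides enable class template argument deduction:
/// \code
///   template<typename T> struct Box { Box(T); };
///   Box b(42); // implicit guide Box(T) -> Box<T> deduces Box<int>
/// \endcode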
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
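///
/// For example (an editor's illustration, not part of the original
/// header):
/// \code
///   template<typename T, typename U = T *> struct S { };
///   S<int> s; // instantiates the default argument U = int *
/// \endcode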
DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provide the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are instantiating a requirement of a requires expression. RequirementInstantiation, /// We are checking the satisfaction of a nested requirement of a requires /// expression. NestedRequirementConstraintsCheck, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, // We are normalizing a constraint expression. ConstraintNormalization, // We are substituting into the parameter mapping of an atomic constraint // during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. MarkingClassDllexported, /// Added for template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity.
const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema?
It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates to true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction.
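///
/// For example (an editor's illustration, not part of the original
/// header):
/// \code
///   template<typename T, typename U> void f(U u);
///   f<int>(2.5); // T=int is substituted as explicitly specified,
///                // U=double as deduced from the call
/// \endcode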
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
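///
/// For illustration (an editor's sketch, not part of the original
/// header), normalization decomposes a constraint into atomic pieces:
/// \code
///   template<typename T> concept C = __is_trivial(T) && sizeof(T) > 1;
///   // C<T> normalizes to the conjunction of its two atomic constraints
/// \endcode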
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintNormalization, NamedDecl *Template, SourceRange InstantiationRange); struct ParameterMappingSubstitution {}; /// \brief Note that we are substituting into the parameter mapping of an /// atomic constraint during constraint normalization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParameterMappingSubstitution, NamedDecl *Template, SourceRange InstantiationRange); /// \brief Note that we are substituting template arguments into a part of /// a requirement of a requires expression. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::Requirement *Req, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are checking the satisfaction of the constraint /// expression inside of a nested requirement. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::NestedRequirement *Req, ConstraintsCheck, SourceRange InstantiationRange = SourceRange()); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// number of recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5.
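///
/// For example (an editor's illustration, not part of the original
/// header):
/// \code
///   int f();
///   auto n = sizeof(f());  // 'f()' is unevaluated here
///   decltype(f()) x = 0;   // and here
/// \endcode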
bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } bool isImmediateFunctionContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isImmediateFunctionContext(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation); /// however, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) { assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } else { // Template instantiations in the PCH may be delayed until the TU. S.PendingInstantiations.swap(SavedPendingInstantiations); S.PendingInstantiations.insert(S.PendingInstantiations.end(), SavedPendingInstantiations.begin(), SavedPendingInstantiations.end()); } } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos.
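///
/// A typical use (an editor's sketch, not part of the original header;
/// 'SomeInfo' and 'NumParams' are placeholder names): record info only
/// for parameters that have any, then ask for the array, which is null
/// when nothing interesting was set:
/// \code
///   ExtParameterInfoBuilder Builder;
///   Builder.set(/*index=*/2, SomeInfo);  // sparse writes are fine
///   const auto *Array = Builder.getPointerOrNull(NumParams);
///   // Array == nullptr if every entry is the default
/// \endcode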
class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='.
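///
/// For illustration (an editor's sketch, not part of the original
/// header):
/// \code
///   struct X {
///     int v;
///     friend auto operator<=>(const X &, const X &) = default;
///     // an implicit 'operator==' returning bool is synthesized
///   };
/// \endcode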
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired 
= false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( 
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, NamedDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? 
bool isPreciseFPEnabled() { return !CurFPFeatures.getAllowFPReassociate() && !CurFPFeatures.getNoSignedZero() && !CurFPFeatures.getAllowReciprocal() && !CurFPFeatures.getAllowApproxFunc(); } void ActOnPragmaFPEvalMethod(SourceLocation Loc, LangOptions::FPEvalMethodKind Value); /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action, PragmaFloatControlKind Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. 
void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D. void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Annot, MutableArrayRef<Expr *> Args); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. 
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);

//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr *Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);

/// Lookup 'coroutine_traits' in std namespace and std::experimental
/// namespace. The namespace found is recorded in Namespace.
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc, NamespaceDecl *&Namespace);

/// Check that the expression co_await promise.final_suspend() shall not be
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);

//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;

struct DeclareTargetContextInfo {
  struct MapInfo {
    OMPDeclareTargetDeclAttr::MapTypeTy MT;
    SourceLocation Loc;
  };
  /// Explicitly listed variables and functions in a 'to' or 'link' clause.
  llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped;

  /// The 'device_type' as parsed from the clause.
  OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any;

  /// The directive kind, `begin declare target` or `declare target`.
  OpenMPDirectiveKind Kind;

  /// The directive with indirect clause.
  Optional<Expr *> Indirect;

  /// The directive location.
  SourceLocation Loc;

  DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc) : Kind(Kind), Loc(Loc) {}
};

/// Number of nested '#pragma omp [begin] declare target' directives.
SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting;

/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();

ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true, bool SuppressExprDiags = false);

/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;

/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const;

/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;

/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();

/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);

/// Analyzes and checks a loop nest for use by a loop transformation.
///
/// \param Kind The loop transformation directive kind.
/// \param NumLoops How many nested loops the directive is expecting. /// \param AStmt Associated statement of the transformation directive. /// \param LoopHelpers [out] The loop analysis result. /// \param Body [out] The body code nested in \p NumLoops loop. /// \param OriginalInits [out] Collection of statements and declarations that /// must have been executed/declared before entering the /// loop. /// /// \return Whether there was any error. bool checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers, Stmt *&Body, SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>> &OriginalInits); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment. bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle a `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle a `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. 
bool areMultiversionVariantFunctionsCompatible(const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer);

/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);

/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const;

/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc);

/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();

/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();

/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const;

/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);

/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const;

/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const;

ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op);

/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);

/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

/// Called on well-formed '\#pragma omp metadirective' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);

// OpenMP directives and clauses.
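// --- Illustrative sketch, not part of Sema ---------------------------------
// User code that exercises the 'declare variant' scope machinery and
// ActOnOpenMPCall described above; the function name 'sum' is hypothetical.
int sum(const int *a, int n); // base declaration
#pragma omp begin declare variant match(device = {kind(cpu)})
int sum(const int *a, int n) { // specialized definition, mangled and
  int s = 0;                   // registered for the surrounding scope
  for (int i = 0; i < n; ++i)
    s += a[i];
  return s;
}
#pragma omp end declare variant
// Later calls to sum() may be replaced by the specialization when the
// OpenMP context selector matches.
// ----------------------------------------------------------------------------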
/// Called on correct id-expression from the '#pragma omp threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp [begin] assumes'.
void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<std::string> Assumptions, bool SkippedClauses);
/// Check if there is an active global `omp begin assumes` directive.
bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); }
/// Check if there is an active global `omp assumes` directive.
bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); }
/// Called on well-formed '#pragma omp end assumes'.
void ActOnOpenMPEndAssumesDirective();
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the Requires directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
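// --- Illustrative sketch, not part of Sema ---------------------------------
// Declarative directives handled by the callbacks above; the identifiers
// 'counter', 'maxd', 'Vec' and 'vmap' are hypothetical.
static int counter;
#pragma omp threadprivate(counter) // ActOnOpenMPThreadprivateDirective
#pragma omp declare reduction(maxd : double : omp_out = omp_in > omp_out ? omp_in : omp_out) initializer(omp_priv = 0.0)
struct Vec { int len; double *data; };
#pragma omp declare mapper(vmap : struct Vec v) map(v.len, v.data[0 : v.len])
// ----------------------------------------------------------------------------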
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective();
/// Called once a target context is completed, that can be when a
/// '#pragma omp end declare target' was encountered or when a
/// '#pragma omp declare target' without declaration-definition-seq was
/// encountered.
void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, DeclareTargetContextInfo &DTCI);
/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc);
/// Return true if currently in OpenMP task with untied clause context.
bool isInOpenMPTaskUntiedContext() const;
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); }
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to
/// an OpenMP loop directive.
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);
/// Process a canonical OpenMP loop nest that can either be a canonical
/// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an
/// OpenMP loop transformation construct.
StmtResult ActOnOpenMPLoopnest(Stmt *AStmt);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '#pragma omp tile' after parsing of its clauses and
/// the associated statement.
StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
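// --- Illustrative sketch, not part of Sema ---------------------------------
// 'declare target' plus basic loop directives matching the handlers above;
// 'on_device' and 'scale' are hypothetical.
#pragma omp declare target     // ActOnStartOpenMPDeclareTargetContext
int on_device(int x);
#pragma omp end declare target // ActOnOpenMPEndDeclareTargetDirective
void scale(int n, float a, float *x) {
#pragma omp simd               // ActOnOpenMPSimdDirective
  for (int i = 0; i < n; ++i)
    x[i] *= a;
#pragma omp tile sizes(16)     // ActOnOpenMPTileDirective (OpenMP 5.1)
  for (int i = 0; i < n; ++i)
    x[i] += 1.0f;
}
// ----------------------------------------------------------------------------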
/// Called on well-formed '#pragma omp unroll' after parsing of its clauses
/// and the associated statement.
StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
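// --- Illustrative sketch, not part of Sema ---------------------------------
// Worksharing constructs corresponding to the Act* handlers above; the
// function 'work' and the critical-section name 'update' are hypothetical.
void work(int n, double *a) {
#pragma omp parallel
  {
#pragma omp for schedule(static) // ActOnOpenMPForDirective
    for (int i = 0; i < n; ++i)
      a[i] *= 2.0;
#pragma omp single               // ActOnOpenMPSingleDirective
    a[0] = 0.0;
#pragma omp critical(update)     // ActOnOpenMPCriticalDirective
    a[n - 1] += 1.0;
  }
}
// ----------------------------------------------------------------------------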
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion);
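// --- Illustrative sketch, not part of Sema ---------------------------------
// Tasking, synchronization and offload constructs from the handlers above;
// 'fib' and 'offload_copy' are hypothetical.
int fib(int n) {
  if (n < 2)
    return n;
  int a, b;
#pragma omp task shared(a) // ActOnOpenMPTaskDirective
  a = fib(n - 1);
  b = fib(n - 2);
#pragma omp taskwait       // ActOnOpenMPTaskwaitDirective
  return a + b;
}
void offload_copy(int n, double *a) {
#pragma omp target map(tofrom : a[0:n]) // ActOnOpenMPTargetDirective
  for (int i = 0; i < n; ++i)
    a[i] += 1.0;
}
// ----------------------------------------------------------------------------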
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
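// --- Illustrative sketch, not part of Sema ---------------------------------
// Task-loop and distribute-family constructs from the handlers above;
// 'halve' is hypothetical.
void halve(int n, double *a) {
#pragma omp parallel
#pragma omp master
#pragma omp taskloop grainsize(64) // ActOnOpenMPTaskLoopDirective
  for (int i = 0; i < n; ++i)
    a[i] *= 0.5;
#pragma omp target teams map(tofrom : a[0:n])
#pragma omp distribute             // ActOnOpenMPDistributeDirective
  for (int i = 0; i < n; ++i)
    a[i] += 1.0;
}
// ----------------------------------------------------------------------------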
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp loop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPGenericLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type, bool IsDeclareSimd = false);
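// --- Illustrative sketch, not part of Sema ---------------------------------
// A fully combined offload construct handled by the combined-directive
// callbacks above; 'saxpy_offload' is hypothetical.
void saxpy_offload(int n, float a, float *x, float *y) {
#pragma omp target teams distribute parallel for simd map(to : x[0:n]) map(tofrom : y[0:n])
  for (int i = 0; i < n; ++i)
    y[i] += a * x[i];
}
// ----------------------------------------------------------------------------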
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \param NumAppendArgs The number of omp_interop_t arguments to account for
/// in checking.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, unsigned NumAppendArgs, SourceRange SR);

/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The context traits associated with the function variant.
/// \param AdjustArgsNothing The list of 'nothing' arguments.
/// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments.
/// \param AppendArgs The list of 'append_args' arguments.
/// \param AdjustArgsLoc The Location of an 'adjust_args' clause.
/// \param AppendArgsLoc The Location of an 'append_args' clause.
/// \param SR The SourceRange of the 'declare variant' directive.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, ArrayRef<Expr *> AdjustArgsNothing, ArrayRef<Expr *> AdjustArgsNeedDevicePtr, ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs, SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc, SourceRange SR);

OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'align' clause.
OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
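// --- Illustrative sketch, not part of Sema ---------------------------------
// Expression-valued clauses checked by the clause handlers above ('if',
// 'num_threads', 'safelen'); the function 'update' is hypothetical.
void update(int n, double *a) {
#pragma omp parallel num_threads(4) if(parallel : n > 1024) // num_threads/if handlers
#pragma omp for
  for (int i = 0; i < n; ++i)
    a[i] += 1.0;
#pragma omp simd safelen(8) // ActOnOpenMPSafelenClause
  for (int i = 0; i < n; ++i)
    a[i] *= 2.0;
}
// ----------------------------------------------------------------------------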
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'sizes' clause.
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'full' clause.
OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'partial' clause.
OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'when' clause.
OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'compare' clause. OMPClause *ActOnOpenMPCompareClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acquire' clause. OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'release' clause. OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'relaxed' clause. OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'init' clause. OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs, bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'use' clause. OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'destroy' clause. OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'novariants' clause. OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'nocontext' clause. OMPClause *ActOnOpenMPNocontextClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'filter' clause. OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. 
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause *ActOnOpenMPMapClause( ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, bool NoDiagnose = false, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. 
OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Data for list of allocators. struct UsesAllocatorsData { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; /// Called on well-formed 'uses_allocators' clause. OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<UsesAllocatorsData> Data); /// Called on well-formed 'affinity' clause. OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// Called on a well-formed 'bind' clause. OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. 
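/// A minimal sketch of a typical call site (illustrative; the expression,
/// target type, and cast kind are assumptions):
/// \code
///   ExprResult Res = ImpCastExprToType(E, Context.IntTy, CK_IntegralCast);
/// \endcode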
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_PRValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check whether the given statement can have musttail applied to it, /// issuing a diagnostic and returning false if not. In the success case, /// the statement is rewritten to remove implicit nodes from the return /// value. bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA); private: /// Check whether the given statement can have musttail applied to it, /// issuing a diagnostic and returning false if not. bool checkMustTailAttr(const Stmt *St, const Attr &MTA); public: /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. 
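/// A sketch of the intended flow at a call site (illustrative; the variable
/// names are assumptions, and a true return is taken to signal an error):
/// \code
///   SmallVector<Expr *, 8> AllArgs;
///   if (GatherArgumentsForCall(CallLoc, FDecl, Proto, 0, Args, AllArgs))
///     return ExprError();
/// \endcode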
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointers types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. 
IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. 
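/// A typical pairing with DiagnoseAssignmentResult (illustrative sketch;
/// the surrounding variables are assumptions):
/// \code
///   AssignConvertType ConvTy =
///       CheckSingleAssignmentConstraints(LHSType, RHS);
///   if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType,
///                                RHS.get()->getType(), RHS.get(),
///                                AA_Assigning))
///     return ExprError();
/// \endcode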
AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
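// A sketch for the simple-assignment case (illustrative; names assumed):
//   QualType ResultTy = CheckAssignmentOperands(LHSExpr, RHS, OpLoc,
//                                               /*CompoundType=*/QualType());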
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool isValidSveBitcast(QualType srcType, QualType destType); bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy); bool areVectorTypesSameSize(QualType srcType, QualType destType); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. 
struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckMatrixCast - Check type constraints for matrix casts. // We allow casting between matrixes of the same dimensions i.e. when they // have the same number of rows and column. Returns true if the cast is // invalid. bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy, CastKind &Kind); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. 
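/// For example, under ARC a hypothetical assignment that hands a +1
/// retained object to a __weak l-value is the pattern being checked:
/// \code
///   __weak id w = [[NSObject alloc] init];
/// \endcode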
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; QualType PreferredConditionType(ConditionKind K) const { return K == ConditionKind::Switch ? Context.IntTy : Context.BoolTy; } ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK, bool MissingOK = false); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). 
/// Also performs the standard function and array decays, possibly changing
/// the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return The converted expression, or an error result if there were any
/// errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                 bool IsConstexpr = false);

/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Returns an error result if conversion to bool
/// is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  virtual SemaDiagnosticBuilder diagnoseNotICEType(Sema &S,
                                                   SourceLocation Loc,
                                                   QualType T);
  virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
                                               SourceLocation Loc) = 0;
  virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
  virtual ~VerifyICEDiagnoser() {}
};

enum AllowFoldKind {
  NoFold,
  AllowFold,
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid result on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr,
                                           AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           AllowFoldKind CanFold = NoFold) {
  return VerifyIntegerConstantExpression(E, nullptr, CanFold);
}

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns an invalid result on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                          bool *ZeroWidth = nullptr);

private:
unsigned ForceCUDAHostDeviceDepth = 0;

public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();

/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();

/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
               std::vector<PartialDiagnosticAt>>
    DeviceDeferredDiags;

/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
  CanonicalDeclPtr<FunctionDecl> FD;
  SourceLocation Loc;
};

/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
               /* Caller = */ FunctionDeclAndLoc>
    DeviceKnownEmittedFns;

/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics
///   unless \p EmitOnBothSides is true.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
///   the device, creates a diagnostic which is emitted if and when we realize
///   that the function will be codegen'ed.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in CUDA device code.
///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
///    return ExprError();
///  // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
                                           unsigned DiagID);

/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
///   function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
///   for the device, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in NVPTX device code.
///  if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
///    return ExprError();
///  // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc,
                                             unsigned DiagID,
                                             FunctionDecl *FD);

/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
///   function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in NVPTX device code.
///  if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
///    return ExprError();
///  // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
                                           unsigned DiagID,
                                           FunctionDecl *FD);

SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
                                 FunctionDecl *FD = nullptr);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
                                 const PartialDiagnostic &PD,
                                 FunctionDecl *FD = nullptr) {
  return targetDiag(Loc, PD.getDiagID(), FD) << PD;
}

/// Check if the type is allowed to be used for the current target.
void checkTypeSupport(QualType Ty, SourceLocation Loc,
                      ValueDecl *D = nullptr);

enum CUDAFunctionTarget {
  CFT_Device,
  CFT_Global,
  CFT_Host,
  CFT_HostDevice,
  CFT_InvalidTarget
};

/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
                                      bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);

enum CUDAVariableTarget {
  CVT_Device,  /// Emitted on device side with a shadow variable on host side
  CVT_Host,    /// Emitted on host side only
  CVT_Both,    /// Emitted on both sides with different addresses
  CVT_Unified, /// Emitted as a unified address, e.g. managed variables
};
/// Determines whether the given variable is emitted on host or device side.
CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);

/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
  return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}

static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);

// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
  CFP_Never,      // Invalid caller/callee combination.
  CFP_WrongSide,  // Calls from host-device to host or device
                  // function that do not match current compilation
                  // mode.
  CFP_HostDevice, // Any calls to host/device functions.
  CFP_SameSide,   // Calls from host-device to host or device
                  // function matching current compilation mode.
  CFP_Native,     // host-to-host or device-to-device calls.
};

/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
///               nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                              const FunctionDecl *Callee);

/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes.
Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas by default is host device function unless it has explicit /// host or device attribute. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). 
One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); enum class AttributeCompletion { Attribute, Scope, None, }; void CodeCompleteAttribute( AttributeCommonInfo::Syntax Syntax, AttributeCompletion Completion = AttributeCompletion::Attribute, const IdentifierInfo *Scope = nullptr); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. /// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc, bool Braced); QualType ProduceCtorInitMemberSignatureHelp( Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc, bool Braced); QualType ProduceTemplateArgumentSignatureHelp( TemplateTy, ArrayRef<ParsedTemplateArgument>, SourceLocation LAngleLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
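/// For instance, completion might be requested inside a designated
/// initializer (hypothetical source; '^' marks the completion point):
/// \code
///   struct Point p = { .x = 1, .^ };
/// \endcode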
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
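// An illustrative sketch of collecting global completions outside of
// parsing (hypothetical call site; the allocator setup is an assumption):
//   CodeCompletionTUInfo CCTUInfo(
//       std::make_shared<GlobalCodeCompletionAllocator>());
//   SmallVector<CodeCompletionResult, 8> Results;
//   SemaRef.GatherGlobalCodeCompletions(CCTUInfo.getAllocator(), CCTUInfo,
//                                       Results);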
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> 
ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinArithmeticFence(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); bool SemaBuiltinElementwiseMath(CallExpr *TheCall); bool PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall); bool PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall); // Matrix builtin handling. 
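// (These validate source-level calls such as the hypothetical
//    M2 = __builtin_matrix_transpose(M1);
//  before lowering; the example is illustrative only.)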
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. 
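/// An illustrative registration (the identifier and magic value are
/// hypothetical):
/// \code
///   S.RegisterTypeTagForDatatype(&S.Context.Idents.get("mpi_dt"),
///                                /*MagicValue=*/42, S.Context.IntTy,
///                                /*LayoutCompatible=*/false,
///                                /*MustBeNull=*/false);
/// \endcode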
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Nullable_result = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; }
/// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing. Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to a /// function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. For // void f(int, int), TooManyArguments(2, 2, true) is true: the pending comma // after "f(1, 2, " counts as an extra argument. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view it as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the set of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so, it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
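// Illustrative flow for the misaligned-member tracking above (a sketch, not
// upstream documentation; the names S, s, p, q are hypothetical):
//
//   struct __attribute__((packed)) S { char c; int i; } s;
//   int *p = &s.i;          // gathered via AddPotentialMisalignedMembers:
//                           // the address of a packed member may be
//                           // misaligned (-Waddress-of-packed-member)
//   void *q = (void *)&s.i; // DiscardMisalignedMemberAddress drops the
//                           // entry: void* imposes no stricter alignment
//
// DiagnoseMisalignedMembers() then reports whatever is still gathered once
// the full expression is finished.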
/// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for device yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); void deepTypeCheckForSYCLDevice(SourceLocation UsedAt, llvm::DenseSet<QualType> Visited, ValueDecl *DeclToCheck); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context.
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; template <> void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, AlignPackInfo Value); } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getHashValue()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
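// Usage sketch for EnterExpressionEvaluationContext above (illustrative, not
// upstream code; BuildUnevaluatedOperand is a hypothetical caller):
//
//   ExprResult Sema::BuildUnevaluatedOperand(Expr *E) {
//     EnterExpressionEvaluationContext Unevaluated(
//         *this, Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... analyze E; no code will be generated to evaluate it ...
//     return E;
//   } // context popped here, on every return path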
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/DarwinSDKInfo.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class 
CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. 
SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Tracks expected type during expression parsing, for use in code completion. /// The type is tied to a particular token; all functions that update or consume /// the type take a start location of the token they are looking at as a /// parameter. This avoids updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Handles e.g. BaseType{ .D = Tok... void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType, const Designation &D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while the function_ref is alive. /// /// The callback should also emit signature help as a side-effect, but only /// if the completion point has been reached. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); /// Get the expected type associated with this location, if any. /// /// If the location is a function argument, determining the expected type /// involves considering all function overloads and the arguments so far. /// In this case, signature help for these function overloads will be reported /// as a side-effect (only if the completion point has been reached). QualType get(SourceLocation Tok) const { if (!Enabled || Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: bool Enabled; /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc;
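// Illustrative contract of PreferredTypeBuilder::get above (a sketch, not
// upstream code; S, Tok, and OtherLoc are hypothetical):
//
//   PreferredTypeBuilder PreferredType(/*Enabled=*/true);
//   PreferredType.enterReturn(S, Tok);           // prime builder at 'return'
//   QualType Hit  = PreferredType.get(Tok);      // expected type at Tok
//   QualType Miss = PreferredType.get(OtherLoc); // null QualType: wrong token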
/// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// Source of additional semantic information. ExternalSemaSource *ExternalSource; /// Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See the comment in the overload below for why it's safe to compute the // linkage of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must do, since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 32; static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When an AlignPackInfo itself cannot be used, this returns a 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding, it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attribute on a decl. return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber;
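// Worked example for the raw encoding above (illustrative): bit 0 is the XL
// flag, bits 1-2 the align mode, bit 3 the pack-attr flag, and bits 4-8 the
// pack number. For AlignPackInfo(AlignPackInfo::Packed, /*Num=*/4,
// /*IsXL=*/false):
//
//   uint32_t Encoding = 0         // bit 0 clear: not an XL stack
//                     | (2 << 1)  // Mode::Packed == 2
//                     | 0x8       // PackAttrMask: pack constructor was used
//                     | (4 << 4); // pack number 4
//   // Encoding == 0x4C, and getFromRawEncoding(0x4C) reconstructs an
//   // AlignPackInfo that compares equal under operator== above.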
/// \brief True if it is an XL #pragma align/pack stack. bool XLStack; /// \brief Uninitialized pack value. static constexpr unsigned char UninitPackVal = -1; // Masks to encode and decode an AlignPackInfo. static constexpr uint32_t IsXLMask{0x0000'0001}; static constexpr uint32_t AlignModeMask{0x0000'0006}; static constexpr uint32_t PackAttrMask{0x0000'0008}; static constexpr uint32_t PackNumMask{0x0000'01F0}; }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label, pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label, just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; PragmaStack<AlignPackInfo> AlignPackStack;
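// Illustrative mapping from source pragmas to PragmaStack::Act above (a
// sketch, not upstream documentation), e.g. for a PragmaStack<unsigned>
// modeling #pragma pack:
//
//   #pragma pack(push, r1, 4)  ->  Act(Loc, PSK_Push_Set, "r1", 4);
//   #pragma pack(8)            ->  Act(Loc, PSK_Set, "", 8);
//   #pragma pack(pop, r1)      ->  Act(Loc, PSK_Pop, "r1", /*unused*/ 0);
//   #pragma pack()             ->  Act(Loc, PSK_Reset, "", 0);
//
// A push saves the current value before any PSK_Set half takes effect; a
// labeled pop restores the value saved under that label and erases that slot
// and everything pushed after it.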
// The current #pragma align/pack values and locations at each #include. struct AlignPackIncludeState { AlignPackInfo CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This is an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
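// Illustrative use of the #pragma clang attribute stack above (a sketch, not
// upstream documentation): each push creates a PragmaAttributeGroup, and every
// declaration parsed while the group is live receives the attribute when it
// matches the subject rules.
//
//   #pragma clang attribute push (__attribute__((annotate("api"))), \
//                                 apply_to = function)
//   void f();  // receives annotate("api")
//   int g;     // not a function: receives nothing
//   #pragma clang attribute pop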
/// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list of all the extended vector types. This /// allows us to associate a raw vector type with one of the ext_vector type /// names. This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of the translation unit. /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with the /// new-expression used in the initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations for which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encountered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). 
void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before being declared. This is rare; such an identifier may /// alias another identifier, declared or undeclared. llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
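// Illustrative source that lands in WeakUndeclaredIdentifiers above (a
// sketch, not upstream documentation): the pragma names an identifier that
// has not been declared yet.
//
//   #pragma weak flockfile     // recorded until a declaration appears
//   void flockfile(FILE *);    // now the weak attribute can be attached
//
// The ExtnameUndeclaredIdentifiers map below plays the same role for
//   #pragma redefine_extname oldname newname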
/// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before being declared. Used in Solaris system /// headers to define functions that occur in multiple standards to call the /// version in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library reside. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The namespace where coroutine components are defined. In the standard /// they are defined in the std namespace, and in the previous implementation /// they were defined in the std::experimental namespace. NamespaceDecl *CoroTraitsNamespaceCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// In addition to being constant evaluated, the current expression /// occurs in an immediate function context - either a consteval function /// or a 'consteval if' statement. ImmediateFunctionContext, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl;
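// Illustrative mapping from C++ constructs to the evaluation contexts above
// (a sketch, not upstream documentation):
//
//   sizeof(expr)                       -> Unevaluated
//   case N: in a switch                -> ConstantEvaluated
//   body of a consteval function       -> ImmediateFunctionContext
//   an ordinary statement in a body    -> PotentiallyEvaluated
//   a default argument expression      -> PotentiallyEvaluatedIfUsed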
/// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; // A context can be nested in both a discarded statement context and // an immediate function context, so they need to be tracked independently. bool InDiscardedStatement; bool InImmediateFunctionContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext), InDiscardedStatement(false), InImmediateFunctionContext(false) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated || Context == ExpressionEvaluationContext::ImmediateFunctionContext; } bool isImmediateFunctionContext() const { return Context == ExpressionEvaluationContext::ImmediateFunctionContext || (Context == ExpressionEvaluationContext::DiscardedStatement && InImmediateFunctionContext); } bool isDiscardedStatementContext() const { return Context == ExpressionEvaluationContext::DiscardedStatement || (Context == ExpressionEvaluationContext::ImmediateFunctionContext && InDiscardedStatement); } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. std::tuple<MangleNumberingContext *, Decl *> getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl *, 2> Pair; public: SpecialMemberOverloadResult() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. const TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the start locations of unparsed default arguments. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedButUsed - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit.
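/// The classic shape of such a mismatch (illustrative user code; here the
/// pointer is a field allocated in one place and freed in another):
///   struct S { int *P = new int[4]; ~S() { delete P; } }; // wants delete[]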
const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; class GlobalMethodPool { public: using Lists = std::pair<ObjCMethodList, ObjCMethodList>; using iterator = llvm::DenseMap<Selector, Lists>::iterator; iterator begin() { return Methods.begin(); } iterator end() { return Methods.end(); } iterator find(Selector Sel) { return Methods.find(Sel); } std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) { return Methods.insert(Val); } int count(Selector Sel) const { return Methods.count(Sel); } bool empty() const { return Methods.empty(); } private: llvm::DenseMap<Selector, Lists> Methods; }; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the CurFPFeatures state on entry/exit of compound /// statements. class FPFeaturesStateRAII { public: FPFeaturesStateRAII(Sema &S); ~FPFeaturesStateRAII(); FPOptionsOverride getOverrides() { return OldOverrides; } private: Sema& S; FPOptions OldFPFeaturesState; FPOptionsOverride OldOverrides; LangOptions::FPEvalMethodKind OldEvalMethod; SourceLocation OldFPPragmaLocation; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; /// Increment when we find a reference; decrement when we find an ignored /// assignment. Ultimately the value is 0 if every reference is an ignored /// assignment. llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments; private: Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo; bool WarnedDarwinSDKInfoMissing = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); /// This virtual key function only exists to limit the emission of debug info /// describing the Sema class. GCC and Clang only emit debug info for a class /// with a vtable when the vtable is emitted. Sema is final and not /// polymorphic, but the debug info size savings are so significant that it is /// worth adding a vtable just to take advantage of this optimization. virtual void anchor(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getCurFPFeatures() { return CurFPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc, StringRef Platform); DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(); /// Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// /// \param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. ImmediateDiagBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well.
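/// A hedged sketch of the flow (names illustrative):
///   { ImmediateDiagBuilder ID(DB, SemaRef, DiagID); ID << SomeArg; }
///   // on destruction: the wrapped builder is cleared and
///   // SemaRef.EmitCurrentDiagnostic(DiagID) prints the diagnostic plus
///   // any enclosing template instantiation stack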
class ImmediateDiagBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {} // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op // in that case anyway. ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default; ~ImmediateDiagBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First clear the diagnostic // builder itself so it won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template <typename T> friend const ImmediateDiagBuilder & operator<<(const ImmediateDiagBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const ImmediateDiagBuilder &operator<<(T &&V) const { const DiagnosticBuilder &BaseDiag = *this; BaseDiag << std::move(V); return *this; } }; /// A generic diagnostic builder for errors which may or may not be deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class SemaDiagnosticBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack.
K_Deferred }; SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D); SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default; ~SemaDiagnosticBuilder(); bool isImmediate() const { return ImmediateDiag.hasValue(); } /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (SemaDiagnosticBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a SemaDiagnosticBuilder yourself. operator bool() const { return isImmediate(); } template <typename T> friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } // It is necessary to limit this to rvalue reference to avoid calling this // function with a bitfield lvalue argument since non-const reference to // bitfield is not allowed. template <typename T, typename = typename std::enable_if< !std::is_lvalue_reference<T>::value>::type> const SemaDiagnosticBuilder &operator<<(T &&V) const { if (ImmediateDiag.hasValue()) *ImmediateDiag << std::move(V); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V); return *this; } friend const SemaDiagnosticBuilder & operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) { if (Diag.ImmediateDiag.hasValue()) PD.Emit(*Diag.ImmediateDiag); else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD; return Diag; } void AddFixItHint(const FixItHint &Hint) const { if (ImmediateDiag.hasValue()) ImmediateDiag->AddFixItHint(Hint); else if (PartialDiagId.hasValue()) S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint); } friend ExprResult ExprError(const SemaDiagnosticBuilder &) { return ExprError(); } friend StmtResult StmtError(const SemaDiagnosticBuilder &) { return StmtError(); } operator ExprResult() const { return ExprError(); } operator StmtResult() const { return StmtError(); } operator TypeResult() const { return TypeError(); } operator DeclResult() const { return DeclResult(true); } operator MemInitResult() const { return MemInitResult(true); } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<ImmediateDiagBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Is the last error level diagnostic immediate. This is used to determine /// whether the next info diagnostic should be immediate. bool IsLastErrorImmediate = true; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID, bool DeferHint = false); /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint = false); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h /// Whether deferrable diagnostics should be deferred. bool DeferDiags = false; /// RAII class to control scope of DeferDiags.
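/// A minimal usage sketch (hypothetical caller; 'S' is a Sema instance):
///   {
///     Sema::DeferDiagsRAII Defer(S, /*DeferDiags=*/true);
///     // deferrable diagnostics requested here may be deferred
///   } // the previous DeferDiags value is restored on scope exit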
class DeferDiagsRAII { Sema &S; bool SavedDeferDiags = false; public: DeferDiagsRAII(Sema &S, bool DeferDiags) : S(S), SavedDeferDiags(S.DeferDiags) { S.DeferDiags = DeferDiags; } ~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; } }; /// Whether an uncompilable error has occurred. This includes errors that /// happen in deferred diagnostics. bool hasUncompilableErrorOccurred() const; bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; /// Invent a new identifier for parameters of abbreviated templates. IdentifierInfo * InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, unsigned Index); void emitAndClearUnusedLocalTypedefWarnings(); private: /// Function or variable declarations to be checked for whether the deferred /// diagnostics should be emitted. llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags; public: // Emit all deferred diagnostics. void emitDeferredDiags(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ?
nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void setFunctionHasMustTail(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// Retrieve the current function, if any, that should be analyzed for /// potential availability violations. sema::FunctionScopeInfo *getCurFunctionAvailabilityContext(); /// WeakTopLevelDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template.
It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); QualType BuildBitIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); /// Determine whether the callee of a particular function call can throw. /// E, D and Loc are all optional. 
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D, SourceLocation Loc = SourceLocation()); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { protected: unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal argument for the /// swift_name attribute applied to decl \p D. 
Raise a diagnostic if the name /// is invalid for the given declaration. /// /// \p AL is used to provide caret diagnostics in case of a malformed name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc, const ParsedAttr &AL, bool IsAsync); /// A derivative of BoundTypeDiagnoser for which the diagnostic's type /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless. /// For example, a diagnostic with no other parameters would generally have /// the form "...%select{incomplete|sizeless}0 type %1...". template <typename... Ts> class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> { public: SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args) : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID); this->emit(DB, std::index_sequence_for<Ts...>()); DB << T->isSizelessType() << T; } }; enum class CompleteTypeKind { /// Apply the normal rules for complete types. In particular, /// treat all sizeless types as incomplete. Normal, /// Relax the normal rules for complete types so that they include /// sizeless built-in types. AcceptSizeless, // FIXME: Eventually we should flip the default to Normal and opt in // to AcceptSizeless rather than opt out of it. Default = AcceptSizeless }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if their /// addresses are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool IsPartition = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// The global module fragment of the current translation unit. clang::Module *GlobalModuleFragment = nullptr; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } /// Helper function to determine whether we are in the module purview. /// Return false if we are not in a module. bool isCurrentModulePurview() const { return getCurrentModule() ? getCurrentModule()->isModulePurview() : false; } /// Enter the scope of the global module. Module *PushGlobalModuleFragment(SourceLocation BeginLoc, bool IsImplicit); /// Leave the scope of the global module. void PopGlobalModuleFragment(); VisibleModuleSet VisibleModules; public: /// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); // When loading a non-modular PCH file, this is used to restore module // visibility. void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) { VisibleModules.setVisible(Mod, ImportLoc); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return D->isUnconditionallyVisible() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind = CompleteTypeKind::Default) { return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, unsigned DiagID); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser); } bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID); } template <typename... 
Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser); } /// Get the type of expression E, triggering instantiation to complete the /// type if necessary -- that is, if the expression refers to a templated /// static data member of incomplete array type. /// /// May still return an incomplete type if instantiation was not possible or /// if the type is incomplete for a different reason. Use /// RequireCompleteExprType instead if a diagnostic is expected for an /// incomplete expression type. QualType getCompletedType(Expr *E); void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); // Returns the underlying type of a decltype with the given expression. QualType getDecltypeForExpr(Expr *E); QualType BuildTypeofExprType(Expr *E); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as an overload set, and an expression /// representing that overload set has been formed. /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable /// expression referencing the overload set. NC_OverloadSet, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. 
NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification OverloadSet(ExprResult E) { NameClassification Result(NC_OverloadSet); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_OverloadSet); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. 
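/// A classic disambiguation this enables (illustrative): for the statement
/// `x * y;`, classifying 'x' with NextToken '*' as NC_Type yields a pointer
/// declaration, while NC_NonType yields a multiplication expression.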
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); void warnOnReservedIdentifier(const NamedDecl *D); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo, QualType &T, SourceLocation Loc, unsigned FailedFoldDiagID); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const BindingDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
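/// An illustrative case (hypothetical user code): the inner 'i' shadows the
/// captured 'i', so the assignment below modifies the local, which is
/// probably not what the user intended:
///   int i = 0;
///   auto L = [i]() mutable { int i = 1; i = 2; };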
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions).
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
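/// Illustrative input (hedged; e.g. under ARC, a __strong member makes a C
/// union non-trivial to copy or default-initialize):
///   union U { __strong id Obj; int I; };
///   void take(union U u); // would be checked in an NTCUC_FunctionParam context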
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. 
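/// What this catches (illustrative; with -Wunused-parameter enabled):
///   static void helper(int used, int unused) { (void)used; } // warns: 'unused'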
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// An enumeration to represent the transition of states in parsing module /// fragments and imports. If we are not parsing a C++20 TU, or we find /// an error in state transition, the state is set to NotACXX20Module. enum class ModuleImportState { FirstDecl, ///< Parsing the first decl in a TU. GlobalFragment, ///< after 'module;' but before 'module X;' ImportAllowed, ///< after 'module X;' but before any non-import decl. ImportFinished, ///< after any non-import decl. PrivateFragment, ///< after 'module :private;'. NotACXX20Module ///< Not a C++20 TU, or an invalid state was found. }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, ModuleIdPath Partition, ModuleImportState &ImportState); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module toplevel name as an access path. /// \param Partition The module partition name as an access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path, ModuleIdPath Partition = {}); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule.
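/// Sketch of the expected pairing (illustrative; assumes 'DirectiveLoc' and
/// 'Mod' come from the preprocessor callbacks and 'S' is a Sema instance):
/// \code
///   S.ActOnModuleBegin(DirectiveLoc, Mod);
///   // ... parse the submodule's contents ...
///   S.ActOnModuleEnd(DirectiveLoc, Mod);
/// \endcode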
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
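/// A minimal sketch (assumes a Sema 'S' and a typedef declaration 'TD' found
/// by lookup where a tag was expected):
/// \code
///   Sema::NonTagKind NTK = S.getNonTagTypeDeclKind(TD, TTK_Struct);
///   // NTK is NTK_Typedef here and can feed a %select-based diagnostic.
/// \endcode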
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
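/// Hypothetical discrimination sketch (assumes a Sema 'S' and a defaulted
/// FunctionDecl *FD; the two handlers are placeholders, not Sema members):
/// \code
///   Sema::DefaultedFunctionKind DFK = S.getDefaultedFunctionKind(FD);
///   if (DFK.isSpecialMember())
///     handleSpecialMember(DFK.asSpecialMember()); // hypothetical helper
///   else if (DFK.isComparison())
///     handleComparison(DFK.asComparison());       // hypothetical helper
/// \endcode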
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, bool IsAbstract, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. 
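/// For example (sketch only; 'CurScope' is the innermost Scope being
/// processed and 'DC' the DeclContext of interest):
/// \code
///   if (Scope *Enclosing = Sema::getScopeForDeclContext(CurScope, DC))
///     ; // DC is still open here, and Enclosing is its scope.
/// \endcode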
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, /// Merge availability attributes for an implementation of /// an optional protocol requirement. AMK_OptionalProtocolImplementation }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return the newly created attribute if one was /// added, or null otherwise.
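/// Hedged example of the merging pattern (assumes a Sema 'S', a new
/// declaration 'New', and a DLLImportAttr *ImportA found on the prior decl):
/// \code
///   if (DLLImportAttr *Merged = S.mergeDLLImportAttr(New, *ImportA))
///     New->addAttr(Merged);
/// \endcode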
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI, StringRef NewUserDiagnostic); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA, StringRef Name); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL); EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D, const EnforceTCBLeafAttr &AL); BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. 
enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. 
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier. CCEK_Noexcept ///< Condition in a noexcept(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE, NamedDecl *Dest = nullptr); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
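/// Call shape, for illustration (a concrete ContextualImplicitConverter
/// subclass providing the diagnostics is assumed; 'S' is a Sema instance):
/// \code
///   MyConverter Converter; // hypothetical subclass implementing match() etc.
///   ExprResult Res = S.PerformContextualImplicitConversion(Loc, FromE,
///                                                          Converter);
/// \endcode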
ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From,
QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all templates and non-templates identified by // the expression Expr. void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal.
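/// e.g., a guarding sketch before forming a function pointer (illustrative;
/// 'S' is a Sema instance, 'FD' and 'OpLoc' are assumed):
/// \code
///   if (!S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, OpLoc))
///     return ExprError(); // a diagnostic has already been emitted
/// \endcode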
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); void AddOverloadedCallCandidates( LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass, NestedNameSpecifierLoc NNSLoc, DeclarationNameInfo DNI, const UnresolvedSetImpl &Fns, bool PerformADL = true); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, MultiExprArg Args); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation
LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false, bool AllowRecovery = false); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). 
LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
LOLR_StringTemplatePack, }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, SourceLocation TypoLoc); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading.
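/// A minimal sketch (assumes a Sema 'S' and an IdentifierInfo *II naming the
/// entity):
/// \code
///   NamedDecl *ND = S.LookupSingleName(S.getCurScope(), II, Loc,
///                                      Sema::LookupOrdinaryName);
///   if (!ND)
///     ; // absent, ambiguous, or overloaded; use LookupResult to tell apart
/// \endcode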
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id, bool IsUDSuffix); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing, StringLiteral *StringLit = nullptr); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); bool CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old); bool CheckRedeclarationInModule(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. 
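/// Sketch of the intended recovery pattern (illustrative; 'LHS' and 'RHS' are
/// assumed sub-expressions of the node that failed to build, 'S' a Sema):
/// \code
///   return S.CreateRecoveryExpr(LHS->getBeginLoc(), RHS->getEndLoc(),
///                               {LHS, RHS}); // keeps operands in the AST
/// \endcode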
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef<Expr *> SubExprs, QualType T = QualType()); ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID, SourceLocation Loc); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( FunctionDecl *FD); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Handles semantic checking for features that are common to all attributes, /// such as checking whether a parameter was properly specified, or the /// correct number of arguments were passed, etc. Returns true if the /// attribute has been diagnosed. bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A, bool SkipArgCountCheck = false); bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A, bool SkipArgCountCheck = false); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const AttributeCommonInfo &CI, const Expr *E, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); llvm::Error isValidSectionSpecifier(StringRef Str); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str, const StringLiteral *Literal, bool &HasDefault, bool &HasCommas, SmallVectorImpl<StringRef> &Strings); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. 
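/// e.g. (sketch only; 'MethodTy' is adjusted in place when no explicit
/// calling convention was written, and 'TInfo' is an assumed TypeSourceInfo):
/// \code
///   QualType MethodTy = TInfo->getType();
///   S.adjustMemberFunctionCC(MethodTy, /*IsStatic=*/false,
///                            /*IsCtorOrDtor=*/false, Loc);
/// \endcode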
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
    SourceLocation Loc);

// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);

/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;

/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
    NullabilityKind nullability, SourceLocation diagLoc,
    bool allowArrayTypes, bool overrideExisting);

/// Process the attributes before creating an attributed statement. Returns
/// the semantic attributes that have been processed.
void ProcessStmtAttributes(Stmt *Stmt,
    const ParsedAttributesWithRange &InAttrs,
    SmallVectorImpl<const Attr *> &OutAttrs);

void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
    ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl);

void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
    ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl);

/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl,
    bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
    ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
    ObjCContainerDecl* IDecl, bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
    ObjCContainerDecl *CDecl, bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
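// Illustrative sketch (editorial addition): applying an implicit nullability
// specifier, e.g. one coming from API notes, using the helper documented
// above. 'S', 'Param', and 'Loc' are assumed placeholders; note the function
// returns true on *failure*, per its documentation.
//
// \code
//   QualType T = Param->getType();
//   if (!S.checkImplicitNullabilityTypeSpecifier(
//           T, NullabilityKind::NonNull, Loc,
//           /*allowArrayTypes=*/false, /*overrideExisting=*/false))
//     Param->setType(T); // T now carries the _Nonnull specifier
// \endcode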
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
    ObjCInterfaceDecl *IDecl, SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
    ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
    const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
    const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
    SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD,
    Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel,
    SourceLocation SetterNameLoc, const bool isReadWrite,
    unsigned &Attributes, const unsigned AttributesAsWritten, QualType T,
    TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl,
    SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD,
    Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel,
    SourceLocation SetterNameLoc, const bool isReadWrite,
    const unsigned Attributes, const unsigned AttributesAsWritten, QualType T,
    TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind,
    DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one user-declared setter or getter
/// but not the other.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
    ObjCInterfaceDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
    const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy { MMS_loose, MMS_strict };

/// MatchTwoMethodDeclarations - Checks if two methods' types match and
/// returns true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
    const ObjCMethodDecl *PrevMethod,
    MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
    const SelectorSet &ClsMap, SelectorSet &InsMapSeen,
    SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl,
    ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass,
    bool WarnCategoryMethodImpl=false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// a category match those implemented in its primary class, and warns each
/// time an exact match is found.
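// Illustrative sketch (editorial addition): comparing a method against its
// redeclaration with the strategy enum above. 'S', 'ImplMethod', and
// 'IfaceMethod' are assumed placeholders.
//
// \code
//   if (!S.MatchTwoMethodDeclarations(ImplMethod, IfaceMethod,
//                                     Sema::MMS_strict)) {
//     // the types of the two declarations do not match
//   }
// \endcode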
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

/// Returns the default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
    bool receiverIdOrClass, bool instance);

public:
/// - Returns instance or factory methods in the global method pool for
/// the given selector. It checks the desired kind first; if none is found
/// and the parameter CheckTheOther is set, it then checks the other kind.
/// If no such method or only one method is found, the function returns
/// false; otherwise, it returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
    SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst,
    bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr);

bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
    SourceRange R, bool receiverIdOrClass,
    SmallVectorImpl<ObjCMethodDecl*>& Methods);

void DiagnoseMultipleMethodInGlobalPool(
    SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R,
    bool receiverIdOrClass);

private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
    bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
    bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
    bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
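// Illustrative sketch (editorial addition): collecting every candidate for a
// selector from the global method pool, preferring instance methods but
// falling back to factory methods, then diagnosing ambiguity. 'S', 'Sel',
// and 'R' are assumed placeholders.
//
// \code
//   SmallVector<ObjCMethodDecl *, 4> Methods;
//   if (S.CollectMultipleMethodsInGlobalPool(Sel, Methods,
//                                            /*InstanceFirst=*/true,
//                                            /*CheckTheOther=*/true))
//     S.DiagnoseMultipleMethodInGlobalPool(Methods, Sel, R,
//                                          /*receiverIdOrClass=*/false);
// \endcode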
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
    bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
    QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
    SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }
  ExprResult release() { return E; }
  Expr *get() const { return E; }
  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;
  explicit FullExprArg(Expr *expr) : E(expr) {}
  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
    bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
    ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }
  ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
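// Illustrative sketch (editorial addition): the parser typically brackets a
// compound statement with CompoundScopeRAII and wraps sub-expressions as
// full-expressions before handing them to statement callbacks. 'S' and
// 'IncExpr' are assumed placeholders.
//
// \code
//   Sema::CompoundScopeRAII BodyScope(S);
//   Sema::FullExprArg Inc = S.MakeFullDiscardedValueExpr(IncExpr);
//   // ... Inc can then be passed as the third part of S.ActOnForStmt(...)
// \endcode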
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult BuildAttributedStmt(SourceLocation AttrsLoc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt); StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc, ConditionResult Cond, SourceLocation RParenLoc, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); struct NamedReturnInfo { const VarDecl *Candidate; enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable }; Status S; bool isMoveEligible() const { return S != None; }; bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; } }; enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn }; NamedReturnInfo getNamedReturnInfo( Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal); NamedReturnInfo getNamedReturnInfo(const VarDecl *VD); const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info, QualType ReturnType); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const NamedReturnInfo &NRInfo, Expr *Value, bool SupressSimplerImplicitMoves = false); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, bool AllowRecovery = false); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, NamedReturnInfo &NRInfo, bool SupressSimplerImplicitMoves); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl 
*BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// If VD is set but not otherwise used, diagnose, for a parameter or a /// variable. void DiagnoseUnusedButSetDecl(const VarDecl *VD); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); TypeSourceInfo *TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
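// Illustrative sketch (editorial addition): availability and deprecation
// diagnostics are parked in a pool while a declaration is being parsed and
// replayed against the finished declaration. 'S' and 'D' are assumed
// placeholders, and the DelayedDiagnosticPool constructor shown here is an
// assumption, not taken from this header.
//
// \code
//   sema::DelayedDiagnosticPool Pool(/*parent=*/nullptr);
//   Sema::ParsingDeclState State = S.PushParsingDeclaration(Pool);
//   // ... parse the declaration, producing D ...
//   S.PopParsingDeclaration(State, D);
// \endcode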
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
    bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
    unsigned CapturingScopeIndex);

ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};

/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
    SourceLocation EllipsisLoc, bool BuildAndDiagnose,
    QualType &CaptureType, QualType &DeclRefType,
    const unsigned *const FunctionScopeIndexToStopAt);

/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
    TryCaptureKind Kind = TryCapture_Implicit,
    SourceLocation EllipsisLoc = SourceLocation());

/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
    bool SkipLocalVariables = false, ArrayRef<const Expr *> StopAt = None);

/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
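// Illustrative sketch (editorial addition): querying whether a variable
// could be captured, without actually performing the capture or emitting
// diagnostics, via the BuildAndDiagnose=false mode described above. 'S',
// 'Var', and 'Loc' are assumed placeholders.
//
// \code
//   QualType CaptureType, DeclRefType;
//   bool CannotCapture = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit,
//       /*EllipsisLoc=*/SourceLocation(), /*BuildAndDiagnose=*/false,
//       CaptureType, DeclRefType, /*FunctionScopeIndexToStopAt=*/nullptr);
// \endcode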
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
    bool ForceComplain = false,
    bool (*IsPlausibleResult)(QualType) = nullptr);

/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
    UnresolvedSetImpl &NonTemplateOverloads);

/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);

/// Conditionally issue a diagnostic based on the statement's reachability
/// analysis.
///
/// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
/// the function body is parsed, and then do a basic reachability analysis to
/// determine if the statement is reachable. If it is unreachable, the
/// diagnostic will not be emitted.
bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
    const PartialDiagnostic &PD);

/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
    const PartialDiagnostic &PD);

/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
    const PartialDiagnostic &PD);

// Primary Expressions.
SourceRange getExprRange(Expr *E) const;

ExprResult ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
    SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen,
    bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr,
    bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);

void DecomposeUnqualifiedId(const UnqualifiedId &Id,
    TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *&TemplateArgs);

bool DiagnoseDependentMemberLookup(LookupResult &R);

bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
    CorrectionCandidateCallback &CCC,
    TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
    ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);

DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
    IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);

ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
    IdentifierInfo *II, bool AllowBuiltinCreation=false);

ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
    SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo,
    bool isAddressOfOperand,
    const TemplateArgumentListInfo *TemplateArgs);

/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
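// Illustrative sketch (editorial addition): emitting a diagnostic only if
// the offending statement turns out to be reachable once the function body
// has been parsed. 'S', 'Loc', and 'TheCall' are assumed placeholders, and
// 'diag::warn_example' is a hypothetical diagnostic ID.
//
// \code
//   S.DiagRuntimeBehavior(Loc, TheCall,
//                         S.PDiag(diag::warn_example)
//                             << TheCall->getSourceRange());
// \endcode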
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, TypeSourceInfo *TSI); ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, ParsedType ParsedTy); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
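// Illustrative sketch (editorial addition): manufacturing a simple reference
// to a known variable with the first BuildDeclRefExpr overload above. 'S',
// 'VD', and 'Loc' are assumed placeholders.
//
// \code
//   DeclRefExpr *DRE = S.BuildDeclRefExpr(
//       VD, VD->getType().getNonReferenceType(), VK_LValue, Loc);
// \endcode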
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, MultiExprArg ArgExprs, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
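/// Illustrative example (editorial addition; 'S', 'CurScope', 'Fn', and
/// 'Args' are assumed placeholders):
/// \code
///   ExprResult Call =
///       S.ActOnCallExpr(CurScope, Fn, LParenLoc, Args, RParenLoc);
/// \endcode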
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
    MultiExprArg ArgExprs, SourceLocation RParenLoc,
    Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
    MultiExprArg ArgExprs, SourceLocation RParenLoc,
    Expr *ExecConfig = nullptr, bool IsExecConfig = false,
    bool AllowRecovery = false);
Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
    MultiExprArg CallArgs);

enum class AtomicArgumentOrder { API, AST };
ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
    SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op,
    AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);

ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
    SourceLocation LParenLoc, ArrayRef<Expr *> Arg,
    SourceLocation RParenLoc, Expr *Config = nullptr,
    bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL);

ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
    MultiExprArg ExecConfig, SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D,
    ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
    SourceLocation RParenLoc, Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// Build an AltiVec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
    SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
    SourceLocation RParenLoc, Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
    TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
    SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
    SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
    SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind,
    Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
    Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
    Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
    UnresolvedSetImpl &Functions);

void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
    SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
    LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
    SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
    SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
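// Illustrative sketch (editorial addition): building a checked addition from
// two sub-expressions via the operator-building entry points above. 'S',
// 'CurScope', 'OpLoc', 'LHS', and 'RHS' are assumed placeholders.
//
// \code
//   ExprResult Sum = S.BuildBinOp(CurScope, OpLoc, BO_Add, LHS, RHS);
// \endcode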
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
    SourceLocation BuiltinLoc, SourceLocation RParenLoc);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
    SourceLocation BuiltinLoc, SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
    SourceLocation BuiltinLoc, SourceLocation RParenLoc);

//===---------------------------- C++ Features --------------------------===//

// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
    SourceLocation NamespaceLoc, SourceLocation IdentLoc,
    IdentifierInfo *Ident, SourceLocation LBrace,
    const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; }

CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;

private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;

ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
    CXXScopeSpec &SS, ParsedType TemplateTypeTy,
    IdentifierInfo *MemberOrBase);

public:
enum class ComparisonCategoryUsage {
  /// The '<=>' operator was used in an expression and a builtin operator
  /// was selected.
  OperatorInExpression,
  /// A defaulted 'operator<=>' needed the comparison category. This
  /// typically only applies to 'std::strong_ordering', due to the implicit
  /// fallback return value.
  DefaultedOperator,
};

/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
    SourceLocation Loc, ComparisonCategoryUsage Usage);

/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);

/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
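// Illustrative sketch (editorial addition): testing a type against
// std::initializer_list and recovering its element type, per the helper
// documented above. 'S' and 'Ty' are assumed placeholders.
//
// \code
//   QualType Element;
//   if (S.isStdInitializerList(Ty.getNonReferenceType(), &Element)) {
//     // Ty is std::initializer_list<Element>
//   }
// \endcode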
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
    SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc,
    IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList);

void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
    SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS,
    SourceLocation IdentLoc, IdentifierInfo *Ident);

void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
    const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
    NamedDecl *Target, UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
    bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc,
    const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
    const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
    SourceLocation NameLoc, const LookupResult *R = nullptr,
    const UsingDecl *UD = nullptr);

NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
    SourceLocation UsingLoc, bool HasTypenameKeyword,
    SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation,
    bool IsUsingIfExists);
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
    SourceLocation UsingLoc, SourceLocation EnumLoc, SourceLocation NameLoc,
    EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
    ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *findInheritingConstructor(SourceLocation Loc,
    CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow);

Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
    SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    UnqualifiedId &Name, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
    SourceLocation UsingLoc, SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
    MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc,
    UnqualifiedId &Name, const ParsedAttributesView &AttrList,
    TypeResult Type, Decl *DeclFromDeclSpec);

/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
    QualType DeclInitType, NamedDecl *FoundDecl,
    CXXConstructorDecl *Constructor, MultiExprArg Exprs,
    bool HadMultipleCandidates, bool IsListInitialization,
    bool IsStdInitListInitialization, bool RequiresZeroInit,
    unsigned ConstructKind, SourceRange ParenRange);

/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. 
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. 
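// Editor's usage sketch for the implicit special members above
// (illustrative, not part of the original header): the declare-then-define
// pattern, where the definition is only requested once the member is
// actually used. S, RD, and UseLoc are assumed.
//
//   CXXConstructorDecl *DefaultCtor = S.DeclareImplicitDefaultConstructor(RD);
//   if (DefaultCtor && !DefaultCtor->isDeleted())
//     S.DefineImplicitDefaultConstructor(UseLoc, DefaultCtor);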
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Check whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression.
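// Editor's usage sketch for ForceDeclarationOfImplicitMembers and
// ShouldDeleteSpecialMember above (illustrative, not part of the original
// header). Sema::CXXCopyConstructor is assumed to be the CXXSpecialMember
// enumerator declared elsewhere in this class, and lookupCopyCtor() is a
// hypothetical helper.
//
//   S.ForceDeclarationOfImplicitMembers(RD);
//   CXXMethodDecl *CopyCtor = lookupCopyCtor(RD); // hypothetical lookup
//   bool WouldBeDeleted =
//       CopyCtor &&
//       S.ShouldDeleteSpecialMember(CopyCtor, Sema::CXXCopyConstructor);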
ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, QualType DeclInitType, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr *> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); // Checks that the vector type should be initialized from a scalar // by splatting the value rather than populating a single element. // This is the case for AltiVecVector types as well as with // AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified. bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy); // Checks if the -faltivec-src-compat=gcc option is specified. // If so, AltiVecVector, AltiVecBool and AltiVecPixel types are // treated the same way as they are when trying to initialize // these vectors on gcc (an error is emitted). bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy, QualType SrcTy); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). 
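// Editor's usage sketch for BuildCXXNamedCast above (illustrative, not part
// of the original header): programmatically building a static_cast once the
// destination type has a TypeSourceInfo; OpLoc, TSI, and E are assumed.
//
//   ExprResult Cast = S.BuildCXXNamedCast(OpLoc, tok::kw_static_cast, TSI, E,
//                                         /*AngleBrackets=*/SourceRange(),
//                                         /*Parens=*/SourceRange());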
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee, SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return 'true' on failure, 'false' on success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions.
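// Editor's usage sketch for CXXThisScopeRAII above (illustrative, not part
// of the original header): temporarily allowing 'this' while checking a
// member declaration of RD outside any member function body.
//
//   {
//     Sema::CXXThisScopeRAII ThisScope(S, RD,
//                                      /*CXXThisTypeQuals=*/Qualifiers());
//     QualType ThisTy = S.getCurrentThisType(); // valid while scope is active
//   }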
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
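// Editor's usage sketch for FindAllocationFunctions below (illustrative,
// not part of the original header): resolving operator new/delete for a
// non-array new of AllocTy, searching both the class and global scopes via
// AFS_Both; Loc, Range, and AllocTy are assumed.
//
//   FunctionDecl *OpNew = nullptr, *OpDelete = nullptr;
//   bool PassAlignment = false;
//   S.FindAllocationFunctions(Loc, Range, Sema::AFS_Both, Sema::AFS_Both,
//                             AllocTy, /*IsArray=*/false, PassAlignment,
//                             MultiExprArg(), OpNew, OpDelete);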
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
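// Editor's usage sketch for BuildTypeTrait above (illustrative, not part of
// the original header): evaluating a unary type trait for a type that
// already has a TypeSourceInfo. UTT_IsTriviallyCopyable is assumed to be a
// TypeTrait enumerator from the AST library.
//
//   ExprResult Trait = S.BuildTypeTrait(UTT_IsTriviallyCopyable, KWLoc,
//                                       {TSI}, RParenLoc);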
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); // Complete an enum decl, maybe without a scope spec. bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L, CXXScopeSpec *SS = nullptr); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. 
/// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, do not emit an error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer.
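// Editor's usage sketch for ActOnCXXNestedNameSpecifier above
// (illustrative, not part of the original header): extending a scope
// specifier with a parsed 'identifier::' component; II, IdLoc, CCLoc, and
// CurScope are assumed to come from the parser.
//
//   Sema::NestedNameSpecInfo IdInfo(II, IdLoc, CCLoc);
//   if (S.ActOnCXXNestedNameSpecifier(CurScope, IdInfo,
//                                     /*EnteringContext=*/false, SS))
//     return true; // error already diagnosed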
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope() is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number the lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the DeclContext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately.
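// Editor's usage sketch for the declarator-scope entry points above
// (illustrative, not part of the original header): a parser bracketing name
// lookup inside a qualified declarator-id.
//
//   bool Entered = S.ShouldEnterDeclaratorScope(CurScope, SS);
//   if (Entered)
//     S.ActOnCXXEnterDeclaratorScope(CurScope, SS);
//   // ... parse the declarator; names resolve in SS's scope ...
//   if (Entered)
//     S.ActOnCXXExitDeclaratorScope(CurScope, SS);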
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc, ExprResult RequiresClause); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
/// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache; llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &> SatisfactionCache; public: const NormalizedConstraint * getNormalizedAssociatedConstraints( NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints); /// \brief Check whether the given declaration's associated constraints are /// at least as constrained as another declaration's according to the /// partial ordering of constraints. /// /// \param Result If no error occurred, receives the result: true if D1 is /// at least as constrained as D2, and false otherwise. /// /// \returns true if an error occurred, false otherwise. bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2, bool &Result); /// Diagnose the case where D1 was not at least as constrained as D2 but /// would have been if a pair of atomic constraints involved had been declared /// in a concept and not repeated in two separate places in code. /// \returns true if such a diagnostic was emitted, false otherwise. bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2); /// \brief Check whether the given list of constraint expressions is /// satisfied (as if in a 'conjunction') given template arguments. /// \param Template the template-like entity that triggered the constraints /// check (either a concept or a constrained entity). /// \param ConstraintExprs a list of constraint expressions, treated as if /// they were 'AND'ed together. /// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if false is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. /// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise.
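// Editor's usage sketch for IsAtLeastAsConstrained above (illustrative, not
// part of the original header): partial-ordering two constrained
// declarations by their associated constraints AC1 and AC2.
//
//   bool AtLeastAsConstrained = false;
//   if (S.IsAtLeastAsConstrained(D1, AC1, D2, AC2, AtLeastAsConstrained))
//     return true; // error while checking subsumption
//   // AtLeastAsConstrained now says whether D1's constraints subsume D2's.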
bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constraints are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. The type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'.
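// Editor's usage sketch for CheckConstraintSatisfaction and
// DiagnoseUnsatisfiedConstraint above (illustrative, not part of the
// original header); ConstraintSatisfaction::IsSatisfied is assumed to be
// the flag exposed by the AST library.
//
//   ConstraintSatisfaction Satisfaction;
//   if (S.CheckConstraintSatisfaction(ConstraintExpr, Satisfaction))
//     return true;                     // could not be checked at all
//   if (!Satisfaction.IsSatisfied)
//     S.DiagnoseUnsatisfiedConstraint(Satisfaction);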
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool 
SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
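// Editor's usage sketch for the vtable bookkeeping above (illustrative, not
// part of the original header): recording a use of RD's vtable and later
// emitting everything that became required.
//
//   S.MarkVTableUsed(Loc, RD, /*DefinitionRequired=*/true);
//   // ... at end-of-TU processing:
//   while (S.DefineUsedVTables())
//     ; // repeat until defining vtables stops adding new uses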
void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref<Scope *()> EnterScope); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl
*RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
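// Editor's usage sketch for CheckDerivedToBaseConversion above
// (illustrative, not part of the original header): validating the
// conversion and collecting the base path for later CodeGen use.
//
//   CXXCastPath BasePath;
//   if (S.CheckDerivedToBaseConversion(DerivedTy, BaseTy, Loc, Range,
//                                      &BasePath))
//     return ExprError(); // inaccessible or ambiguous base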
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, const SourceRange &, DeclAccessPair FoundDecl); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, ArrayRef<Expr *> ArgExprs, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. static NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. RequiredTemplateKind(TemplateNameIsRequiredTag) {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. 
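// Editor's usage sketch for LookupTemplateName / RequiredTemplateKind above
// (illustrative, not part of the original header): requiring that the
// looked-up name be a template, as after an explicit 'template' keyword.
// The LookupResult constructor shown is assumed from the Lookup library.
//
//   LookupResult R(S, NameInfo, Sema::LookupOrdinaryName);
//   bool MemberOfUnknownSpecialization = false;
//   S.LookupTemplateName(R, CurScope, SS, /*ObjectType=*/QualType(),
//                        /*EnteringContext=*/false,
//                        MemberOfUnknownSpecialization,
//                        Sema::TemplateNameIsRequired);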
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool BuildTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc, bool AllowUnexpandedPack); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
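/// /// Illustrative sketch (hypothetical declarations): the reference being /// formed looks like 'pi<float>' in /// \code /// template<typename T> constexpr T pi = T(3.1415926535L); /// float f = pi<float>; // reference to a variable template specialization /// \endcode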
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
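/// /// Illustrative sketch (hypothetical code) of the three contexts: /// \code /// template<typename T, int N> void f(T (&)[N]); /// int a[4]; /// f<int, 4>(a); // T, N specified: CTAK_Specified /// f(a); // T deduced: CTAK_Deduced; N: CTAK_DeducedFromArrayBound /// \endcode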
enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \param ConstraintsNotSatisfied If provided, and an error occurred, will /// receive true if the cause for the error is the associated constraints of /// the template not being satisfied by the template arguments. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations.
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
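/// /// Illustrative sketch ('MetaFun' and 'apply' are hypothetical): /// \code /// template<typename MetaFun, typename T1, typename T2> /// using apply_t = typename MetaFun::template apply<T1, T2>::type; /// \endcode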
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack 
might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block, /// A type constraint. UPPC_TypeConstraint, // A requirement in a requires-expression. UPPC_Requirement, // A requires-clause. UPPC_RequiresClause, }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given requires-expression contains an unexpanded reference to one /// of its own parameter packs, diagnose the error. /// /// \param RE The requires-expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise.
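/// /// Illustrative sketch (hypothetical code): a nested-name-specifier with an /// unexpanded pack that this routine would diagnose, /// \code /// template<typename ...Ts> struct Outer { /// typename Ts::type t; // error: 'Ts' is unexpanded in 'Ts::' /// }; /// \endcode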
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. 
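/// /// Illustrative sketch (hypothetical code) of type pack expansions: /// \code /// template<typename ...Ts> struct Tuple {}; /// template<typename ...Ts> struct Wrap : Tuple<Ts *...> {}; // 'Ts *...' /// \endcode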
/// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
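/// /// Illustrative sketch (hypothetical pattern): for 'Tuple<std::pair<Ts, Us>...>' /// with \c Ts known to expand to two arguments, the result is 2, and \c Us is /// assumed to expand to two arguments as well.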
/// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
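/// /// Illustrative sketch (hypothetical code): /// \code /// template<typename T> void g(T, T); /// // g(1, 2.5): T is deduced as both 'int' and 'double' /// \endcode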
TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// The deduced arguments did not satisfy the constraints associated /// with the template. TDK_ConstraintsNotSatisfied, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); // Substitute auto in TypeWithAuto for a Dependent auto type QualType SubstAutoTypeDependent(QualType TypeWithAuto); // Substitute auto in TypeWithAuto for a Dependent auto type TypeSourceInfo * SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
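/// /// Illustrative sketch (hypothetical class): the implicit guides enable class /// template argument deduction such as /// \code /// template<typename T> struct Box { Box(T); }; /// Box b(42); // deduces Box<int> /// \endcode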
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
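/// /// Illustrative sketch (hypothetical template): /// \code /// template<typename T, typename U = T*> struct S {}; /// S<int> s; // instantiates the default argument, U = int* /// \endcode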
DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are instantiating a requirement of a requires expression. RequirementInstantiation, /// We are checking the satisfaction of a nested requirement of a requires /// expression. NestedRequirementConstraintsCheck, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, // We are normalizing a constraint expression. ConstraintNormalization, // We are substituting into the parameter mapping of an atomic constraint // during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. MarkingClassDllexported, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity.
const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that causes /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema?
It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction.
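/// /// Illustrative sketch (hypothetical code): both kinds of substitution occur for /// \code /// template<typename T, typename U> T convert(U u); /// double d = convert<double>(42); // T specified, U deduced as 'int' /// \endcode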
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
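/// /// Illustrative sketch (hypothetical concepts): /// \code /// template<class T> concept Small = sizeof(T) <= 8; /// template<class T> concept Fast = Small<T> && __is_trivially_copyable(T); /// // normalizing Fast<T> yields the atomic constraints 'sizeof(T) <= 8' /// // and '__is_trivially_copyable(T)' /// \endcode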
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintNormalization, NamedDecl *Template, SourceRange InstantiationRange); struct ParameterMappingSubstitution {}; /// \brief Note that we are substituting into the parameter mapping of an /// atomic constraint during constraint normalization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParameterMappingSubstitution, NamedDecl *Template, SourceRange InstantiationRange); /// \brief Note that we are substituting template arguments into a part of /// a requirement of a requires expression. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::Requirement *Req, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are checking the satisfaction of the constraint /// expression inside of a nested requirement. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::NestedRequirement *Req, ConstraintsCheck, SourceRange InstantiationRange = SourceRange()); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum number of /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5.
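/// /// Illustrative sketch: classic unevaluated operands, /// \code /// int f(); /// auto n = sizeof(f()); // 'f()' is not evaluated /// using R = decltype(f()); // nor here /// \endcode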
bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } bool isImmediateFunctionContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isImmediateFunctionContext(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) { assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } else { // Template instantiations in the PCH may be delayed until the TU. S.PendingInstantiations.swap(SavedPendingInstantiations); S.PendingInstantiations.insert(S.PendingInstantiations.end(), SavedPendingInstantiations.begin(), SavedPendingInstantiations.end()); } } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos.
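/// (Illustrative use of the builder that follows, with hypothetical
/// Info0/NumParams/EPI:
///   ExtParameterInfoBuilder B;
///   B.set(0, Info0);                        // indices are filled in order
///   EPI.ExtParameterInfos = B.getPointerOrNull(NumParams);
/// getPointerOrNull returns null when no parameter carried interesting
/// info, so the common case stores nothing.)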
class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='.
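/// (Illustrative sketch of the Subst* pattern above, with hypothetical
/// T/Args/Loc in scope:
///   MultiLevelTemplateArgumentList MLTAL;
///   MLTAL.addOuterTemplateArguments(Args);
///   QualType Sub = S.SubstType(T, MLTAL, Loc, DeclarationName());
///   if (Sub.isNull())
///     return true; // substitution failed; a diagnostic was emitted
/// The 'operator<=>' rewrite declared next rides on the same machinery.)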
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired 
= false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( 
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
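/// (Illustrative dispatch over the enum declared below, with hypothetical
/// variables in scope:
///   ParsedType ReceiverType;
///   switch (S.getObjCMessageKind(CurScope, Name, NameLoc, /*IsSuper=*/false,
///                                /*HasTrailingDot=*/false, ReceiverType)) {
///   case Sema::ObjCSuperMessage:    /* -> ActOnSuperMessage */    break;
///   case Sema::ObjCInstanceMessage: /* -> BuildInstanceMessage */ break;
///   case Sema::ObjCClassMessage:    /* -> BuildClassMessage */    break;
///   }
/// )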
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
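/// (Illustrative guard using the result kind, assuming Method and
/// CurrentClass in scope:
///   if (S.checkRelatedResultTypeCompatibility(Method, CurrentClass) ==
///       Sema::RTC_Incompatible)
///     /* diagnose the mismatched related result type */;
/// )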
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, NamedDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? 
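/// (As the definition below shows, "precise" here means that reassociation,
/// reciprocal shortcuts, approximate library functions and signed-zero
/// dropping are all disabled; a strict-IEEE-only check can gate on it, e.g.
///   if (!S.isPreciseFPEnabled()) return; // illustrative, hypothetical use
/// )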
bool isPreciseFPEnabled() { return !CurFPFeatures.getAllowFPReassociate() && !CurFPFeatures.getNoSignedZero() && !CurFPFeatures.getAllowReciprocal() && !CurFPFeatures.getAllowApproxFunc(); } void ActOnPragmaFPEvalMethod(SourceLocation Loc, LangOptions::FPEvalMethodKind Value); /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action, PragmaFloatControlKind Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext.
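/// (For orientation, the visibility stack managed above models source such
/// as
///   #pragma GCC visibility push(hidden)
///   void helper();            // receives hidden visibility
///   #pragma GCC visibility pop
/// This sketch is illustrative, not text from the original header.)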
void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D. void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Annot, MutableArrayRef<Expr *> Args); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); /// Lookup 'coroutine_traits' in std namespace and std::experimental /// namespace. The namespace found is recorded in Namespace. ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc, NamespaceDecl *&Namespace); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; struct DeclareTargetContextInfo { struct MapInfo { OMPDeclareTargetDeclAttr::MapTypeTy MT; SourceLocation Loc; }; /// Explicitly listed variables and functions in a 'to' or 'link' clause. llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped; /// The 'device_type' as parsed from the clause. OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any; /// The directive kind, `begin declare target` or `declare target`. OpenMPDirectiveKind Kind; /// The directive with indirect clause. Optional<Expr *> Indirect; /// The directive location. SourceLocation Loc; DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc) : Kind(Kind), Loc(Loc) {} }; /// Number of nested '#pragma omp declare target' directives. SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true, bool SuppressExprDiags = false); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Analyzes and checks a loop nest for use by a loop transformation. /// /// \param Kind The loop transformation directive kind. 
/// \param NumLoops How many nested loops the directive is expecting. /// \param AStmt Associated statement of the transformation directive. /// \param LoopHelpers [out] The loop analysis result. /// \param Body [out] The body code nested in \p NumLoops loop. /// \param OriginalInits [out] Collection of statements and declarations that /// must have been executed/declared before entering the /// loop. /// /// \return Whether there was any error. bool checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers, Stmt *&Body, SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>> &OriginalInits); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment. bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle a `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle a `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible. 
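/// (For orientation, the declare-variant scopes above correspond to source
/// such as
///   #pragma omp begin declare variant match(device = {arch(x86_64)})
///   int foo(void) { return 1; }  // becomes a mangled specialization of foo
///   #pragma omp end declare variant
/// An illustrative sketch, not text from the original header.)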
bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Tries to capture a lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \p D should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for which /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); /// Called on well-formed '\#pragma omp metadirective' after parsing /// of the associated statement.
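/// (Source form handled here, for orientation; illustrative only:
///   #pragma omp metadirective \
///       when(device = {arch(nvptx)} : target teams distribute parallel for) \
///       default(parallel for)
///   for (int i = 0; i < n; ++i)
///     a[i] = b[i];
/// )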
StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp [begin] assume[s]'. void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<std::string> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } /// Check if there is an active global `omp assumes` directive. bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } /// Called on well-formed '#pragma omp end assumes'. void ActOnOpenMPEndAssumesDirective(); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. 
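/// (Source form, for orientation; illustrative only:
///   struct Vec { int len; double *data; };
///   #pragma omp declare mapper(Vec v) map(v.len, v.data[0 : v.len])
/// )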
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Called at the end of target region i.e. '#pragma omp end declare target'. const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective(); /// Called once a target context is completed, that can be when a /// '#pragma omp end declare target' was encountered or when a /// '#pragma omp declare target' without declaration-definition-seq was /// encountered. void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, DeclareTargetContextInfo &DTCI); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true if currently in OpenMP task with untied clause context. bool isInOpenMPTaskUntiedContext() const; /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to /// an OpenMP loop directive. StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt); /// Process a canonical OpenMP loop nest that can either be a canonical /// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an /// OpenMP loop transformation construct. StmtResult ActOnOpenMPLoopnest(Stmt *AStmt); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. 
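/// (Typical pairing, illustrative only: ActOnOpenMPRegionStart is called
/// before the associated statement is parsed; once the statement AStmt is
/// available,
///   StmtResult R = S.ActOnOpenMPRegionEnd(AStmt, Clauses);
/// wraps it in the captured-region scaffolding described above.)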
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '#pragma omp tile' after parsing of its clauses and /// the associated statement. StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '#pragma omp unroll' after parsing of its clauses /// and the associated statement. StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. 
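/// (Source form, for orientation; illustrative only:
///   #pragma omp parallel for simd
///   for (int i = 0; i < n; ++i)
///     a[i] += b[i];
/// )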
StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
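///
/// Example of this combined construct (illustrative only; the map clause is
/// an assumption for the sketch, not required by this callback):
///
///  #pragma omp target teams distribute parallel for map(tofrom: a[0:n])
///  for (int i = 0; i < n; ++i)
///    a[i] *= 2;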
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp loop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPGenericLoopDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                               SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                           OpenMPLinearClauseKind LinKind, QualType Type,
                           bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
    DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
    Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
    ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
    ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \param NumAppendArgs The number of omp_interop_t arguments to account for
/// in checking.
/// \returns None if the function and the variant function are not compatible
/// with the pragma; otherwise, the pair of original function and variant ref
/// expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
                                  OMPTraitInfo &TI, unsigned NumAppendArgs,
                                  SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
/// \param AdjustArgsNothing The list of 'nothing' arguments.
/// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments.
/// \param AppendArgs The list of 'append_args' arguments.
/// \param AdjustArgsLoc The Location of an 'adjust_args' clause.
/// \param AppendArgsLoc The Location of an 'append_args' clause.
/// \param SR The SourceRange of the 'declare variant' directive.
void ActOnOpenMPDeclareVariantDirective(
    FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI,
    ArrayRef<Expr *> AdjustArgsNothing,
    ArrayRef<Expr *> AdjustArgsNeedDevicePtr,
    ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs,
    SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc,
    SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                               Expr *Condition, SourceLocation StartLoc,
                               SourceLocation LParenLoc,
                               SourceLocation NameModifierLoc,
                               SourceLocation ColonLoc,
                               SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'align' clause.
OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'sizes' clause.
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
                                  SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'full' clause.
OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'partial' clause.
OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                         SourceLocation LParenLoc = SourceLocation(),
                         Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
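///
/// Example of the source form (illustrative only, not from the Clang
/// sources):
///
///  #pragma omp taskloop num_tasks(8)
///  for (int i = 0; i < n; ++i)
///    work(i);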
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'detach' clause. OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'when' clause. OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'compare' clause. OMPClause *ActOnOpenMPCompareClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. 
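///
/// Example of one source form reaching this callback (an illustrative
/// assumption; OpenMP 5.0 also permits 'acq_rel' on some 'atomic' forms):
///
///  #pragma omp flush acq_rel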
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'init' clause.
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
                                 bool IsTarget, bool IsTargetSync,
                                 SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation VarLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'use' clause.
OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation VarLoc, SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation VarLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'novariants' clause.
OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'nocontext' clause.
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
    OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
    OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
    const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
    CXXScopeSpec &ReductionOrMapperIdScopeSpec,
    DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
    ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
    ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
    SourceLocation ExtraModifierLoc,
    ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
    ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. 
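///
/// Example: the parenthesized variable list of a flush directive is modeled
/// as this pseudo clause (illustrative source form, not from the Clang
/// sources):
///
///  #pragma omp flush(a, b)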
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause *ActOnOpenMPMapClause( ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, bool NoDiagnose = false, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. 
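///
/// Example of the source form (illustrative only; 'p' is assumed to already
/// hold a device address):
///
///  #pragma omp target is_device_ptr(p)
///  { p[0] = 42; }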
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Data for list of allocators. struct UsesAllocatorsData { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; /// Called on well-formed 'uses_allocators' clause. OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<UsesAllocatorsData> Data); /// Called on well-formed 'affinity' clause. OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// Called on a well-formed 'bind' clause. OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_PRValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. 
// Integer promotions are performed on each argument, and arguments that
// have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                     const FunctionProtoType *Proto,
                                     Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not. In the success case,
/// the statement is rewritten to remove implicit nodes from the return
/// value.
bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);

private:
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not.
bool checkMustTailAttr(const Stmt *St, const Attr &MTA);

public:
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collect argument expressions for the various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam, ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
  /// An arithmetic operation.
  ACK_Arithmetic,
  /// A bitwise operation.
  ACK_BitwiseOp,
  /// A comparison.
  ACK_Comparison,
  /// A conditional (?:) operator.
  ACK_Conditional,
  /// A compound assignment expression.
  ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
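///
/// For instance (an illustrative sketch, not from the Clang sources), in C:
///
///  int i; int *p;
///  p = i;   // IntToPointer: accepted as an extension, with a warning
///  i = p;   // PointerToInt: likewise accepted as an extension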
enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointers types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. 
If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool isValidSveBitcast(QualType srcType, QualType destType); bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy); bool areVectorTypesSameSize(QualType srcType, QualType destType); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckMatrixCast - Check type constraints for matrix casts. // We allow casting between matrixes of the same dimensions i.e. when they // have the same number of rows and column. Returns true if the cast is // invalid. bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy, CastKind &Kind); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. 
// returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; QualType PreferredConditionType(ConditionKind K) const { return K == ConditionKind::Switch ? Context.IntTy : Context.BoolTy; } ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK, bool MissingOK = false); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. 
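///
/// A hypothetical use (not a call site taken from the sources): truncating
/// the value 300 to an 8-bit unsigned type yields 300 % 256 == 44, which no
/// longer compares equal to the original value, so the overflow is detected
/// and the given diagnostic is emitted.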
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual SemaDiagnosticBuilder diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T); virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S, SourceLocation Loc) = 0; virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc); virtual ~VerifyICEDiagnoser() {} }; enum AllowFoldKind { NoFold, AllowFold, }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr, AllowFoldKind CanFold = NoFold); ExprResult VerifyIntegerConstantExpression(Expr *E, AllowFoldKind CanFold = NoFold) { return VerifyIntegerConstantExpression(E, nullptr, CanFold); } /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics /// unless \p EmitOnBothSides is true. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD = nullptr); SemaDiagnosticBuilder targetDiag(SourceLocation Loc, const PartialDiagnostic &PD, FunctionDecl *FD = nullptr) { return targetDiag(Loc, PD.getDiagID(), FD) << PD; } /// Check if the type is allowed to be used for the current target. void checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D = nullptr); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); enum CUDAVariableTarget { CVT_Device, /// Emitted on device side with a shadow variable on host side CVT_Host, /// Emitted on host side only CVT_Both, /// Emitted on both sides with different addresses CVT_Unified, /// Emitted as a unified address, e.g. managed variables }; /// Determines whether the given variable is emitted on host or device side. CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas are host device functions by default unless they have an /// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration.
PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); enum class AttributeCompletion { Attribute, Scope, None, }; void CodeCompleteAttribute( AttributeCommonInfo::Syntax Syntax, AttributeCompletion Completion = AttributeCompletion::Attribute, const IdentifierInfo *Scope = nullptr); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. /// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc, bool Braced); QualType ProduceCtorInitMemberSignatureHelp( Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc, bool Braced); QualType ProduceTemplateArgumentSignatureHelp( TemplateTy, ArrayRef<ParsedTemplateArgument>, SourceLocation LAngleLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
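// A minimal standalone sketch of the contract documented above for
// ProduceCallSignatureHelp: return the preferred type of the argument being
// typed when every overload candidate agrees on it, and null when it is
// unknown or ambiguous. The names below (guessCallArgumentType, after the
// FIXME's suggested rename, and Signature) are hypothetical stand-ins built
// on the standard library, not Clang's API; the block is guarded out of the
// build since it sits inside a class definition.
#if 0
#include <optional>
#include <string>
#include <vector>

// One candidate signature, reduced to its parameter type names.
using Signature = std::vector<std::string>;

std::optional<std::string>
guessCallArgumentType(const std::vector<Signature> &Candidates, size_t ArgIdx) {
  std::optional<std::string> Preferred;
  for (const Signature &Sig : Candidates) {
    if (ArgIdx >= Sig.size())
      continue; // this candidate places no constraint at this position
    if (!Preferred)
      Preferred = Sig[ArgIdx];
    else if (*Preferred != Sig[ArgIdx])
      return std::nullopt; // candidates disagree: ambiguous
  }
  return Preferred; // null if no candidate constrained the argument
}
#endif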
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> 
ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinArithmeticFence(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); bool SemaBuiltinElementwiseMath(CallExpr *TheCall); bool PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall); bool PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall); // Matrix builtin handling. 
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckFreeArguments(const CallExpr *E); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. 
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Nullable_result = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing.
Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to a /// function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the set of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for device yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); void deepTypeCheckForSYCLDevice(SourceLocation UsedAt, llvm::DenseSet<QualType> Visited, ValueDecl *DeclToCheck); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context.
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; template <> void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, AlignPackInfo Value); } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getHashValue()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
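A minimal, self-contained sketch of the deferred-diagnostic scheme that DeviceDeferredDiags and DeviceKnownEmittedFns implement above: a diagnostic against a function is buffered until the function is known to be codegen'ed, then flushed, while functions already known to be emitted are diagnosed immediately. All names here are hypothetical standard-library stand-ins, not Clang's classes.

#include <cstdio>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

struct DeferredDiags {
  std::unordered_set<std::string> KnownEmitted; // functions known to be codegen'ed
  std::unordered_map<std::string, std::vector<std::string>> Pending; // buffered diags

  void diagnose(const std::string &Fn, const std::string &Msg) {
    if (KnownEmitted.count(Fn))
      std::printf("error in %s: %s\n", Fn.c_str(), Msg.c_str()); // emit immediately
    else
      Pending[Fn].push_back(Msg); // defer until Fn is known to be emitted
  }

  void markEmitted(const std::string &Fn) {
    if (!KnownEmitted.insert(Fn).second)
      return; // already flushed once; avoids duplicate deferred errors
    for (const std::string &Msg : Pending[Fn])
      std::printf("error in %s: %s\n", Fn.c_str(), Msg.c_str()); // flush buffer
    Pending.erase(Fn);
  }
};

int main() {
  DeferredDiags DD;
  DD.diagnose("kernel_helper", "VLA not allowed in device code"); // deferred
  DD.markEmitted("kernel_helper"); // now known emitted: the diagnostic fires
}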
integrator.h
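Written out in LaTeX, the shading-normal correction that cosTerm in this file implements (after Veach 1998, section 5.3) is

$$\mathrm{cosTerm}(\omega_o,\omega_i) = \begin{cases} \left|\omega_i \cdot n_s\right| & \text{transport from the camera,} \\[4pt] \dfrac{\left|\omega_o \cdot n_s\right|\,\left|\omega_i \cdot n_g\right|}{\left|\omega_o \cdot n_g\right|} & \text{transport from a light,} \end{cases}$$

where $n_s$ is the shading normal and $n_g$ the geometric normal, and the value is forced to $0$ whenever $\omega_i$ or $\omega_o$ lies on opposite sides of the two normals (the light-leak guard in the code).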
#ifndef _INTEGRATOR_H #define _INTEGRATOR_H #include <omp.h> #include <optional> #include "camera.h" #include "core.h" #include "image.h" #include "photon_map.h" #include "scene.h" class Integrator { protected: const std::shared_ptr<Camera> camera; public: Integrator(const std::shared_ptr<Camera>& camera) : camera(camera) {} // render scene virtual void render(const Scene& scene, Sampler& sampler, Image& image) = 0; // compute cosine term // NOTE: need to account for the asymmetry of BSDF when photon tracing // https://pbr-book.org/3ed-2018/Light_Transport_III_Bidirectional_Methods/The_Path-Space_Measurement_Equation#x3-Non-symmetryDuetoShadingNormals // Veach, Eric. Robust Monte Carlo methods for light transport simulation. // Stanford University, 1998. Section 5.3 static float cosTerm(const Vec3f& wo, const Vec3f& wi, const SurfaceInfo& surfaceInfo, const TransportDirection& transport_dir) { const float wi_ns = dot(wi, surfaceInfo.shadingNormal); const float wi_ng = dot(wi, surfaceInfo.geometricNormal); const float wo_ns = dot(wo, surfaceInfo.shadingNormal); const float wo_ng = dot(wo, surfaceInfo.geometricNormal); // prevent light leaks if (wi_ng * wi_ns <= 0 || wo_ng * wo_ns <= 0) { return 0; } if (transport_dir == TransportDirection::FROM_CAMERA) { return std::abs(wi_ns); } else if (transport_dir == TransportDirection::FROM_LIGHT) { return std::abs(wo_ns) * std::abs(wi_ng) / std::abs(wo_ng); } else { spdlog::error("[Integrator] invalid transport direction"); std::exit(EXIT_FAILURE); } } }; // abstraction of path based integrator class PathIntegrator : public Integrator { private: // number of samples in each pixel const uint32_t n_samples; public: // compute radiance coming from the given ray virtual Vec3f integrate(const Ray& ray, const Scene& scene, Sampler& sampler) const = 0; PathIntegrator(const std::shared_ptr<Camera>& camera, uint32_t n_samples) : Integrator(camera), n_samples(n_samples) {} void render(const Scene& scene, Sampler& sampler, Image& image) override final { const uint32_t width = image.getWidth(); const uint32_t height = image.getHeight(); spdlog::info("[PathIntegrator] rendering..."); #pragma omp parallel for collapse(2) schedule(dynamic, 1) for (int i = 0; i < height; ++i) { for (int j = 0; j < width; ++j) { // init sampler for each pixel const std::unique_ptr<Sampler> sampler_per_pixel = sampler.clone(); sampler_per_pixel->setSeed((sampler.getSeed() + 1) * (j + width * i)); // warmup sampler for (uint32_t k = 0; k < 100; ++k) { sampler_per_pixel->getNext1D(); } // iteration for (uint32_t k = 0; k < n_samples; ++k) { // SSAA const float u = (2.0f * (j + sampler_per_pixel->getNext1D()) - width) / height; const float v = (2.0f * (i + sampler_per_pixel->getNext1D()) - height) / height; Ray ray; float pdf; if (camera->sampleRay(Vec2f(u, v), *sampler_per_pixel, ray, pdf)) { // compute incoming radiance const Vec3f radiance = integrate(ray, scene, *sampler_per_pixel) / pdf; // invalid radiance check if (std::isnan(radiance[0]) || std::isnan(radiance[1]) || std::isnan(radiance[2])) { spdlog::error("[PathIntegrator] radiance is NaN"); continue; } else if (std::isinf(radiance[0]) || std::isinf(radiance[1]) || std::isinf(radiance[2])) { spdlog::error("[PathIntegrator] radiance is inf"); continue; } else if (radiance[0] < 0 || radiance[1] < 0 || radiance[2] < 0) { spdlog::error("[PathIntegrator] radiance is minus"); continue; } image.addPixel(i, j, radiance); } else { image.setPixel(i, j, Vec3f(0)); } } } } spdlog::info("[PathIntegrator] done"); // take average image /= 
Vec3f(n_samples); } }; // implementation of path tracing // NOTE: for reference purposes class PathTracing : public PathIntegrator { private: const uint32_t maxDepth; public: PathTracing(const std::shared_ptr<Camera>& camera, uint32_t n_samples, uint32_t maxDepth = 100) : PathIntegrator(camera, n_samples), maxDepth(maxDepth) {} Vec3f integrate(const Ray& ray_in, const Scene& scene, Sampler& sampler) const override { Vec3f radiance(0); Ray ray = ray_in; Vec3f throughput(1, 1, 1); for (uint32_t k = 0; k < maxDepth; ++k) { IntersectInfo info; if (scene.intersect(ray, info)) { // russian roulette if (k > 0) { const float russian_roulette_prob = std::min( std::max(throughput[0], std::max(throughput[1], throughput[2])), 1.0f); if (sampler.getNext1D() >= russian_roulette_prob) { break; } throughput /= russian_roulette_prob; } // Le if (info.hitPrimitive->hasAreaLight()) { radiance += throughput * info.hitPrimitive->Le(info.surfaceInfo, -ray.direction); } // sample direction by BxDF Vec3f dir; float pdf_dir; Vec3f f = info.hitPrimitive->sampleBxDF( -ray.direction, info.surfaceInfo, TransportDirection::FROM_CAMERA, sampler, dir, pdf_dir); // update throughput and ray throughput *= f * cosTerm(-ray.direction, dir, info.surfaceInfo, TransportDirection::FROM_CAMERA) / pdf_dir; ray = Ray(info.surfaceInfo.position, dir); } else { break; } } return radiance; } }; // this implementation is based on a modified version of the original SPPM // Knaus, Claude, and Matthias Zwicker. // "Progressive photon mapping: A probabilistic approach." ACM Transactions on // Graphics (TOG) 30.3 (2011): 1-13. class PPMAPA : public Integrator { private: // number of iterations const uint32_t nIterations; // number of photons in each iteration const uint32_t nPhotons; // parameter for radius reduction, see the paper const float alpha; // maximum tracing depth const uint32_t maxDepth; // number of emitted photons uint32_t nEmittedPhotons; // global search radius for radiance estimation float globalRadius; PhotonMap photonMap; // compute reflected radiance with photon map Vec3f computeRadianceWithPhotonMap(const Vec3f& wo, const IntersectInfo& info) const { // get nearby photons const std::vector<int> photon_indices = photonMap.queryPhotonsInRange(info.surfaceInfo.position, globalRadius); Vec3f Lo; for (const int photon_idx : photon_indices) { const Photon& photon = photonMap.getIthPhoton(photon_idx); const Vec3f f = info.hitPrimitive->evaluateBxDF( wo, photon.wi, info.surfaceInfo, TransportDirection::FROM_CAMERA); Lo += f * photon.throughput; } Lo /= (nPhotons * PI * globalRadius * globalRadius); return Lo; } // sample initial ray from light and compute initial throughput Ray sampleRayFromLight(const Scene& scene, Sampler& sampler, Vec3f& throughput) { // sample light float light_choose_pdf; const std::shared_ptr<Light> light = scene.sampleLight(sampler, light_choose_pdf); // sample point on light float light_pos_pdf; const SurfaceInfo light_surf = light->samplePoint(sampler, light_pos_pdf); // sample direction on light float light_dir_pdf; const Vec3f dir = light->sampleDirection(light_surf, sampler, light_dir_pdf); // spawn ray Ray ray(light_surf.position, dir); throughput = light->Le(light_surf, dir) / (light_choose_pdf * light_pos_pdf * light_dir_pdf) * std::abs(dot(dir, light_surf.shadingNormal)); return ray; } // photon tracing and build photon map void buildPhotonMap(const Scene& scene, std::vector<std::unique_ptr<Sampler>>& samplers) { // photon tracing std::vector<Photon> photons; // spdlog::info("[PPMAPA] tracing
photons..."); #pragma omp parallel for for (uint32_t i = 0; i < nPhotons; ++i) { auto& sampler_per_thread = *samplers[omp_get_thread_num()]; // sample initial ray from light and set initial throughput Vec3f throughput; Ray ray = sampleRayFromLight(scene, sampler_per_thread, throughput); // trace photons // whener hitting diffuse surface, add photon to the photon array // recursively tracing photon with russian roulette for (uint32_t k = 0; k < maxDepth; ++k) { if (std::isnan(throughput[0]) || std::isnan(throughput[1]) || std::isnan(throughput[2])) { spdlog::error("[PPMAPA] photon throughput is NaN"); break; } else if (throughput[0] < 0 || throughput[1] < 0 || throughput[2] < 0) { spdlog::error("[PPMAPA] photon throughput is minus"); break; } IntersectInfo info; if (scene.intersect(ray, info)) { const BxDFType bxdf_type = info.hitPrimitive->getBxDFType(); if (bxdf_type == BxDFType::DIFFUSE) { // TODO: remove lock to get more speed #pragma omp critical { photons.emplace_back(throughput, info.surfaceInfo.position, -ray.direction); } } // russian roulette if (k > 0) { const float russian_roulette_prob = std::min( std::max(throughput[0], std::max(throughput[1], throughput[2])), 1.0f); if (sampler_per_thread.getNext1D() >= russian_roulette_prob) { break; } throughput /= russian_roulette_prob; } // sample direction by BxDF Vec3f dir; float pdf_dir; const Vec3f f = info.hitPrimitive->sampleBxDF( -ray.direction, info.surfaceInfo, TransportDirection::FROM_LIGHT, sampler_per_thread, dir, pdf_dir); // update throughput and ray throughput *= f * cosTerm(-ray.direction, dir, info.surfaceInfo, TransportDirection::FROM_LIGHT) / pdf_dir; ray = Ray(info.surfaceInfo.position, dir); } else { // photon goes to the sky break; } } } // spdlog::info("[PPMAPA] done"); // build photon map // spdlog::info("[PPMAPA] building photon map..."); photonMap.setPhotons(photons); photonMap.build(); // spdlog::info("[PPMAPA] done"); } // compute incoming radiance with photon map Vec3f integrate(const Ray& ray_in, const Scene& scene, Sampler& sampler) const { Ray ray = ray_in; Vec3f throughput(1, 1, 1); for (uint32_t k = 0; k < maxDepth; ++k) { IntersectInfo info; if (scene.intersect(ray, info)) { // when directly hitting light if (info.hitPrimitive->hasAreaLight()) { return throughput * info.hitPrimitive->Le(info.surfaceInfo, -ray.direction); } const BxDFType bxdf_type = info.hitPrimitive->getBxDFType(); // if hitting diffuse surface, compute reflected radiance with photon // map if (bxdf_type == BxDFType::DIFFUSE) { return throughput * computeRadianceWithPhotonMap(-ray.direction, info); } // if hitting specular surface, generate next ray and continue tracing else if (bxdf_type == BxDFType::SPECULAR) { // sample direction by BxDF Vec3f dir; float pdf_dir; Vec3f f = info.hitPrimitive->sampleBxDF( -ray.direction, info.surfaceInfo, TransportDirection::FROM_CAMERA, sampler, dir, pdf_dir); // update throughput and ray throughput *= f * cosTerm(-ray.direction, dir, info.surfaceInfo, TransportDirection::FROM_CAMERA) / pdf_dir; ray = Ray(info.surfaceInfo.position, dir); } } else { // ray goes out the the sky break; } } return Vec3f(0); } public: PPMAPA(const std::shared_ptr<Camera>& camera, uint32_t nIterations, uint32_t nPhotons, float alpha, float initialRadius, uint32_t maxDepth = 100) : Integrator(camera), nIterations(nIterations), nPhotons(nPhotons), alpha(alpha), globalRadius(initialRadius), maxDepth(maxDepth), nEmittedPhotons(0) {} void render(const Scene& scene, Sampler& sampler, Image& image) override { // init sampler for 
each thread std::vector<std::unique_ptr<Sampler>> samplers(omp_get_max_threads()); for (int i = 0; i < samplers.size(); ++i) { samplers[i] = sampler.clone(); samplers[i]->setSeed(sampler.getSeed() * (i + 1)); // warmup sampler for (int j = 0; j < 10; ++j) { samplers[i]->getNext1D(); } } const uint32_t width = image.getWidth(); const uint32_t height = image.getHeight(); spdlog::info("[PPMAPA] rendering..."); for (uint32_t iteration = 0; iteration < nIterations; ++iteration) { spdlog::info("[PPMAPA] iteration: {}", iteration); spdlog::info("[PPMAPA] radius: {}", globalRadius); // clear previous photon map photonMap.clear(); // photon tracing and build photon map // spdlog::info("[PPMAPA] photon tracing pass..."); buildPhotonMap(scene, samplers); nEmittedPhotons += nPhotons; // spdlog::info("[PPMAPA] done"); // eye tracing // spdlog::info("[PPMAPA] eye tracing pass..."); #pragma omp parallel for collapse(2) schedule(dynamic, 1) for (uint32_t i = 0; i < height; ++i) { for (uint32_t j = 0; j < width; ++j) { auto& sampler_per_thread = *samplers[omp_get_thread_num()]; // SSAA const float u = (2.0f * (j + sampler_per_thread.getNext1D()) - width) / height; const float v = (2.0f * (i + sampler_per_thread.getNext1D()) - height) / height; Ray ray; float pdf; if (camera->sampleRay(Vec2f(u, v), sampler_per_thread, ray, pdf)) { // compute incoming radiance with photon map const Vec3f radiance = integrate(ray, scene, sampler_per_thread) / pdf; // invalid radiance check if (std::isnan(radiance[0]) || std::isnan(radiance[1]) || std::isnan(radiance[2])) { spdlog::error("[SPPM] radiance is NaN"); continue; } else if (std::isinf(radiance[0]) || std::isinf(radiance[1]) || std::isinf(radiance[2])) { spdlog::error("[SPPM] radiance is inf"); continue; } else if (radiance[0] < 0 || radiance[1] < 0 || radiance[2] < 0) { spdlog::error("[SPPM] radiance is minus"); continue; } // add contribution image.addPixel(i, j, radiance); } else { image.setPixel(i, j, Vec3f(0)); } } } // spdlog::info("[SPPM] done"); // update search radius globalRadius = std::sqrt((iteration + alpha) / (iteration + 1)) * globalRadius; // save image at each iteration // Image image_copied = image; // image_copied /= Vec3f(iteration + 1); // image_copied.gammaCorrection(2.2f); // image_copied.writePPM("iteration_" + std::to_string(iteration) + // ".ppm"); } // take average image /= Vec3f(nIterations); spdlog::info("[PPMAPA] done"); } }; #endif
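Two formulas from the PPMAPA implementation above, written out. The radiance estimate in computeRadianceWithPhotonMap is the standard photon density estimate

$$L_o(x,\omega_o) \approx \frac{1}{N \pi r^2} \sum_{p} f_r(x, \omega_o, \omega_p)\,\Phi_p,$$

with $N$ the number of photons traced per iteration (nPhotons in the code), $r$ the current search radius (globalRadius), and $\Phi_p$ the stored photon throughput. The per-iteration radius update at the end of render follows Knaus and Zwicker,

$$r_{i+1} = \sqrt{\frac{i+\alpha}{i+1}}\, r_i,$$

which drives the search radius toward zero over the iterations, trading per-pass bias for convergence of the averaged image.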
#ifndef _INTEGRATOR_H #define _INTEGRATOR_H #include <omp.h> #include <optional> #include "camera.h" #include "core.h" #include "image.h" #include "photon_map.h" #include "scene.h" class Integrator { protected: const std::shared_ptr<Camera> camera; public: Integrator(const std::shared_ptr<Camera>& camera) : camera(camera) {} // render scene virtual void render(const Scene& scene, Sampler& sampler, Image& image) = 0; // compute cosine term // NOTE: need to account for the asymmetry of BSDF when photon tracing // https://pbr-book.org/3ed-2018/Light_Transport_III_Bidirectional_Methods/The_Path-Space_Measurement_Equation#x3-Non-symmetryDuetoShadingNormals // Veach, Eric. Robust Monte Carlo methods for light transport simulation. // Stanford University, 1998. Section 5.3 static float cosTerm(const Vec3f& wo, const Vec3f& wi, const SurfaceInfo& surfaceInfo, const TransportDirection& transport_dir) { const float wi_ns = dot(wi, surfaceInfo.shadingNormal); const float wi_ng = dot(wi, surfaceInfo.geometricNormal); const float wo_ns = dot(wo, surfaceInfo.shadingNormal); const float wo_ng = dot(wo, surfaceInfo.geometricNormal); // prevent light leaks if (wi_ng * wi_ns <= 0 || wo_ng * wo_ns <= 0) { return 0; } if (transport_dir == TransportDirection::FROM_CAMERA) { return std::abs(wi_ns); } else if (transport_dir == TransportDirection::FROM_LIGHT) { return std::abs(wo_ns) * std::abs(wi_ng) / std::abs(wo_ng); } else { spdlog::error("[Integrator] invalid transport direction"); std::exit(EXIT_FAILURE); } } }; // abstraction of path based integrator class PathIntegrator : public Integrator { private: // number of samples in each pixel const uint32_t n_samples; public: // compute radiance coming from the given ray virtual Vec3f integrate(const Ray& ray, const Scene& scene, Sampler& sampler) const = 0; PathIntegrator(const std::shared_ptr<Camera>& camera, uint32_t n_samples) : Integrator(camera), n_samples(n_samples) {} void render(const Scene& scene, Sampler& sampler, Image& image) override final { const uint32_t width = image.getWidth(); const uint32_t height = image.getHeight(); spdlog::info("[PathIntegrator] rendering..."); for (int i = 0; i < height; ++i) { for (int j = 0; j < width; ++j) { // init sampler for each pixel const std::unique_ptr<Sampler> sampler_per_pixel = sampler.clone(); sampler_per_pixel->setSeed((sampler.getSeed() + 1) * (j + width * i)); // warmup sampler for (uint32_t k = 0; k < 100; ++k) { sampler_per_pixel->getNext1D(); } // iteration for (uint32_t k = 0; k < n_samples; ++k) { // SSAA const float u = (2.0f * (j + sampler_per_pixel->getNext1D()) - width) / height; const float v = (2.0f * (i + sampler_per_pixel->getNext1D()) - height) / height; Ray ray; float pdf; if (camera->sampleRay(Vec2f(u, v), *sampler_per_pixel, ray, pdf)) { // compute incoming radiance const Vec3f radiance = integrate(ray, scene, *sampler_per_pixel) / pdf; // invalid radiance check if (std::isnan(radiance[0]) || std::isnan(radiance[1]) || std::isnan(radiance[2])) { spdlog::error("[PathIntegrator] radiance is NaN"); continue; } else if (std::isinf(radiance[0]) || std::isinf(radiance[1]) || std::isinf(radiance[2])) { spdlog::error("[PathIntegrator] radiance is inf"); continue; } else if (radiance[0] < 0 || radiance[1] < 0 || radiance[2] < 0) { spdlog::error("[PathIntegrator] radiance is minus"); continue; } image.addPixel(i, j, radiance); } else { image.setPixel(i, j, Vec3f(0)); } } } } spdlog::info("[PathIntegrator] done"); // take average image /= Vec3f(n_samples); } }; // implementation of path tracing // 
NOTE: for reference purposes class PathTracing : public PathIntegrator { private: const uint32_t maxDepth; public: PathTracing(const std::shared_ptr<Camera>& camera, uint32_t n_samples, uint32_t maxDepth = 100) : PathIntegrator(camera, n_samples), maxDepth(maxDepth) {} Vec3f integrate(const Ray& ray_in, const Scene& scene, Sampler& sampler) const override { Vec3f radiance(0); Ray ray = ray_in; Vec3f throughput(1, 1, 1); for (uint32_t k = 0; k < maxDepth; ++k) { IntersectInfo info; if (scene.intersect(ray, info)) { // russian roulette if (k > 0) { const float russian_roulette_prob = std::min( std::max(throughput[0], std::max(throughput[1], throughput[2])), 1.0f); if (sampler.getNext1D() >= russian_roulette_prob) { break; } throughput /= russian_roulette_prob; } // Le if (info.hitPrimitive->hasAreaLight()) { radiance += throughput * info.hitPrimitive->Le(info.surfaceInfo, -ray.direction); } // sample direction by BxDF Vec3f dir; float pdf_dir; Vec3f f = info.hitPrimitive->sampleBxDF( -ray.direction, info.surfaceInfo, TransportDirection::FROM_CAMERA, sampler, dir, pdf_dir); // update throughput and ray throughput *= f * cosTerm(-ray.direction, dir, info.surfaceInfo, TransportDirection::FROM_CAMERA) / pdf_dir; ray = Ray(info.surfaceInfo.position, dir); } else { break; } } return radiance; } }; // this implementation is based on a modified version of the original SPPM // Knaus, Claude, and Matthias Zwicker. // "Progressive photon mapping: A probabilistic approach." ACM Transactions on // Graphics (TOG) 30.3 (2011): 1-13. class PPMAPA : public Integrator { private: // number of iterations const uint32_t nIterations; // number of photons in each iteration const uint32_t nPhotons; // parameter for radius reduction, see the paper const float alpha; // maximum tracing depth const uint32_t maxDepth; // number of emitted photons uint32_t nEmittedPhotons; // global search radius for radiance estimation float globalRadius; PhotonMap photonMap; // compute reflected radiance with photon map Vec3f computeRadianceWithPhotonMap(const Vec3f& wo, const IntersectInfo& info) const { // get nearby photons const std::vector<int> photon_indices = photonMap.queryPhotonsInRange(info.surfaceInfo.position, globalRadius); Vec3f Lo; for (const int photon_idx : photon_indices) { const Photon& photon = photonMap.getIthPhoton(photon_idx); const Vec3f f = info.hitPrimitive->evaluateBxDF( wo, photon.wi, info.surfaceInfo, TransportDirection::FROM_CAMERA); Lo += f * photon.throughput; } Lo /= (nPhotons * PI * globalRadius * globalRadius); return Lo; } // sample initial ray from light and compute initial throughput Ray sampleRayFromLight(const Scene& scene, Sampler& sampler, Vec3f& throughput) { // sample light float light_choose_pdf; const std::shared_ptr<Light> light = scene.sampleLight(sampler, light_choose_pdf); // sample point on light float light_pos_pdf; const SurfaceInfo light_surf = light->samplePoint(sampler, light_pos_pdf); // sample direction on light float light_dir_pdf; const Vec3f dir = light->sampleDirection(light_surf, sampler, light_dir_pdf); // spawn ray Ray ray(light_surf.position, dir); throughput = light->Le(light_surf, dir) / (light_choose_pdf * light_pos_pdf * light_dir_pdf) * std::abs(dot(dir, light_surf.shadingNormal)); return ray; } // photon tracing and build photon map void buildPhotonMap(const Scene& scene, std::vector<std::unique_ptr<Sampler>>& samplers) { // photon tracing std::vector<Photon> photons; // spdlog::info("[PPMAPA] tracing photons..."); for (uint32_t i = 0; i < nPhotons; ++i) { auto& 
sampler_per_thread = *samplers[omp_get_thread_num()]; // sample initial ray from light and set initial throughput Vec3f throughput; Ray ray = sampleRayFromLight(scene, sampler_per_thread, throughput); // trace photons // whenever hitting a diffuse surface, add the photon to the photon array // recursively trace photons with russian roulette for (uint32_t k = 0; k < maxDepth; ++k) { if (std::isnan(throughput[0]) || std::isnan(throughput[1]) || std::isnan(throughput[2])) { spdlog::error("[PPMAPA] photon throughput is NaN"); break; } else if (throughput[0] < 0 || throughput[1] < 0 || throughput[2] < 0) { spdlog::error("[PPMAPA] photon throughput is minus"); break; } IntersectInfo info; if (scene.intersect(ray, info)) { const BxDFType bxdf_type = info.hitPrimitive->getBxDFType(); if (bxdf_type == BxDFType::DIFFUSE) { // TODO: remove lock to get more speed photons.emplace_back(throughput, info.surfaceInfo.position, -ray.direction); } // russian roulette if (k > 0) { const float russian_roulette_prob = std::min( std::max(throughput[0], std::max(throughput[1], throughput[2])), 1.0f); if (sampler_per_thread.getNext1D() >= russian_roulette_prob) { break; } throughput /= russian_roulette_prob; } // sample direction by BxDF Vec3f dir; float pdf_dir; const Vec3f f = info.hitPrimitive->sampleBxDF( -ray.direction, info.surfaceInfo, TransportDirection::FROM_LIGHT, sampler_per_thread, dir, pdf_dir); // update throughput and ray throughput *= f * cosTerm(-ray.direction, dir, info.surfaceInfo, TransportDirection::FROM_LIGHT) / pdf_dir; ray = Ray(info.surfaceInfo.position, dir); } else { // photon goes to the sky break; } } } // spdlog::info("[PPMAPA] done"); // build photon map // spdlog::info("[PPMAPA] building photon map..."); photonMap.setPhotons(photons); photonMap.build(); // spdlog::info("[PPMAPA] done"); } // compute incoming radiance with photon map Vec3f integrate(const Ray& ray_in, const Scene& scene, Sampler& sampler) const { Ray ray = ray_in; Vec3f throughput(1, 1, 1); for (uint32_t k = 0; k < maxDepth; ++k) { IntersectInfo info; if (scene.intersect(ray, info)) { // when directly hitting light if (info.hitPrimitive->hasAreaLight()) { return throughput * info.hitPrimitive->Le(info.surfaceInfo, -ray.direction); } const BxDFType bxdf_type = info.hitPrimitive->getBxDFType(); // if hitting diffuse surface, compute reflected radiance with photon // map if (bxdf_type == BxDFType::DIFFUSE) { return throughput * computeRadianceWithPhotonMap(-ray.direction, info); } // if hitting specular surface, generate next ray and continue tracing else if (bxdf_type == BxDFType::SPECULAR) { // sample direction by BxDF Vec3f dir; float pdf_dir; Vec3f f = info.hitPrimitive->sampleBxDF( -ray.direction, info.surfaceInfo, TransportDirection::FROM_CAMERA, sampler, dir, pdf_dir); // update throughput and ray throughput *= f * cosTerm(-ray.direction, dir, info.surfaceInfo, TransportDirection::FROM_CAMERA) / pdf_dir; ray = Ray(info.surfaceInfo.position, dir); } } else { // ray goes out into the sky break; } } return Vec3f(0); } public: PPMAPA(const std::shared_ptr<Camera>& camera, uint32_t nIterations, uint32_t nPhotons, float alpha, float initialRadius, uint32_t maxDepth = 100) : Integrator(camera), nIterations(nIterations), nPhotons(nPhotons), alpha(alpha), globalRadius(initialRadius), maxDepth(maxDepth), nEmittedPhotons(0) {} void render(const Scene& scene, Sampler& sampler, Image& image) override { // init sampler for each thread std::vector<std::unique_ptr<Sampler>> samplers(omp_get_max_threads()); for (int i = 0; i < 
samplers.size(); ++i) { samplers[i] = sampler.clone(); samplers[i]->setSeed(sampler.getSeed() * (i + 1)); // warmup sampler for (int j = 0; j < 10; ++j) { samplers[i]->getNext1D(); } } const uint32_t width = image.getWidth(); const uint32_t height = image.getHeight(); spdlog::info("[PPMAPA] rendering..."); for (uint32_t iteration = 0; iteration < nIterations; ++iteration) { spdlog::info("[PPMAPA] iteration: {}", iteration); spdlog::info("[PPMAPA] radius: {}", globalRadius); // clear previous photon map photonMap.clear(); // photon tracing and build photon map // spdlog::info("[PPMAPA] photon tracing pass..."); buildPhotonMap(scene, samplers); nEmittedPhotons += nPhotons; // spdlog::info("[PPMAPA] done"); // eye tracing // spdlog::info("[PPMAPA] eye tracing pass..."); for (uint32_t i = 0; i < height; ++i) { for (uint32_t j = 0; j < width; ++j) { auto& sampler_per_thread = *samplers[omp_get_thread_num()]; // SSAA const float u = (2.0f * (j + sampler_per_thread.getNext1D()) - width) / height; const float v = (2.0f * (i + sampler_per_thread.getNext1D()) - height) / height; Ray ray; float pdf; if (camera->sampleRay(Vec2f(u, v), sampler_per_thread, ray, pdf)) { // compute incoming radiance with photon map const Vec3f radiance = integrate(ray, scene, sampler_per_thread) / pdf; // invalid radiance check if (std::isnan(radiance[0]) || std::isnan(radiance[1]) || std::isnan(radiance[2])) { spdlog::error("[SPPM] radiance is NaN"); continue; } else if (std::isinf(radiance[0]) || std::isinf(radiance[1]) || std::isinf(radiance[2])) { spdlog::error("[SPPM] radiance is inf"); continue; } else if (radiance[0] < 0 || radiance[1] < 0 || radiance[2] < 0) { spdlog::error("[SPPM] radiance is minus"); continue; } // add contribution image.addPixel(i, j, radiance); } else { image.setPixel(i, j, Vec3f(0)); } } } // spdlog::info("[SPPM] done"); // update search radius globalRadius = std::sqrt((iteration + alpha) / (iteration + 1)) * globalRadius; // save image at each iteration // Image image_copied = image; // image_copied /= Vec3f(iteration + 1); // image_copied.gammaCorrection(2.2f); // image_copied.writePPM("iteration_" + std::to_string(iteration) + // ".ppm"); } // take average image /= Vec3f(nIterations); spdlog::info("[PPMAPA] done"); } }; #endif
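Both PathTracing::integrate and the photon tracer above use the same throughput-based russian roulette: the survival probability is the largest throughput component clamped to 1, and survivors are divided by that probability, so the expected contribution is unchanged. A self-contained sketch of just that rule, with a hypothetical Vec3 standing in for the renderer's vector type:

#include <algorithm>

// Hypothetical stand-in for the renderer's Vec3f.
struct Vec3 {
  float x, y, z;
};

// Throughput-based russian roulette as in the integrators above: survive with
// probability p = min(max component of throughput, 1) and reweight survivors
// by 1/p, keeping the estimator unbiased. `u` is a uniform sample in [0, 1).
bool russianRoulette(Vec3& throughput, float u) {
  const float p =
      std::min(std::max({throughput.x, throughput.y, throughput.z}), 1.0f);
  if (u >= p) return false; // terminate the path
  throughput.x /= p;        // reweight the survivor
  throughput.y /= p;
  throughput.z /= p;
  return true;
}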
#ifndef _INTEGRATOR_H #define _INTEGRATOR_H #include <omp.h> #include <optional> #include "camera.h" #include "core.h" #include "image.h" #include "photon_map.h" #include "scene.h" class Integrator { protected: const std::shared_ptr<Camera> camera; public: Integrator(const std::shared_ptr<Camera>& camera) : camera(camera) {} // render scene virtual void render(const Scene& scene, Sampler& sampler, Image& image) = 0; // compute cosine term // NOTE: need to account for the asymmetry of BSDF when photon tracing // https://pbr-book.org/3ed-2018/Light_Transport_III_Bidirectional_Methods/The_Path-Space_Measurement_Equation#x3-Non-symmetryDuetoShadingNormals // Veach, Eric. Robust Monte Carlo methods for light transport simulation. // Stanford University, 1998. Section 5.3 static float cosTerm(const Vec3f& wo, const Vec3f& wi, const SurfaceInfo& surfaceInfo, const TransportDirection& transport_dir) { const float wi_ns = dot(wi, surfaceInfo.shadingNormal); const float wi_ng = dot(wi, surfaceInfo.geometricNormal); const float wo_ns = dot(wo, surfaceInfo.shadingNormal); const float wo_ng = dot(wo, surfaceInfo.geometricNormal); // prevent light leaks if (wi_ng * wi_ns <= 0 || wo_ng * wo_ns <= 0) { return 0; } if (transport_dir == TransportDirection::FROM_CAMERA) { return std::abs(wi_ns); } else if (transport_dir == TransportDirection::FROM_LIGHT) { return std::abs(wo_ns) * std::abs(wi_ng) / std::abs(wo_ng); } else { spdlog::error("[Integrator] invalid transport direction"); std::exit(EXIT_FAILURE); } } }; // abstraction of path based integrator class PathIntegrator : public Integrator { private: // number of samples in each pixel const uint32_t n_samples; public: // compute radiance coming from the given ray virtual Vec3f integrate(const Ray& ray, const Scene& scene, Sampler& sampler) const = 0; PathIntegrator(const std::shared_ptr<Camera>& camera, uint32_t n_samples) : Integrator(camera), n_samples(n_samples) {} void render(const Scene& scene, Sampler& sampler, Image& image) override final { const uint32_t width = image.getWidth(); const uint32_t height = image.getHeight(); spdlog::info("[PathIntegrator] rendering..."); #pragma omp parallel for collapse(2) schedule(dynamic, 1) for (int i = 0; i < height; ++i) { for (int j = 0; j < width; ++j) { // init sampler for each pixel const std::unique_ptr<Sampler> sampler_per_pixel = sampler.clone(); sampler_per_pixel->setSeed((sampler.getSeed() + 1) * (j + width * i)); // warmup sampler for (uint32_t k = 0; k < 100; ++k) { sampler_per_pixel->getNext1D(); } // iteration for (uint32_t k = 0; k < n_samples; ++k) { // SSAA const float u = (2.0f * (j + sampler_per_pixel->getNext1D()) - width) / height; const float v = (2.0f * (i + sampler_per_pixel->getNext1D()) - height) / height; Ray ray; float pdf; if (camera->sampleRay(Vec2f(u, v), *sampler_per_pixel, ray, pdf)) { // compute incoming radiance const Vec3f radiance = integrate(ray, scene, *sampler_per_pixel) / pdf; // invalid radiance check if (std::isnan(radiance[0]) || std::isnan(radiance[1]) || std::isnan(radiance[2])) { spdlog::error("[PathIntegrator] radiance is NaN"); continue; } else if (std::isinf(radiance[0]) || std::isinf(radiance[1]) || std::isinf(radiance[2])) { spdlog::error("[PathIntegrator] radiance is inf"); continue; } else if (radiance[0] < 0 || radiance[1] < 0 || radiance[2] < 0) { spdlog::error("[PathIntegrator] radiance is minus"); continue; } image.addPixel(i, j, radiance); } else { image.setPixel(i, j, Vec3f(0)); } } } } spdlog::info("[PathIntegrator] done"); // take average image /= 
Vec3f(n_samples); } }; // implementation of path tracing // NOTE: for reference purposes class PathTracing : public PathIntegrator { private: const uint32_t maxDepth; public: PathTracing(const std::shared_ptr<Camera>& camera, uint32_t n_samples, uint32_t maxDepth = 100) : PathIntegrator(camera, n_samples), maxDepth(maxDepth) {} Vec3f integrate(const Ray& ray_in, const Scene& scene, Sampler& sampler) const override { Vec3f radiance(0); Ray ray = ray_in; Vec3f throughput(1, 1, 1); for (uint32_t k = 0; k < maxDepth; ++k) { IntersectInfo info; if (scene.intersect(ray, info)) { // russian roulette if (k > 0) { const float russian_roulette_prob = std::min( std::max(throughput[0], std::max(throughput[1], throughput[2])), 1.0f); if (sampler.getNext1D() >= russian_roulette_prob) { break; } throughput /= russian_roulette_prob; } // Le if (info.hitPrimitive->hasAreaLight()) { radiance += throughput * info.hitPrimitive->Le(info.surfaceInfo, -ray.direction); } // sample direction by BxDF Vec3f dir; float pdf_dir; Vec3f f = info.hitPrimitive->sampleBxDF( -ray.direction, info.surfaceInfo, TransportDirection::FROM_CAMERA, sampler, dir, pdf_dir); // update throughput and ray throughput *= f * cosTerm(-ray.direction, dir, info.surfaceInfo, TransportDirection::FROM_CAMERA) / pdf_dir; ray = Ray(info.surfaceInfo.position, dir); } else { break; } } return radiance; } }; // this implementation is based on a modified version of the original SPPM // Knaus, Claude, and Matthias Zwicker. // "Progressive photon mapping: A probabilistic approach." ACM Transactions on // Graphics (TOG) 30.3 (2011): 1-13. class PPMAPA : public Integrator { private: // number of iterations const uint32_t nIterations; // number of photons in each iteration const uint32_t nPhotons; // parameter for radius reduction, see the paper const float alpha; // maximum tracing depth const uint32_t maxDepth; // number of emitted photons uint32_t nEmittedPhotons; // global search radius for radiance estimation float globalRadius; PhotonMap photonMap; // compute reflected radiance with photon map Vec3f computeRadianceWithPhotonMap(const Vec3f& wo, const IntersectInfo& info) const { // get nearby photons const std::vector<int> photon_indices = photonMap.queryPhotonsInRange(info.surfaceInfo.position, globalRadius); Vec3f Lo; for (const int photon_idx : photon_indices) { const Photon& photon = photonMap.getIthPhoton(photon_idx); const Vec3f f = info.hitPrimitive->evaluateBxDF( wo, photon.wi, info.surfaceInfo, TransportDirection::FROM_CAMERA); Lo += f * photon.throughput; } Lo /= (nPhotons * PI * globalRadius * globalRadius); return Lo; } // sample initial ray from light and compute initial throughput Ray sampleRayFromLight(const Scene& scene, Sampler& sampler, Vec3f& throughput) { // sample light float light_choose_pdf; const std::shared_ptr<Light> light = scene.sampleLight(sampler, light_choose_pdf); // sample point on light float light_pos_pdf; const SurfaceInfo light_surf = light->samplePoint(sampler, light_pos_pdf); // sample direction on light float light_dir_pdf; const Vec3f dir = light->sampleDirection(light_surf, sampler, light_dir_pdf); // spawn ray Ray ray(light_surf.position, dir); throughput = light->Le(light_surf, dir) / (light_choose_pdf * light_pos_pdf * light_dir_pdf) * std::abs(dot(dir, light_surf.shadingNormal)); return ray; } // photon tracing and build photon map void buildPhotonMap(const Scene& scene, std::vector<std::unique_ptr<Sampler>>& samplers) { // photon tracing std::vector<Photon> photons; // spdlog::info("[PPMAPA] tracing 
photons..."); #pragma omp parallel for for (uint32_t i = 0; i < nPhotons; ++i) { auto& sampler_per_thread = *samplers[omp_get_thread_num()]; // sample initial ray from light and set initial throughput Vec3f throughput; Ray ray = sampleRayFromLight(scene, sampler_per_thread, throughput); // trace photons // whenever hitting a diffuse surface, add the photon to the photon array // recursively trace photons with russian roulette for (uint32_t k = 0; k < maxDepth; ++k) { if (std::isnan(throughput[0]) || std::isnan(throughput[1]) || std::isnan(throughput[2])) { spdlog::error("[PPMAPA] photon throughput is NaN"); break; } else if (throughput[0] < 0 || throughput[1] < 0 || throughput[2] < 0) { spdlog::error("[PPMAPA] photon throughput is minus"); break; } IntersectInfo info; if (scene.intersect(ray, info)) { const BxDFType bxdf_type = info.hitPrimitive->getBxDFType(); if (bxdf_type == BxDFType::DIFFUSE) { // TODO: remove lock to get more speed #pragma omp critical { photons.emplace_back(throughput, info.surfaceInfo.position, -ray.direction); } } // russian roulette if (k > 0) { const float russian_roulette_prob = std::min( std::max(throughput[0], std::max(throughput[1], throughput[2])), 1.0f); if (sampler_per_thread.getNext1D() >= russian_roulette_prob) { break; } throughput /= russian_roulette_prob; } // sample direction by BxDF Vec3f dir; float pdf_dir; const Vec3f f = info.hitPrimitive->sampleBxDF( -ray.direction, info.surfaceInfo, TransportDirection::FROM_LIGHT, sampler_per_thread, dir, pdf_dir); // update throughput and ray throughput *= f * cosTerm(-ray.direction, dir, info.surfaceInfo, TransportDirection::FROM_LIGHT) / pdf_dir; ray = Ray(info.surfaceInfo.position, dir); } else { // photon goes to the sky break; } } } // spdlog::info("[PPMAPA] done"); // build photon map // spdlog::info("[PPMAPA] building photon map..."); photonMap.setPhotons(photons); photonMap.build(); // spdlog::info("[PPMAPA] done"); } // compute incoming radiance with photon map Vec3f integrate(const Ray& ray_in, const Scene& scene, Sampler& sampler) const { Ray ray = ray_in; Vec3f throughput(1, 1, 1); for (uint32_t k = 0; k < maxDepth; ++k) { IntersectInfo info; if (scene.intersect(ray, info)) { // when directly hitting light if (info.hitPrimitive->hasAreaLight()) { return throughput * info.hitPrimitive->Le(info.surfaceInfo, -ray.direction); } const BxDFType bxdf_type = info.hitPrimitive->getBxDFType(); // if hitting diffuse surface, compute reflected radiance with photon // map if (bxdf_type == BxDFType::DIFFUSE) { return throughput * computeRadianceWithPhotonMap(-ray.direction, info); } // if hitting specular surface, generate next ray and continue tracing else if (bxdf_type == BxDFType::SPECULAR) { // sample direction by BxDF Vec3f dir; float pdf_dir; Vec3f f = info.hitPrimitive->sampleBxDF( -ray.direction, info.surfaceInfo, TransportDirection::FROM_CAMERA, sampler, dir, pdf_dir); // update throughput and ray throughput *= f * cosTerm(-ray.direction, dir, info.surfaceInfo, TransportDirection::FROM_CAMERA) / pdf_dir; ray = Ray(info.surfaceInfo.position, dir); } } else { // ray goes out into the sky break; } } return Vec3f(0); } public: PPMAPA(const std::shared_ptr<Camera>& camera, uint32_t nIterations, uint32_t nPhotons, float alpha, float initialRadius, uint32_t maxDepth = 100) : Integrator(camera), nIterations(nIterations), nPhotons(nPhotons), alpha(alpha), globalRadius(initialRadius), maxDepth(maxDepth), nEmittedPhotons(0) {} void render(const Scene& scene, Sampler& sampler, Image& image) override { // init sampler for 
each thread std::vector<std::unique_ptr<Sampler>> samplers(omp_get_max_threads()); for (int i = 0; i < samplers.size(); ++i) { samplers[i] = sampler.clone(); samplers[i]->setSeed(sampler.getSeed() * (i + 1)); // warmup sampler for (int j = 0; j < 10; ++j) { samplers[i]->getNext1D(); } } const uint32_t width = image.getWidth(); const uint32_t height = image.getHeight(); spdlog::info("[PPMAPA] rendering..."); for (uint32_t iteration = 0; iteration < nIterations; ++iteration) { spdlog::info("[PPMAPA] iteration: {}", iteration); spdlog::info("[PPMAPA] radius: {}", globalRadius); // clear previous photon map photonMap.clear(); // photon tracing and build photon map // spdlog::info("[PPMAPA] photon tracing pass..."); buildPhotonMap(scene, samplers); nEmittedPhotons += nPhotons; // spdlog::info("[PPMAPA] done"); // eye tracing // spdlog::info("[PPMAPA] eye tracing pass..."); #pragma omp parallel for collapse(2) schedule(dynamic, 1) for (uint32_t i = 0; i < height; ++i) { for (uint32_t j = 0; j < width; ++j) { auto& sampler_per_thread = *samplers[omp_get_thread_num()]; // SSAA const float u = (2.0f * (j + sampler_per_thread.getNext1D()) - width) / height; const float v = (2.0f * (i + sampler_per_thread.getNext1D()) - height) / height; Ray ray; float pdf; if (camera->sampleRay(Vec2f(u, v), sampler_per_thread, ray, pdf)) { // compute incoming radiance with photon map const Vec3f radiance = integrate(ray, scene, sampler_per_thread) / pdf; // invalid radiance check if (std::isnan(radiance[0]) || std::isnan(radiance[1]) || std::isnan(radiance[2])) { spdlog::error("[SPPM] radiance is NaN"); continue; } else if (std::isinf(radiance[0]) || std::isinf(radiance[1]) || std::isinf(radiance[2])) { spdlog::error("[SPPM] radiance is inf"); continue; } else if (radiance[0] < 0 || radiance[1] < 0 || radiance[2] < 0) { spdlog::error("[SPPM] radiance is minus"); continue; } // add contribution image.addPixel(i, j, radiance); } else { image.setPixel(i, j, Vec3f(0)); } } } // spdlog::info("[SPPM] done"); // update search radius globalRadius = std::sqrt((iteration + alpha) / (iteration + 1)) * globalRadius; // save image at each iteration // Image image_copied = image; // image_copied /= Vec3f(iteration + 1); // image_copied.gammaCorrection(2.2f); // image_copied.writePPM("iteration_" + std::to_string(iteration) + // ".ppm"); } // take average image /= Vec3f(nIterations); spdlog::info("[PPMAPA] done"); } }; #endif
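The #pragma omp critical around photons.emplace_back in the parallel photon pass serializes every deposit, which is exactly what the TODO comment flags. A common lock-free alternative, sketched here under the assumption that photon order is irrelevant (the Photon struct and the deposit are placeholders, not the renderer's types), is one buffer per thread merged once after the loop:

#include <omp.h>

#include <vector>

// Placeholder photon record for the sketch.
struct Photon {
  float power[3];
};

std::vector<Photon> tracePhotonsWithoutLock(int nPhotons) {
  // One buffer per thread: no synchronization needed while tracing.
  std::vector<std::vector<Photon>> perThread(omp_get_max_threads());

#pragma omp parallel for schedule(dynamic, 64)
  for (int i = 0; i < nPhotons; ++i) {
    auto& local = perThread[omp_get_thread_num()];
    // ... trace photon i; every diffuse hit appends to the local buffer ...
    local.push_back(Photon{{1.0f, 1.0f, 1.0f}}); // placeholder deposit
  }

  // Serial merge after the parallel region; cost is linear in photon count.
  std::vector<Photon> photons;
  for (const auto& local : perThread) {
    photons.insert(photons.end(), local.begin(), local.end());
  }
  return photons;
}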
kmp_stats.h
#ifndef KMP_STATS_H #define KMP_STATS_H /** @file kmp_stats.h * Functions for collecting statistics. */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "kmp_config.h" #include "kmp_debug.h" #if KMP_STATS_ENABLED /* Statistics accumulator. Accumulates number of samples and computes min, max, mean, standard deviation on the fly. Online variance calculation algorithm from http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm */ #include "kmp_stats_timing.h" #include <limits> #include <math.h> #include <new> // placement new #include <stdint.h> #include <string> #include <vector> /* Enable developer statistics here if you want them. They are more detailed than is useful for application characterisation and are intended for the runtime library developer. */ #define KMP_DEVELOPER_STATS 0 /* Enable/Disable histogram output */ #define KMP_STATS_HIST 0 /*! * @ingroup STATS_GATHERING * \brief flags to describe the statistic (timer or counter) * */ enum stats_flags_e { noTotal = 1 << 0, //!< do not show a TOTAL_aggregation for this statistic onlyInMaster = 1 << 1, //!< statistic is valid only for master noUnits = 1 << 2, //!< statistic doesn't need units printed next to it notInMaster = 1 << 3, //!< statistic is valid only for non-master threads logEvent = 1 << 4 //!< statistic can be logged on the event timeline when //! KMP_STATS_EVENTS is on (valid only for timers) }; /*! * @ingroup STATS_GATHERING * \brief the states which a thread can be in * */ enum stats_state_e { IDLE, SERIAL_REGION, FORK_JOIN_BARRIER, PLAIN_BARRIER, TASKWAIT, TASKYIELD, TASKGROUP, IMPLICIT_TASK, EXPLICIT_TASK, TEAMS_REGION }; /*! * \brief Add new counters under KMP_FOREACH_COUNTER() macro in kmp_stats.h * * @param macro a user defined macro that takes three arguments - * macro(COUNTER_NAME, flags, arg) * @param arg a user defined argument to send to the user defined macro * * \details A counter counts the occurrence of some event. Each thread * accumulates its own count, at the end of execution the counts are aggregated * treating each thread as a separate measurement. (Unless onlyInMaster is set, * in which case there's only a single measurement). The min,mean,max are * therefore the values for the threads. Adding the counter here and then * putting a KMP_BLOCK_COUNTER(name) at the point you want to count is all you * need to do. All of the tables and printing is generated from this macro. 
* Format is "macro(name, flags, arg)" * * @ingroup STATS_GATHERING */ // clang-format off #define KMP_FOREACH_COUNTER(macro, arg) \ macro(OMP_PARALLEL,stats_flags_e::onlyInMaster|stats_flags_e::noTotal,arg) \ macro(OMP_NESTED_PARALLEL, 0, arg) \ macro(OMP_LOOP_STATIC, 0, arg) \ macro(OMP_LOOP_STATIC_STEAL, 0, arg) \ macro(OMP_LOOP_DYNAMIC, 0, arg) \ macro(OMP_DISTRIBUTE, 0, arg) \ macro(OMP_BARRIER, 0, arg) \ macro(OMP_CRITICAL, 0, arg) \ macro(OMP_SINGLE, 0, arg) \ macro(OMP_MASTER, 0, arg) \ macro(OMP_TEAMS, 0, arg) \ macro(OMP_set_lock, 0, arg) \ macro(OMP_test_lock, 0, arg) \ macro(REDUCE_wait, 0, arg) \ macro(REDUCE_nowait, 0, arg) \ macro(OMP_TASKYIELD, 0, arg) \ macro(OMP_TASKLOOP, 0, arg) \ macro(TASK_executed, 0, arg) \ macro(TASK_cancelled, 0, arg) \ macro(TASK_stolen, 0, arg) // clang-format on /*! * \brief Add new timers under KMP_FOREACH_TIMER() macro in kmp_stats.h * * @param macro a user defined macro that takes three arguments - * macro(TIMER_NAME, flags, arg) * @param arg a user defined argument to send to the user defined macro * * \details A timer collects multiple samples of some count in each thread and * then finally aggregates all of the samples from all of the threads. For most * timers the printing code also provides an aggregation over the thread totals. * These are printed as TOTAL_foo. The count is normally a time (in ticks), * hence the name "timer". (But can be any value, so we use this for "number of * arguments passed to fork" as well). For timers the threads are not * significant, it's the individual observations that count, so the statistics * are at that level. Format is "macro(name, flags, arg)" * * @ingroup STATS_GATHERING2 */ // clang-format off #define KMP_FOREACH_TIMER(macro, arg) \ macro (OMP_worker_thread_life, stats_flags_e::logEvent, arg) \ macro (OMP_parallel, stats_flags_e::logEvent, arg) \ macro (OMP_parallel_overhead, stats_flags_e::logEvent, arg) \ macro (OMP_teams, stats_flags_e::logEvent, arg) \ macro (OMP_teams_overhead, stats_flags_e::logEvent, arg) \ macro (OMP_loop_static, 0, arg) \ macro (OMP_loop_static_scheduling, 0, arg) \ macro (OMP_loop_dynamic, 0, arg) \ macro (OMP_loop_dynamic_scheduling, 0, arg) \ macro (OMP_distribute, 0, arg) \ macro (OMP_distribute_scheduling, 0, arg) \ macro (OMP_critical, 0, arg) \ macro (OMP_critical_wait, 0, arg) \ macro (OMP_single, 0, arg) \ macro (OMP_master, 0, arg) \ macro (OMP_task_immediate, 0, arg) \ macro (OMP_task_taskwait, 0, arg) \ macro (OMP_task_taskyield, 0, arg) \ macro (OMP_task_taskgroup, 0, arg) \ macro (OMP_task_join_bar, 0, arg) \ macro (OMP_task_plain_bar, 0, arg) \ macro (OMP_taskloop_scheduling, 0, arg) \ macro (OMP_plain_barrier, stats_flags_e::logEvent, arg) \ macro (OMP_idle, stats_flags_e::logEvent, arg) \ macro (OMP_fork_barrier, stats_flags_e::logEvent, arg) \ macro (OMP_join_barrier, stats_flags_e::logEvent, arg) \ macro (OMP_serial, stats_flags_e::logEvent, arg) \ macro (OMP_set_numthreads, stats_flags_e::noUnits | stats_flags_e::noTotal, \ arg) \ macro (OMP_PARALLEL_args, stats_flags_e::noUnits | stats_flags_e::noTotal, \ arg) \ macro (OMP_loop_static_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_loop_static_total_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_loop_dynamic_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_loop_dynamic_total_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_distribute_iterations, \ stats_flags_e::noUnits | 
stats_flags_e::noTotal, arg) \ KMP_FOREACH_DEVELOPER_TIMER(macro, arg) // clang-format on // OMP_worker_thread_life -- Time from thread becoming an OpenMP thread (either // initializing OpenMP or being created by a master) // until the thread is destroyed // OMP_parallel -- Time thread spends executing work directly // within a #pragma omp parallel // OMP_parallel_overhead -- Time thread spends setting up a parallel region // OMP_loop_static -- Time thread spends executing loop iterations from // a statically scheduled loop // OMP_loop_static_scheduling -- Time thread spends scheduling loop iterations // from a statically scheduled loop // OMP_loop_dynamic -- Time thread spends executing loop iterations from // a dynamically scheduled loop // OMP_loop_dynamic_scheduling -- Time thread spends scheduling loop iterations // from a dynamically scheduled loop // OMP_critical -- Time thread spends executing critical section // OMP_critical_wait -- Time thread spends waiting to enter // a critical section // OMP_single -- Time spent executing a "single" region // OMP_master -- Time spent executing a "master" region // OMP_task_immediate -- Time spent executing non-deferred tasks // OMP_task_taskwait -- Time spent executing tasks inside a taskwait // construct // OMP_task_taskyield -- Time spent executing tasks inside a taskyield // construct // OMP_task_taskgroup -- Time spent executing tasks inside a taskgroup // construct // OMP_task_join_bar -- Time spent executing tasks inside a join barrier // OMP_task_plain_bar -- Time spent executing tasks inside a barrier // construct // OMP_taskloop_scheduling -- Time spent scheduling tasks inside a taskloop // construct // OMP_plain_barrier -- Time spent in a #pragma omp barrier construct or // inside implicit barrier at end of worksharing // construct // OMP_idle -- Time worker threads spend waiting for next // parallel region // OMP_fork_barrier -- Time spent in the fork barrier surrounding a // parallel region // OMP_join_barrier -- Time spent in the join barrier surrounding a // parallel region // OMP_serial -- Time thread zero spends executing serial code // OMP_set_numthreads -- Values passed to omp_set_num_threads // OMP_PARALLEL_args -- Number of arguments passed to a parallel region // OMP_loop_static_iterations -- Number of iterations thread is assigned for // statically scheduled loops // OMP_loop_dynamic_iterations -- Number of iterations thread is assigned for // dynamically scheduled loops #if (KMP_DEVELOPER_STATS) // Timers which are of interest to runtime library developers, not end users. // These have to be explicitly enabled in addition to the other stats. 
// KMP_fork_barrier -- time in __kmp_fork_barrier // KMP_join_barrier -- time in __kmp_join_barrier // KMP_barrier -- time in __kmp_barrier // KMP_end_split_barrier -- time in __kmp_end_split_barrier // KMP_setup_icv_copy -- time in __kmp_setup_icv_copy // KMP_icv_copy -- start/stop timer for any ICV copying // KMP_linear_gather -- time in __kmp_linear_barrier_gather // KMP_linear_release -- time in __kmp_linear_barrier_release // KMP_tree_gather -- time in __kmp_tree_barrier_gather // KMP_tree_release -- time in __kmp_tree_barrier_release // KMP_hyper_gather -- time in __kmp_hyper_barrier_gather // KMP_hyper_release -- time in __kmp_hyper_barrier_release // clang-format off #define KMP_FOREACH_DEVELOPER_TIMER(macro, arg) \ macro(KMP_fork_call, 0, arg) \ macro(KMP_join_call, 0, arg) \ macro(KMP_end_split_barrier, 0, arg) \ macro(KMP_hier_gather, 0, arg) \ macro(KMP_hier_release, 0, arg) \ macro(KMP_hyper_gather, 0, arg) \ macro(KMP_hyper_release, 0, arg) \ macro(KMP_linear_gather, 0, arg) \ macro(KMP_linear_release, 0, arg) \ macro(KMP_tree_gather, 0, arg) \ macro(KMP_tree_release, 0, arg) \ macro(USER_resume, 0, arg) \ macro(USER_suspend, 0, arg) \ macro(KMP_allocate_team, 0, arg) \ macro(KMP_setup_icv_copy, 0, arg) \ macro(USER_icv_copy, 0, arg) \ macro (FOR_static_steal_stolen, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (FOR_static_steal_chunks, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) #else #define KMP_FOREACH_DEVELOPER_TIMER(macro, arg) #endif // clang-format on /*! * \brief Add new explicit timers under KMP_FOREACH_EXPLICIT_TIMER() macro. * * @param macro a user defined macro that takes three arguments - * macro(TIMER_NAME, flags, arg) * @param arg a user defined argument to send to the user defined macro * * \warning YOU MUST HAVE THE SAME NAMED TIMER UNDER KMP_FOREACH_TIMER() OR ELSE * BAD THINGS WILL HAPPEN! * * \details Explicit timers are ones where we need to allocate a timer itself * (as well as the accumulated timing statistics). We allocate these on a * per-thread basis, and explicitly start and stop them. Block timers just * allocate the timer itself on the stack, and use the destructor to notice * block exit; they don't need to be defined here. The name here should be the * same as that of a timer above. * * @ingroup STATS_GATHERING */ #define KMP_FOREACH_EXPLICIT_TIMER(macro, arg) KMP_FOREACH_TIMER(macro, arg) #define ENUMERATE(name, ignore, prefix) prefix##name, enum timer_e { KMP_FOREACH_TIMER(ENUMERATE, TIMER_) TIMER_LAST }; enum explicit_timer_e { KMP_FOREACH_EXPLICIT_TIMER(ENUMERATE, EXPLICIT_TIMER_) EXPLICIT_TIMER_LAST }; enum counter_e { KMP_FOREACH_COUNTER(ENUMERATE, COUNTER_) COUNTER_LAST }; #undef ENUMERATE /* * A logarithmic histogram. It accumulates the number of values in each power of * ten bin. So 1<=x<10, 10<=x<100, ... * Mostly useful where we have some big outliers and want to see information * about them. */ class logHistogram { enum { numBins = 31, /* Number of powers of 10. If this changes you need to change * the initializer for binMax */ /* * If you want to use this to analyse values that may be less than 1, (for * instance times in s), then the logOffset gives you negative powers. * In our case here, we're just looking at times in ticks, or counts, so we * can never see values with magnitude < 1 (other than zero), so we can set * it to 0. As above change the initializer if you change this. 
*/ logOffset = 0 }; uint32_t KMP_ALIGN_CACHE zeroCount; struct { uint32_t count; double total; } bins[numBins]; static double binMax[numBins]; #ifdef KMP_DEBUG uint64_t _total; void check() const { uint64_t t = zeroCount; for (int i = 0; i < numBins; i++) t += bins[i].count; KMP_DEBUG_ASSERT(t == _total); } #else void check() const {} #endif public: logHistogram() { reset(); } logHistogram(logHistogram const &o) { for (int i = 0; i < numBins; i++) bins[i] = o.bins[i]; #ifdef KMP_DEBUG _total = o._total; #endif } void reset() { zeroCount = 0; for (int i = 0; i < numBins; i++) { bins[i].count = 0; bins[i].total = 0; } #ifdef KMP_DEBUG _total = 0; #endif } uint32_t count(int b) const { return bins[b + logOffset].count; } double total(int b) const { return bins[b + logOffset].total; } static uint32_t findBin(double sample); logHistogram &operator+=(logHistogram const &o) { zeroCount += o.zeroCount; for (int i = 0; i < numBins; i++) { bins[i].count += o.bins[i].count; bins[i].total += o.bins[i].total; } #ifdef KMP_DEBUG _total += o._total; check(); #endif return *this; } void addSample(double sample); int minBin() const; int maxBin() const; std::string format(char) const; }; class statistic { double KMP_ALIGN_CACHE minVal; double maxVal; double meanVal; double m2; uint64_t sampleCount; double offset; bool collectingHist; logHistogram hist; public: statistic(bool doHist = bool(KMP_STATS_HIST)) { reset(); collectingHist = doHist; } statistic(statistic const &o) : minVal(o.minVal), maxVal(o.maxVal), meanVal(o.meanVal), m2(o.m2), sampleCount(o.sampleCount), offset(o.offset), collectingHist(o.collectingHist), hist(o.hist) {} statistic(double minv, double maxv, double meanv, uint64_t sc, double sd) : minVal(minv), maxVal(maxv), meanVal(meanv), m2(sd * sd * sc), sampleCount(sc), offset(0.0), collectingHist(false) {} bool haveHist() const { return collectingHist; } double getMin() const { return minVal; } double getMean() const { return meanVal; } double getMax() const { return maxVal; } uint64_t getCount() const { return sampleCount; } double getSD() const { return sqrt(m2 / sampleCount); } double getTotal() const { return sampleCount * meanVal; } logHistogram const *getHist() const { return &hist; } void setOffset(double d) { offset = d; } void reset() { minVal = std::numeric_limits<double>::max(); maxVal = -minVal; meanVal = 0.0; m2 = 0.0; sampleCount = 0; offset = 0.0; hist.reset(); } void addSample(double sample); void scale(double factor); void scaleDown(double f) { scale(1. 
/ f); } void forceCount(uint64_t count) { sampleCount = count; } statistic &operator+=(statistic const &other); std::string format(char unit, bool total = false) const; std::string formatHist(char unit) const { return hist.format(unit); } }; struct statInfo { const char *name; uint32_t flags; }; class timeStat : public statistic { static statInfo timerInfo[]; public: timeStat() : statistic() {} static const char *name(timer_e e) { return timerInfo[e].name; } static bool noTotal(timer_e e) { return timerInfo[e].flags & stats_flags_e::noTotal; } static bool masterOnly(timer_e e) { return timerInfo[e].flags & stats_flags_e::onlyInMaster; } static bool workerOnly(timer_e e) { return timerInfo[e].flags & stats_flags_e::notInMaster; } static bool noUnits(timer_e e) { return timerInfo[e].flags & stats_flags_e::noUnits; } static bool logEvent(timer_e e) { return timerInfo[e].flags & stats_flags_e::logEvent; } static void clearEventFlags() { for (int i = 0; i < TIMER_LAST; i++) { timerInfo[i].flags &= (~(stats_flags_e::logEvent)); } } }; // Where we need to explicitly start and end the timer, this version can be used. // Since these timers normally aren't nicely scoped and don't have a good place // to live on the stack of the thread, they're more work to use. class explicitTimer { timeStat *stat; timer_e timerEnumValue; tsc_tick_count startTime; tsc_tick_count pauseStartTime; tsc_tick_count::tsc_interval_t totalPauseTime; public: explicitTimer(timeStat *s, timer_e te) : stat(s), timerEnumValue(te), startTime(), pauseStartTime(0), totalPauseTime() {} // void setStat(timeStat *s) { stat = s; } void start(tsc_tick_count tick); void pause(tsc_tick_count tick) { pauseStartTime = tick; } void resume(tsc_tick_count tick) { totalPauseTime += (tick - pauseStartTime); } void stop(tsc_tick_count tick, kmp_stats_list *stats_ptr = nullptr); void reset() { startTime = 0; pauseStartTime = 0; totalPauseTime = 0; } timer_e get_type() const { return timerEnumValue; } }; // Where you need to partition a thread's clock ticks into separate states // e.g., a partitionedTimers class with two timers of EXECUTING_TASK, and // DOING_NOTHING would satisfy these conditions: // time(EXECUTING_TASK) + time(DOING_NOTHING) = total time thread is alive // No clock tick in the EXECUTING_TASK is a member of DOING_NOTHING and vice // versa class partitionedTimers { private: std::vector<explicitTimer> timer_stack; public: partitionedTimers(); void init(explicitTimer timer); void exchange(explicitTimer timer); void push(explicitTimer timer); void pop(); void windup(); }; // Special wrapper around the partitioned timers to aid timing code blocks. // It avoids the need for an explicit end; leaving the scope suffices. class blockPartitionedTimer { partitionedTimers *part_timers; public: blockPartitionedTimer(partitionedTimers *pt, explicitTimer timer) : part_timers(pt) { part_timers->push(timer); } ~blockPartitionedTimer() { part_timers->pop(); } }; // Special wrapper around the thread state to aid in keeping state in code // blocks. It avoids the need for an explicit end; leaving the scope // suffices. class blockThreadState { stats_state_e *state_pointer; stats_state_e old_state; public: blockThreadState(stats_state_e *thread_state_pointer, stats_state_e new_state) : state_pointer(thread_state_pointer), old_state(*thread_state_pointer) { *state_pointer = new_state; } ~blockThreadState() { *state_pointer = old_state; } }; // If all you want is a count, then you can use this... 
// The individual per-thread counts will be aggregated into a statistic at // program exit. class counter { uint64_t value; static const statInfo counterInfo[]; public: counter() : value(0) {} void increment() { value++; } uint64_t getValue() const { return value; } void reset() { value = 0; } static const char *name(counter_e e) { return counterInfo[e].name; } static bool masterOnly(counter_e e) { return counterInfo[e].flags & stats_flags_e::onlyInMaster; } }; /* **************************************************************** Class to implement an event There are four components to an event: start time, stop time, nest_level, and timer_name. The start and stop time should be obvious (recorded in clock ticks). The nest_level relates to the bar width in the timeline graph. The timer_name is used to determine which timer event triggered this event. the interface to this class is through four read-only operations: 1) getStart() -- returns the start time as 64 bit integer 2) getStop() -- returns the stop time as 64 bit integer 3) getNestLevel() -- returns the nest level of the event 4) getTimerName() -- returns the timer name that triggered event *MORE ON NEST_LEVEL* The nest level is used in the bar graph that represents the timeline. Its main purpose is for showing how events are nested inside each other. For example, say events, A, B, and C are recorded. If the timeline looks like this: Begin -------------------------------------------------------------> Time | | | | | | A B C C B A start start start end end end Then A, B, C will have a nest level of 1, 2, 3 respectively. These values are then used to calculate the bar width so you can see that inside A, B has occurred, and inside B, C has occurred. Currently, this is shown with A's bar width being larger than B's bar width, and B's bar width being larger than C's bar width. **************************************************************** */ class kmp_stats_event { uint64_t start; uint64_t stop; int nest_level; timer_e timer_name; public: kmp_stats_event() : start(0), stop(0), nest_level(0), timer_name(TIMER_LAST) {} kmp_stats_event(uint64_t strt, uint64_t stp, int nst, timer_e nme) : start(strt), stop(stp), nest_level(nst), timer_name(nme) {} inline uint64_t getStart() const { return start; } inline uint64_t getStop() const { return stop; } inline int getNestLevel() const { return nest_level; } inline timer_e getTimerName() const { return timer_name; } }; /* **************************************************************** Class to implement a dynamically expandable array of events --------------------------------------------------------- | event 1 | event 2 | event 3 | event 4 | ... | event N | --------------------------------------------------------- An event is pushed onto the back of this array at every explicitTimer->stop() call. The event records the thread #, start time, stop time, and nest level related to the bar width. The event vector starts at size INIT_SIZE and grows (doubles in size) if needed. An implication of this behavior is that log(N) reallocations are needed (where N is number of events). If you want to avoid reallocations, then set INIT_SIZE to a large value. 
the interface to this class is through six operations: 1) reset() -- sets the internal_size back to 0 but does not deallocate any memory 2) size() -- returns the number of valid elements in the vector 3) push_back(start, stop, nest, timer_name) -- pushes an event onto the back of the array 4) deallocate() -- frees all memory associated with the vector 5) sort() -- sorts the vector by start time 6) operator[index] or at(index) -- returns event reference at that index **************************************************************** */ class kmp_stats_event_vector { kmp_stats_event *events; int internal_size; int allocated_size; static const int INIT_SIZE = 1024; public: kmp_stats_event_vector() { events = (kmp_stats_event *)__kmp_allocate(sizeof(kmp_stats_event) * INIT_SIZE); internal_size = 0; allocated_size = INIT_SIZE; } ~kmp_stats_event_vector() {} inline void reset() { internal_size = 0; } inline int size() const { return internal_size; } void push_back(uint64_t start_time, uint64_t stop_time, int nest_level, timer_e name) { int i; if (internal_size == allocated_size) { kmp_stats_event *tmp = (kmp_stats_event *)__kmp_allocate( sizeof(kmp_stats_event) * allocated_size * 2); for (i = 0; i < internal_size; i++) tmp[i] = events[i]; __kmp_free(events); events = tmp; allocated_size *= 2; } events[internal_size] = kmp_stats_event(start_time, stop_time, nest_level, name); internal_size++; return; } void deallocate(); void sort(); const kmp_stats_event &operator[](int index) const { return events[index]; } kmp_stats_event &operator[](int index) { return events[index]; } const kmp_stats_event &at(int index) const { return events[index]; } kmp_stats_event &at(int index) { return events[index]; } }; /* **************************************************************** Class to implement a doubly-linked, circular, statistics list |---| ---> |---| ---> |---| ---> |---| ---> ... next | | | | | | | | |---| <--- |---| <--- |---| <--- |---| <--- ... prev Sentinel first second third Node node node node The Sentinel Node is the user handle on the list. The first node corresponds to thread 0's statistics. The second node corresponds to thread 1's statistics and so on... Each node has a _timers, _counters, and _explicitTimers array to hold that thread's statistics. The _explicitTimers point to the correct _timer and update its statistics at every stop() call. The explicitTimers' pointers are set up in the constructor. Each node also has an event vector to hold that thread's timing events. The event vector expands as necessary and records the start-stop times for each timer. The nestLevel variable is for plotting events and is related to the bar width in the timeline graph. Every thread will have a thread local pointer to its node in the list. The sentinel node is used by the master thread to store "dummy" statistics before __kmp_create_worker() is called. 
**************************************************************** */ class kmp_stats_list { int gtid; timeStat _timers[TIMER_LAST + 1]; counter _counters[COUNTER_LAST + 1]; explicitTimer thread_life_timer; partitionedTimers _partitionedTimers; int _nestLevel; // one per thread kmp_stats_event_vector _event_vector; kmp_stats_list *next; kmp_stats_list *prev; stats_state_e state; int thread_is_idle_flag; public: kmp_stats_list() : thread_life_timer(&_timers[TIMER_OMP_worker_thread_life], TIMER_OMP_worker_thread_life), _nestLevel(0), _event_vector(), next(this), prev(this), state(IDLE), thread_is_idle_flag(0) {} ~kmp_stats_list() {} inline timeStat *getTimer(timer_e idx) { return &_timers[idx]; } inline counter *getCounter(counter_e idx) { return &_counters[idx]; } inline partitionedTimers *getPartitionedTimers() { return &_partitionedTimers; } inline timeStat *getTimers() { return _timers; } inline counter *getCounters() { return _counters; } inline kmp_stats_event_vector &getEventVector() { return _event_vector; } inline void startLife() { thread_life_timer.start(tsc_tick_count::now()); } inline void endLife() { thread_life_timer.stop(tsc_tick_count::now(), this); } inline void resetEventVector() { _event_vector.reset(); } inline void incrementNestValue() { _nestLevel++; } inline int getNestValue() { return _nestLevel; } inline void decrementNestValue() { _nestLevel--; } inline int getGtid() const { return gtid; } inline void setGtid(int newgtid) { gtid = newgtid; } inline void setState(stats_state_e newstate) { state = newstate; } inline stats_state_e getState() const { return state; } inline stats_state_e *getStatePointer() { return &state; } inline bool isIdle() { return thread_is_idle_flag == 1; } inline void setIdleFlag() { thread_is_idle_flag = 1; } inline void resetIdleFlag() { thread_is_idle_flag = 0; } kmp_stats_list *push_back(int gtid); // returns newly created list node inline void push_event(uint64_t start_time, uint64_t stop_time, int nest_level, timer_e name) { _event_vector.push_back(start_time, stop_time, nest_level, name); } void deallocate(); class iterator; kmp_stats_list::iterator begin(); kmp_stats_list::iterator end(); int size(); class iterator { kmp_stats_list *ptr; friend kmp_stats_list::iterator kmp_stats_list::begin(); friend kmp_stats_list::iterator kmp_stats_list::end(); public: iterator(); ~iterator(); iterator operator++(); iterator operator++(int dummy); iterator operator--(); iterator operator--(int dummy); bool operator!=(const iterator &rhs); bool operator==(const iterator &rhs); kmp_stats_list *operator*() const; // dereference operator }; }; /* **************************************************************** Class to encapsulate all output functions and the environment variables This module holds filenames for various outputs (normal stats, events, plot file), as well as coloring information for the plot file. The filenames and flags variables are read from environment variables. These are read once by the constructor of the global variable __kmp_stats_output which calls init(). During this init() call, event flags for the timeStat::timerInfo[] global array are cleared if KMP_STATS_EVENTS is not true (on, 1, yes). The only interface function that is public is outputStats(heading). 
This function should print out everything it needs to, either to files or stderr, depending on the environment variables described below ENVIRONMENT VARIABLES: KMP_STATS_FILE -- if set, all statistics (not events) will be printed to this file, otherwise, print to stderr KMP_STATS_THREADS -- if set to "on", then will print per thread statistics to either KMP_STATS_FILE or stderr KMP_STATS_PLOT_FILE -- if set, print the ploticus plot file to this filename, otherwise, the plot file is sent to "events.plt" KMP_STATS_EVENTS -- if set to "on", then log events, otherwise, don't log events KMP_STATS_EVENTS_FILE -- if set, all events are outputted to this file, otherwise, output is sent to "events.dat" **************************************************************** */ class kmp_stats_output_module { public: struct rgb_color { float r; float g; float b; }; private: std::string outputFileName; static const char *eventsFileName; static const char *plotFileName; static int printPerThreadFlag; static int printPerThreadEventsFlag; static const rgb_color globalColorArray[]; static rgb_color timerColorInfo[]; void init(); static void setupEventColors(); static void printPloticusFile(); static void printHeaderInfo(FILE *statsOut); static void printTimerStats(FILE *statsOut, statistic const *theStats, statistic const *totalStats); static void printCounterStats(FILE *statsOut, statistic const *theStats); static void printCounters(FILE *statsOut, counter const *theCounters); static void printEvents(FILE *eventsOut, kmp_stats_event_vector *theEvents, int gtid); static rgb_color getEventColor(timer_e e) { return timerColorInfo[e]; } static void windupExplicitTimers(); bool eventPrintingEnabled() const { return printPerThreadEventsFlag; } public: kmp_stats_output_module() { init(); } void outputStats(const char *heading); }; #ifdef __cplusplus extern "C" { #endif void __kmp_stats_init(); void __kmp_stats_fini(); void __kmp_reset_stats(); void __kmp_output_stats(const char *); void __kmp_accumulate_stats_at_exit(void); // thread local pointer to stats node within list extern KMP_THREAD_LOCAL kmp_stats_list *__kmp_stats_thread_ptr; // head to stats list. extern kmp_stats_list *__kmp_stats_list; // lock for __kmp_stats_list extern kmp_tas_lock_t __kmp_stats_lock; // reference start time extern tsc_tick_count __kmp_stats_start_time; // interface to output extern kmp_stats_output_module __kmp_stats_output; #ifdef __cplusplus } #endif // Simple, standard interfaces that drop out completely if stats aren't enabled /*! * \brief Adds value to specified timer (name). * * @param name timer name as specified under the KMP_FOREACH_TIMER() macro * @param value double precision sample value to add to statistics for the timer * * \details Use KMP_COUNT_VALUE(name, value) macro to add a particular value to * a timer's statistics. * * @ingroup STATS_GATHERING */ #define KMP_COUNT_VALUE(name, value) \ __kmp_stats_thread_ptr->getTimer(TIMER_##name)->addSample(value) /*! * \brief Increments specified counter (name). * * @param name counter name as specified under the KMP_FOREACH_COUNTER() macro * * \details Use KMP_COUNT_BLOCK(name, value) macro to increment a statistics * counter for the executing thread. * * @ingroup STATS_GATHERING */ #define KMP_COUNT_BLOCK(name) \ __kmp_stats_thread_ptr->getCounter(COUNTER_##name)->increment() /*! * \brief Outputs the current thread statistics and resets them. * * @param heading_string heading put above the final stats output * * \details Explicitly stops all timers and outputs all stats. 
Environment * variable, `OMPTB_STATSFILE=filename`, can be used to output the stats to a * filename instead of stderr. Environment variable, * `OMPTB_STATSTHREADS=true|undefined`, can be used to output thread specific * stats. For now the `OMPTB_STATSTHREADS` environment variable can either be * defined with any value, which will print out thread specific stats, or it can * be undefined (not specified in the environment) and thread specific stats * won't be printed. It should be noted that all statistics are reset when this * macro is called. * * @ingroup STATS_GATHERING */ #define KMP_OUTPUT_STATS(heading_string) __kmp_output_stats(heading_string) /*! * \brief Initializes the partitioned timers to begin with name. * * @param name timer which you want this thread to begin with * * @ingroup STATS_GATHERING */ #define KMP_INIT_PARTITIONED_TIMERS(name) \ __kmp_stats_thread_ptr->getPartitionedTimers()->init(explicitTimer( \ __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name)) #define KMP_TIME_PARTITIONED_BLOCK(name) \ blockPartitionedTimer __PBLOCKTIME__( \ __kmp_stats_thread_ptr->getPartitionedTimers(), \ explicitTimer(__kmp_stats_thread_ptr->getTimer(TIMER_##name), \ TIMER_##name)) #define KMP_PUSH_PARTITIONED_TIMER(name) \ __kmp_stats_thread_ptr->getPartitionedTimers()->push(explicitTimer( \ __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name)) #define KMP_POP_PARTITIONED_TIMER() \ __kmp_stats_thread_ptr->getPartitionedTimers()->pop() #define KMP_EXCHANGE_PARTITIONED_TIMER(name) \ __kmp_stats_thread_ptr->getPartitionedTimers()->exchange(explicitTimer( \ __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name)) #define KMP_SET_THREAD_STATE(state_name) \ __kmp_stats_thread_ptr->setState(state_name) #define KMP_GET_THREAD_STATE() __kmp_stats_thread_ptr->getState() #define KMP_SET_THREAD_STATE_BLOCK(state_name) \ blockThreadState __BTHREADSTATE__(__kmp_stats_thread_ptr->getStatePointer(), \ state_name) /*! * \brief resets all stats (counters to 0, timers to 0 elapsed ticks) * * \details Reset all stats for all threads. 
* * @ingroup STATS_GATHERING */ #define KMP_RESET_STATS() __kmp_reset_stats() #if (KMP_DEVELOPER_STATS) #define KMP_COUNT_DEVELOPER_VALUE(n, v) KMP_COUNT_VALUE(n, v) #define KMP_COUNT_DEVELOPER_BLOCK(n) KMP_COUNT_BLOCK(n) #define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) KMP_TIME_PARTITIONED_BLOCK(n) #define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) KMP_PUSH_PARTITIONED_TIMER(n) #define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) KMP_POP_PARTITIONED_TIMER(n) #define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) \ KMP_EXCHANGE_PARTITIONED_TIMER(n) #else // Null definitions #define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0) #define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0) #define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0) #define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #endif #else // KMP_STATS_ENABLED // Null definitions #define KMP_COUNT_VALUE(n, v) ((void)0) #define KMP_COUNT_BLOCK(n) ((void)0) #define KMP_OUTPUT_STATS(heading_string) ((void)0) #define KMP_RESET_STATS() ((void)0) #define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0) #define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0) #define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0) #define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_INIT_PARTITIONED_TIMERS(name) ((void)0) #define KMP_TIME_PARTITIONED_BLOCK(name) ((void)0) #define KMP_PUSH_PARTITIONED_TIMER(name) ((void)0) #define KMP_POP_PARTITIONED_TIMER() ((void)0) #define KMP_SET_THREAD_STATE(state_name) ((void)0) #define KMP_GET_THREAD_STATE() ((void)0) #define KMP_SET_THREAD_STATE_BLOCK(state_name) ((void)0) #endif // KMP_STATS_ENABLED #endif // KMP_STATS_H
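The statistic class above only declares addSample; the header's opening comment says the implementation uses the on-line (Welford) variance algorithm, and getSD() returns sqrt(m2 / sampleCount). A minimal illustration of that update, consistent with those accessors but not the runtime's actual code (which lives in the implementation file, not this header):

#include <cmath>
#include <cstdint>
#include <limits>

// Illustrative Welford accumulator: after N samples, meanVal is the running
// mean and m2 is the sum of squared deviations from the mean, so the
// (population) variance is m2 / N, matching getSD() above.
class RunningStat {
  double minVal = std::numeric_limits<double>::max();
  double maxVal = -std::numeric_limits<double>::max();
  double meanVal = 0.0;
  double m2 = 0.0;
  uint64_t sampleCount = 0;

public:
  void addSample(double sample) {
    ++sampleCount;
    minVal = std::fmin(minVal, sample);
    maxVal = std::fmax(maxVal, sample);
    const double delta = sample - meanVal;
    meanVal += delta / sampleCount;   // incremental mean update
    m2 += delta * (sample - meanVal); // uses both old and new mean
  }
  double getMean() const { return meanVal; }
  double getSD() const {
    return sampleCount ? std::sqrt(m2 / sampleCount) : 0.0;
  }
};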
#ifndef KMP_STATS_H #define KMP_STATS_H /** @file kmp_stats.h * Functions for collecting statistics. */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "kmp_config.h" #include "kmp_debug.h" #if KMP_STATS_ENABLED /* Statistics accumulator. Accumulates the number of samples and computes min, max, mean, standard deviation on the fly. Online variance calculation algorithm from http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm */ #include "kmp_stats_timing.h" #include <limits> #include <math.h> #include <new> // placement new #include <stdint.h> #include <string> #include <vector> /* Enable developer statistics here if you want them. They are more detailed than is useful for application characterisation and are intended for the runtime library developer. */ #define KMP_DEVELOPER_STATS 0 /* Enable/Disable histogram output */ #define KMP_STATS_HIST 0 /*! * @ingroup STATS_GATHERING * \brief flags to describe the statistic (timer or counter) * */ enum stats_flags_e { noTotal = 1 << 0, //!< do not show a TOTAL_aggregation for this statistic onlyInMaster = 1 << 1, //!< statistic is valid only for master noUnits = 1 << 2, //!< statistic doesn't need units printed next to it notInMaster = 1 << 3, //!< statistic is valid only for non-master threads logEvent = 1 << 4 //!< statistic can be logged on the event timeline when //! KMP_STATS_EVENTS is on (valid only for timers) }; /*! * @ingroup STATS_GATHERING * \brief the states which a thread can be in * */ enum stats_state_e { IDLE, SERIAL_REGION, FORK_JOIN_BARRIER, PLAIN_BARRIER, TASKWAIT, TASKYIELD, TASKGROUP, IMPLICIT_TASK, EXPLICIT_TASK, TEAMS_REGION }; /*! * \brief Add new counters under KMP_FOREACH_COUNTER() macro in kmp_stats.h * * @param macro a user defined macro that takes three arguments - * macro(COUNTER_NAME, flags, arg) * @param arg a user defined argument to send to the user defined macro * * \details A counter counts the occurrence of some event. Each thread * accumulates its own count; at the end of execution the counts are aggregated, * treating each thread as a separate measurement. (Unless onlyInMaster is set, * in which case there's only a single measurement.) The min, mean, max are * therefore the values for the threads. Adding the counter here and then * putting a KMP_COUNT_BLOCK(name) at the point you want to count is all you * need to do. All of the tables and printing are generated from this macro.
* Format is "macro(name, flags, arg)" * * @ingroup STATS_GATHERING */ // clang-format off #define KMP_FOREACH_COUNTER(macro, arg) \ macro(OMP_PARALLEL,stats_flags_e::onlyInMaster|stats_flags_e::noTotal,arg) \ macro(OMP_NESTED_PARALLEL, 0, arg) \ macro(OMP_LOOP_STATIC, 0, arg) \ macro(OMP_LOOP_STATIC_STEAL, 0, arg) \ macro(OMP_LOOP_DYNAMIC, 0, arg) \ macro(OMP_DISTRIBUTE, 0, arg) \ macro(OMP_BARRIER, 0, arg) \ macro(OMP_CRITICAL, 0, arg) \ macro(OMP_SINGLE, 0, arg) \ macro(OMP_MASTER, 0, arg) \ macro(OMP_TEAMS, 0, arg) \ macro(OMP_set_lock, 0, arg) \ macro(OMP_test_lock, 0, arg) \ macro(REDUCE_wait, 0, arg) \ macro(REDUCE_nowait, 0, arg) \ macro(OMP_TASKYIELD, 0, arg) \ macro(OMP_TASKLOOP, 0, arg) \ macro(TASK_executed, 0, arg) \ macro(TASK_cancelled, 0, arg) \ macro(TASK_stolen, 0, arg) // clang-format on /*! * \brief Add new timers under KMP_FOREACH_TIMER() macro in kmp_stats.h * * @param macro a user defined macro that takes three arguments - * macro(TIMER_NAME, flags, arg) * @param arg a user defined argument to send to the user defined macro * * \details A timer collects multiple samples of some count in each thread and * then finally aggregates all of the samples from all of the threads. For most * timers the printing code also provides an aggregation over the thread totals. * These are printed as TOTAL_foo. The count is normally a time (in ticks), * hence the name "timer". (But can be any value, so we use this for "number of * arguments passed to fork" as well). For timers the threads are not * significant, it's the individual observations that count, so the statistics * are at that level. Format is "macro(name, flags, arg)" * * @ingroup STATS_GATHERING2 */ // clang-format off #define KMP_FOREACH_TIMER(macro, arg) \ macro (OMP_worker_thread_life, stats_flags_e::logEvent, arg) \ macro (OMP_parallel, stats_flags_e::logEvent, arg) \ macro (OMP_parallel_overhead, stats_flags_e::logEvent, arg) \ macro (OMP_teams, stats_flags_e::logEvent, arg) \ macro (OMP_teams_overhead, stats_flags_e::logEvent, arg) \ macro (OMP_loop_static, 0, arg) \ macro (OMP_loop_static_scheduling, 0, arg) \ macro (OMP_loop_dynamic, 0, arg) \ macro (OMP_loop_dynamic_scheduling, 0, arg) \ macro (OMP_distribute, 0, arg) \ macro (OMP_distribute_scheduling, 0, arg) \ macro (OMP_critical, 0, arg) \ macro (OMP_critical_wait, 0, arg) \ macro (OMP_single, 0, arg) \ macro (OMP_master, 0, arg) \ macro (OMP_task_immediate, 0, arg) \ macro (OMP_task_taskwait, 0, arg) \ macro (OMP_task_taskyield, 0, arg) \ macro (OMP_task_taskgroup, 0, arg) \ macro (OMP_task_join_bar, 0, arg) \ macro (OMP_task_plain_bar, 0, arg) \ macro (OMP_taskloop_scheduling, 0, arg) \ macro (OMP_plain_barrier, stats_flags_e::logEvent, arg) \ macro (OMP_idle, stats_flags_e::logEvent, arg) \ macro (OMP_fork_barrier, stats_flags_e::logEvent, arg) \ macro (OMP_join_barrier, stats_flags_e::logEvent, arg) \ macro (OMP_serial, stats_flags_e::logEvent, arg) \ macro (OMP_set_numthreads, stats_flags_e::noUnits | stats_flags_e::noTotal, \ arg) \ macro (OMP_PARALLEL_args, stats_flags_e::noUnits | stats_flags_e::noTotal, \ arg) \ macro (OMP_loop_static_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_loop_static_total_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_loop_dynamic_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_loop_dynamic_total_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_distribute_iterations, \ stats_flags_e::noUnits | 
stats_flags_e::noTotal, arg) \ KMP_FOREACH_DEVELOPER_TIMER(macro, arg) // clang-format on // OMP_worker_thread_life -- Time from thread becoming an OpenMP thread (either // initializing OpenMP or being created by a master) // until the thread is destroyed // OMP_parallel -- Time thread spends executing work directly // within a parallel region // OMP_parallel_overhead -- Time thread spends setting up a parallel region // OMP_loop_static -- Time thread spends executing loop iterations from // a statically scheduled loop // OMP_loop_static_scheduling -- Time thread spends scheduling loop iterations // from a statically scheduled loop // OMP_loop_dynamic -- Time thread spends executing loop iterations from // a dynamically scheduled loop // OMP_loop_dynamic_scheduling -- Time thread spends scheduling loop iterations // from a dynamically scheduled loop // OMP_critical -- Time thread spends executing critical section // OMP_critical_wait -- Time thread spends waiting to enter // a critical section // OMP_single -- Time spent executing a "single" region // OMP_master -- Time spent executing a "master" region // OMP_task_immediate -- Time spent executing non-deferred tasks // OMP_task_taskwait -- Time spent executing tasks inside a taskwait // construct // OMP_task_taskyield -- Time spent executing tasks inside a taskyield // construct // OMP_task_taskgroup -- Time spent executing tasks inside a taskgroup // construct // OMP_task_join_bar -- Time spent executing tasks inside a join barrier // OMP_task_plain_bar -- Time spent executing tasks inside a barrier // construct // OMP_taskloop_scheduling -- Time spent scheduling tasks inside a taskloop // construct // OMP_plain_barrier -- Time spent in a barrier construct or // inside implicit barrier at end of worksharing // construct // OMP_idle -- Time worker threads spend waiting for next // parallel region // OMP_fork_barrier -- Time spent in the fork barrier surrounding a // parallel region // OMP_join_barrier -- Time spent in the join barrier surrounding a // parallel region // OMP_serial -- Time thread zero spends executing serial code // OMP_set_numthreads -- Values passed to omp_set_num_threads // OMP_PARALLEL_args -- Number of arguments passed to a parallel region // OMP_loop_static_iterations -- Number of iterations thread is assigned for // statically scheduled loops // OMP_loop_dynamic_iterations -- Number of iterations thread is assigned for // dynamically scheduled loops #if (KMP_DEVELOPER_STATS) // Timers which are of interest to runtime library developers, not end users. // These have to be explicitly enabled in addition to the other stats.
// KMP_fork_barrier -- time in __kmp_fork_barrier // KMP_join_barrier -- time in __kmp_join_barrier // KMP_barrier -- time in __kmp_barrier // KMP_end_split_barrier -- time in __kmp_end_split_barrier // KMP_setup_icv_copy -- time in __kmp_setup_icv_copy // KMP_icv_copy -- start/stop timer for any ICV copying // KMP_linear_gather -- time in __kmp_linear_barrier_gather // KMP_linear_release -- time in __kmp_linear_barrier_release // KMP_tree_gather -- time in __kmp_tree_barrier_gather // KMP_tree_release -- time in __kmp_tree_barrier_release // KMP_hyper_gather -- time in __kmp_hyper_barrier_gather // KMP_hyper_release -- time in __kmp_hyper_barrier_release // clang-format off #define KMP_FOREACH_DEVELOPER_TIMER(macro, arg) \ macro(KMP_fork_call, 0, arg) \ macro(KMP_join_call, 0, arg) \ macro(KMP_end_split_barrier, 0, arg) \ macro(KMP_hier_gather, 0, arg) \ macro(KMP_hier_release, 0, arg) \ macro(KMP_hyper_gather, 0, arg) \ macro(KMP_hyper_release, 0, arg) \ macro(KMP_linear_gather, 0, arg) \ macro(KMP_linear_release, 0, arg) \ macro(KMP_tree_gather, 0, arg) \ macro(KMP_tree_release, 0, arg) \ macro(USER_resume, 0, arg) \ macro(USER_suspend, 0, arg) \ macro(KMP_allocate_team, 0, arg) \ macro(KMP_setup_icv_copy, 0, arg) \ macro(USER_icv_copy, 0, arg) \ macro (FOR_static_steal_stolen, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (FOR_static_steal_chunks, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) #else #define KMP_FOREACH_DEVELOPER_TIMER(macro, arg) #endif // clang-format on /*! * \brief Add new explicit timers under KMP_FOREACH_EXPLICIT_TIMER() macro. * * @param macro a user defined macro that takes three arguments - * macro(TIMER_NAME, flags, arg) * @param arg a user defined argument to send to the user defined macro * * \warning YOU MUST HAVE THE SAME NAMED TIMER UNDER KMP_FOREACH_TIMER() OR ELSE * BAD THINGS WILL HAPPEN! * * \details Explicit timers are ones where we need to allocate a timer itself * (as well as the accumulated timing statistics). We allocate these on a * per-thread basis, and explicitly start and stop them. Block timers just * allocate the timer itself on the stack, and use the destructor to notice * block exit; they don't need to be defined here. The name here should be the * same as that of a timer above. * * @ingroup STATS_GATHERING */ #define KMP_FOREACH_EXPLICIT_TIMER(macro, arg) KMP_FOREACH_TIMER(macro, arg) #define ENUMERATE(name, ignore, prefix) prefix##name, enum timer_e { KMP_FOREACH_TIMER(ENUMERATE, TIMER_) TIMER_LAST }; enum explicit_timer_e { KMP_FOREACH_EXPLICIT_TIMER(ENUMERATE, EXPLICIT_TIMER_) EXPLICIT_TIMER_LAST }; enum counter_e { KMP_FOREACH_COUNTER(ENUMERATE, COUNTER_) COUNTER_LAST }; #undef ENUMERATE /* * A logarithmic histogram. It accumulates the number of values in each power of * ten bin. So 1<=x<10, 10<=x<100, ... * Mostly useful where we have some big outliers and want to see information * about them. */ class logHistogram { enum { numBins = 31, /* Number of powers of 10. If this changes you need to change * the initializer for binMax */ /* * If you want to use this to analyse values that may be less than 1, (for * instance times in s), then the logOffset gives you negative powers. * In our case here, we're just looking at times in ticks, or counts, so we * can never see values with magnitude < 1 (other than zero), so we can set * it to 0. As above change the initializer if you change this. 
*/ logOffset = 0 }; uint32_t KMP_ALIGN_CACHE zeroCount; struct { uint32_t count; double total; } bins[numBins]; static double binMax[numBins]; #ifdef KMP_DEBUG uint64_t _total; void check() const { uint64_t t = zeroCount; for (int i = 0; i < numBins; i++) t += bins[i].count; KMP_DEBUG_ASSERT(t == _total); } #else void check() const {} #endif public: logHistogram() { reset(); } logHistogram(logHistogram const &o) { for (int i = 0; i < numBins; i++) bins[i] = o.bins[i]; #ifdef KMP_DEBUG _total = o._total; #endif } void reset() { zeroCount = 0; for (int i = 0; i < numBins; i++) { bins[i].count = 0; bins[i].total = 0; } #ifdef KMP_DEBUG _total = 0; #endif } uint32_t count(int b) const { return bins[b + logOffset].count; } double total(int b) const { return bins[b + logOffset].total; } static uint32_t findBin(double sample); logHistogram &operator+=(logHistogram const &o) { zeroCount += o.zeroCount; for (int i = 0; i < numBins; i++) { bins[i].count += o.bins[i].count; bins[i].total += o.bins[i].total; } #ifdef KMP_DEBUG _total += o._total; check(); #endif return *this; } void addSample(double sample); int minBin() const; int maxBin() const; std::string format(char) const; }; class statistic { double KMP_ALIGN_CACHE minVal; double maxVal; double meanVal; double m2; uint64_t sampleCount; double offset; bool collectingHist; logHistogram hist; public: statistic(bool doHist = bool(KMP_STATS_HIST)) { reset(); collectingHist = doHist; } statistic(statistic const &o) : minVal(o.minVal), maxVal(o.maxVal), meanVal(o.meanVal), m2(o.m2), sampleCount(o.sampleCount), offset(o.offset), collectingHist(o.collectingHist), hist(o.hist) {} statistic(double minv, double maxv, double meanv, uint64_t sc, double sd) : minVal(minv), maxVal(maxv), meanVal(meanv), m2(sd * sd * sc), sampleCount(sc), offset(0.0), collectingHist(false) {} bool haveHist() const { return collectingHist; } double getMin() const { return minVal; } double getMean() const { return meanVal; } double getMax() const { return maxVal; } uint64_t getCount() const { return sampleCount; } double getSD() const { return sqrt(m2 / sampleCount); } double getTotal() const { return sampleCount * meanVal; } logHistogram const *getHist() const { return &hist; } void setOffset(double d) { offset = d; } void reset() { minVal = std::numeric_limits<double>::max(); maxVal = -minVal; meanVal = 0.0; m2 = 0.0; sampleCount = 0; offset = 0.0; hist.reset(); } void addSample(double sample); void scale(double factor); void scaleDown(double f) { scale(1. 
/ f); } void forceCount(uint64_t count) { sampleCount = count; } statistic &operator+=(statistic const &other); std::string format(char unit, bool total = false) const; std::string formatHist(char unit) const { return hist.format(unit); } }; struct statInfo { const char *name; uint32_t flags; }; class timeStat : public statistic { static statInfo timerInfo[]; public: timeStat() : statistic() {} static const char *name(timer_e e) { return timerInfo[e].name; } static bool noTotal(timer_e e) { return timerInfo[e].flags & stats_flags_e::noTotal; } static bool masterOnly(timer_e e) { return timerInfo[e].flags & stats_flags_e::onlyInMaster; } static bool workerOnly(timer_e e) { return timerInfo[e].flags & stats_flags_e::notInMaster; } static bool noUnits(timer_e e) { return timerInfo[e].flags & stats_flags_e::noUnits; } static bool logEvent(timer_e e) { return timerInfo[e].flags & stats_flags_e::logEvent; } static void clearEventFlags() { for (int i = 0; i < TIMER_LAST; i++) { timerInfo[i].flags &= (~(stats_flags_e::logEvent)); } } }; // Where we need explicitly to start and end the timer, this version can be used. // Since these timers normally aren't nicely scoped and don't have a good place // to live on the stack of the thread, they're more work to use. class explicitTimer { timeStat *stat; timer_e timerEnumValue; tsc_tick_count startTime; tsc_tick_count pauseStartTime; tsc_tick_count::tsc_interval_t totalPauseTime; public: explicitTimer(timeStat *s, timer_e te) : stat(s), timerEnumValue(te), startTime(), pauseStartTime(0), totalPauseTime() {} // void setStat(timeStat *s) { stat = s; } void start(tsc_tick_count tick); void pause(tsc_tick_count tick) { pauseStartTime = tick; } void resume(tsc_tick_count tick) { totalPauseTime += (tick - pauseStartTime); } void stop(tsc_tick_count tick, kmp_stats_list *stats_ptr = nullptr); void reset() { startTime = 0; pauseStartTime = 0; totalPauseTime = 0; } timer_e get_type() const { return timerEnumValue; } }; // Where you need to partition a thread's clock ticks into separate states, // e.g., a partitionedTimers class with two timers of EXECUTING_TASK and // DOING_NOTHING would satisfy these conditions: // time(EXECUTING_TASK) + time(DOING_NOTHING) = total time thread is alive // No clock tick in the EXECUTING_TASK is a member of DOING_NOTHING and vice // versa class partitionedTimers { private: std::vector<explicitTimer> timer_stack; public: partitionedTimers(); void init(explicitTimer timer); void exchange(explicitTimer timer); void push(explicitTimer timer); void pop(); void windup(); }; // Special wrapper around the partitioned timers to aid timing code blocks. // It avoids the need for an explicit end; leaving the scope suffices. class blockPartitionedTimer { partitionedTimers *part_timers; public: blockPartitionedTimer(partitionedTimers *pt, explicitTimer timer) : part_timers(pt) { part_timers->push(timer); } ~blockPartitionedTimer() { part_timers->pop(); } }; // Special wrapper around the thread state to aid in keeping state in code // blocks. It avoids the need for an explicit end; leaving the scope // suffices. class blockThreadState { stats_state_e *state_pointer; stats_state_e old_state; public: blockThreadState(stats_state_e *thread_state_pointer, stats_state_e new_state) : state_pointer(thread_state_pointer), old_state(*thread_state_pointer) { *state_pointer = new_state; } ~blockThreadState() { *state_pointer = old_state; } }; // If all you want is a count, then you can use this...
// The individual per-thread counts will be aggregated into a statistic at // program exit. class counter { uint64_t value; static const statInfo counterInfo[]; public: counter() : value(0) {} void increment() { value++; } uint64_t getValue() const { return value; } void reset() { value = 0; } static const char *name(counter_e e) { return counterInfo[e].name; } static bool masterOnly(counter_e e) { return counterInfo[e].flags & stats_flags_e::onlyInMaster; } }; /* **************************************************************** Class to implement an event There are four components to an event: start time, stop time, nest_level, and timer_name. The start and stop time should be obvious (recorded in clock ticks). The nest_level relates to the bar width in the timeline graph. The timer_name is used to determine which timer event triggered this event. the interface to this class is through four read-only operations: 1) getStart() -- returns the start time as 64 bit integer 2) getStop() -- returns the stop time as 64 bit integer 3) getNestLevel() -- returns the nest level of the event 4) getTimerName() -- returns the timer name that triggered the event *MORE ON NEST_LEVEL* The nest level is used in the bar graph that represents the timeline. Its main purpose is for showing how events are nested inside each other. For example, say events A, B, and C are recorded. If the timeline looks like this: Begin -------------------------------------------------------------> Time | | | | | | A B C C B A start start start end end end Then A, B, C will have a nest level of 1, 2, 3 respectively. These values are then used to calculate the bar width so you can see that inside A, B has occurred, and inside B, C has occurred. Currently, this is shown with A's bar width being larger than B's bar width, and B's bar width being larger than C's bar width. **************************************************************** */ class kmp_stats_event { uint64_t start; uint64_t stop; int nest_level; timer_e timer_name; public: kmp_stats_event() : start(0), stop(0), nest_level(0), timer_name(TIMER_LAST) {} kmp_stats_event(uint64_t strt, uint64_t stp, int nst, timer_e nme) : start(strt), stop(stp), nest_level(nst), timer_name(nme) {} inline uint64_t getStart() const { return start; } inline uint64_t getStop() const { return stop; } inline int getNestLevel() const { return nest_level; } inline timer_e getTimerName() const { return timer_name; } }; /* **************************************************************** Class to implement a dynamically expandable array of events --------------------------------------------------------- | event 1 | event 2 | event 3 | event 4 | ... | event N | --------------------------------------------------------- An event is pushed onto the back of this array at every explicitTimer->stop() call. The event records the thread #, start time, stop time, and nest level related to the bar width. The event vector starts at size INIT_SIZE and grows (doubles in size) if needed. An implication of this behavior is that log(N) reallocations are needed (where N is number of events). If you want to avoid reallocations, then set INIT_SIZE to a large value.
the interface to this class is through six operations: 1) reset() -- sets the internal_size back to 0 but does not deallocate any memory 2) size() -- returns the number of valid elements in the vector 3) push_back(start, stop, nest, timer_name) -- pushes an event onto the back of the array 4) deallocate() -- frees all memory associated with the vector 5) sort() -- sorts the vector by start time 6) operator[index] or at(index) -- returns event reference at that index **************************************************************** */ class kmp_stats_event_vector { kmp_stats_event *events; int internal_size; int allocated_size; static const int INIT_SIZE = 1024; public: kmp_stats_event_vector() { events = (kmp_stats_event *)__kmp_allocate(sizeof(kmp_stats_event) * INIT_SIZE); internal_size = 0; allocated_size = INIT_SIZE; } ~kmp_stats_event_vector() {} inline void reset() { internal_size = 0; } inline int size() const { return internal_size; } void push_back(uint64_t start_time, uint64_t stop_time, int nest_level, timer_e name) { int i; if (internal_size == allocated_size) { kmp_stats_event *tmp = (kmp_stats_event *)__kmp_allocate( sizeof(kmp_stats_event) * allocated_size * 2); for (i = 0; i < internal_size; i++) tmp[i] = events[i]; __kmp_free(events); events = tmp; allocated_size *= 2; } events[internal_size] = kmp_stats_event(start_time, stop_time, nest_level, name); internal_size++; return; } void deallocate(); void sort(); const kmp_stats_event &operator[](int index) const { return events[index]; } kmp_stats_event &operator[](int index) { return events[index]; } const kmp_stats_event &at(int index) const { return events[index]; } kmp_stats_event &at(int index) { return events[index]; } }; /* **************************************************************** Class to implement a doubly-linked, circular, statistics list |---| ---> |---| ---> |---| ---> |---| ---> ... next | | | | | | | | |---| <--- |---| <--- |---| <--- |---| <--- ... prev Sentinel first second third Node node node node The Sentinel Node is the user handle on the list. The first node corresponds to thread 0's statistics. The second node corresponds to thread 1's statistics and so on... Each node has a _timers, _counters, and _explicitTimers array to hold that thread's statistics. The _explicitTimers point to the correct _timer and update its statistics at every stop() call. The explicitTimers' pointers are set up in the constructor. Each node also has an event vector to hold that thread's timing events. The event vector expands as necessary and records the start-stop times for each timer. The nestLevel variable is for plotting events and is related to the bar width in the timeline graph. Every thread will have a thread local pointer to its node in the list. The sentinel node is used by the master thread to store "dummy" statistics before __kmp_create_worker() is called. 
**************************************************************** */ class kmp_stats_list { int gtid; timeStat _timers[TIMER_LAST + 1]; counter _counters[COUNTER_LAST + 1]; explicitTimer thread_life_timer; partitionedTimers _partitionedTimers; int _nestLevel; // one per thread kmp_stats_event_vector _event_vector; kmp_stats_list *next; kmp_stats_list *prev; stats_state_e state; int thread_is_idle_flag; public: kmp_stats_list() : thread_life_timer(&_timers[TIMER_OMP_worker_thread_life], TIMER_OMP_worker_thread_life), _nestLevel(0), _event_vector(), next(this), prev(this), state(IDLE), thread_is_idle_flag(0) {} ~kmp_stats_list() {} inline timeStat *getTimer(timer_e idx) { return &_timers[idx]; } inline counter *getCounter(counter_e idx) { return &_counters[idx]; } inline partitionedTimers *getPartitionedTimers() { return &_partitionedTimers; } inline timeStat *getTimers() { return _timers; } inline counter *getCounters() { return _counters; } inline kmp_stats_event_vector &getEventVector() { return _event_vector; } inline void startLife() { thread_life_timer.start(tsc_tick_count::now()); } inline void endLife() { thread_life_timer.stop(tsc_tick_count::now(), this); } inline void resetEventVector() { _event_vector.reset(); } inline void incrementNestValue() { _nestLevel++; } inline int getNestValue() { return _nestLevel; } inline void decrementNestValue() { _nestLevel--; } inline int getGtid() const { return gtid; } inline void setGtid(int newgtid) { gtid = newgtid; } inline void setState(stats_state_e newstate) { state = newstate; } inline stats_state_e getState() const { return state; } inline stats_state_e *getStatePointer() { return &state; } inline bool isIdle() { return thread_is_idle_flag == 1; } inline void setIdleFlag() { thread_is_idle_flag = 1; } inline void resetIdleFlag() { thread_is_idle_flag = 0; } kmp_stats_list *push_back(int gtid); // returns newly created list node inline void push_event(uint64_t start_time, uint64_t stop_time, int nest_level, timer_e name) { _event_vector.push_back(start_time, stop_time, nest_level, name); } void deallocate(); class iterator; kmp_stats_list::iterator begin(); kmp_stats_list::iterator end(); int size(); class iterator { kmp_stats_list *ptr; friend kmp_stats_list::iterator kmp_stats_list::begin(); friend kmp_stats_list::iterator kmp_stats_list::end(); public: iterator(); ~iterator(); iterator operator++(); iterator operator++(int dummy); iterator operator--(); iterator operator--(int dummy); bool operator!=(const iterator &rhs); bool operator==(const iterator &rhs); kmp_stats_list *operator*() const; // dereference operator }; }; /* **************************************************************** Class to encapsulate all output functions and the environment variables This module holds filenames for various outputs (normal stats, events, plot file), as well as coloring information for the plot file. The filenames and flags variables are read from environment variables. These are read once by the constructor of the global variable __kmp_stats_output which calls init(). During this init() call, event flags for the timeStat::timerInfo[] global array are cleared if KMP_STATS_EVENTS is not true (on, 1, yes). The only interface function that is public is outputStats(heading). 
This function should print out everything it needs to, either to files or stderr, depending on the environment variables described below. ENVIRONMENT VARIABLES: KMP_STATS_FILE -- if set, all statistics (not events) will be printed to this file, otherwise, print to stderr KMP_STATS_THREADS -- if set to "on", then print per-thread statistics to either KMP_STATS_FILE or stderr KMP_STATS_PLOT_FILE -- if set, print the ploticus plot file to this filename, otherwise, the plot file is sent to "events.plt" KMP_STATS_EVENTS -- if set to "on", then log events, otherwise, don't log events KMP_STATS_EVENTS_FILE -- if set, all events are written to this file, otherwise, output is sent to "events.dat" **************************************************************** */ class kmp_stats_output_module { public: struct rgb_color { float r; float g; float b; }; private: std::string outputFileName; static const char *eventsFileName; static const char *plotFileName; static int printPerThreadFlag; static int printPerThreadEventsFlag; static const rgb_color globalColorArray[]; static rgb_color timerColorInfo[]; void init(); static void setupEventColors(); static void printPloticusFile(); static void printHeaderInfo(FILE *statsOut); static void printTimerStats(FILE *statsOut, statistic const *theStats, statistic const *totalStats); static void printCounterStats(FILE *statsOut, statistic const *theStats); static void printCounters(FILE *statsOut, counter const *theCounters); static void printEvents(FILE *eventsOut, kmp_stats_event_vector *theEvents, int gtid); static rgb_color getEventColor(timer_e e) { return timerColorInfo[e]; } static void windupExplicitTimers(); bool eventPrintingEnabled() const { return printPerThreadEventsFlag; } public: kmp_stats_output_module() { init(); } void outputStats(const char *heading); }; #ifdef __cplusplus extern "C" { #endif void __kmp_stats_init(); void __kmp_stats_fini(); void __kmp_reset_stats(); void __kmp_output_stats(const char *); void __kmp_accumulate_stats_at_exit(void); // thread local pointer to stats node within list extern KMP_THREAD_LOCAL kmp_stats_list *__kmp_stats_thread_ptr; // head of stats list. extern kmp_stats_list *__kmp_stats_list; // lock for __kmp_stats_list extern kmp_tas_lock_t __kmp_stats_lock; // reference start time extern tsc_tick_count __kmp_stats_start_time; // interface to output extern kmp_stats_output_module __kmp_stats_output; #ifdef __cplusplus } #endif // Simple, standard interfaces that drop out completely if stats aren't enabled /*! * \brief Adds value to the specified timer (name). * * @param name timer name as specified under the KMP_FOREACH_TIMER() macro * @param value double precision sample value to add to statistics for the timer * * \details Use the KMP_COUNT_VALUE(name, value) macro to add a particular value to * a timer's statistics. * * @ingroup STATS_GATHERING */ #define KMP_COUNT_VALUE(name, value) \ __kmp_stats_thread_ptr->getTimer(TIMER_##name)->addSample(value) /*! * \brief Increments the specified counter (name). * * @param name counter name as specified under the KMP_FOREACH_COUNTER() macro * * \details Use the KMP_COUNT_BLOCK(name) macro to increment a statistics * counter for the executing thread. * * @ingroup STATS_GATHERING */ #define KMP_COUNT_BLOCK(name) \ __kmp_stats_thread_ptr->getCounter(COUNTER_##name)->increment() /*! * \brief Outputs the current thread statistics and resets them. * * @param heading_string heading put above the final stats output * * \details Explicitly stops all timers and outputs all stats.
Environment * variable, `OMPTB_STATSFILE=filename`, can be used to output the stats to a * file instead of stderr. Environment variable, * `OMPTB_STATSTHREADS=true|undefined`, can be used to output thread-specific * stats. For now, `OMPTB_STATSTHREADS` can either be defined with any value, * which prints thread-specific stats, or left undefined (not specified in the * environment), in which case thread-specific stats are not printed. Note that * all statistics are reset when this * macro is called. * * @ingroup STATS_GATHERING */ #define KMP_OUTPUT_STATS(heading_string) __kmp_output_stats(heading_string) /*! * \brief Initializes the partitioned timers to begin with name. * * @param name timer which you want this thread to begin with * * @ingroup STATS_GATHERING */ #define KMP_INIT_PARTITIONED_TIMERS(name) \ __kmp_stats_thread_ptr->getPartitionedTimers()->init(explicitTimer( \ __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name)) #define KMP_TIME_PARTITIONED_BLOCK(name) \ blockPartitionedTimer __PBLOCKTIME__( \ __kmp_stats_thread_ptr->getPartitionedTimers(), \ explicitTimer(__kmp_stats_thread_ptr->getTimer(TIMER_##name), \ TIMER_##name)) #define KMP_PUSH_PARTITIONED_TIMER(name) \ __kmp_stats_thread_ptr->getPartitionedTimers()->push(explicitTimer( \ __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name)) #define KMP_POP_PARTITIONED_TIMER() \ __kmp_stats_thread_ptr->getPartitionedTimers()->pop() #define KMP_EXCHANGE_PARTITIONED_TIMER(name) \ __kmp_stats_thread_ptr->getPartitionedTimers()->exchange(explicitTimer( \ __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name)) #define KMP_SET_THREAD_STATE(state_name) \ __kmp_stats_thread_ptr->setState(state_name) #define KMP_GET_THREAD_STATE() __kmp_stats_thread_ptr->getState() #define KMP_SET_THREAD_STATE_BLOCK(state_name) \ blockThreadState __BTHREADSTATE__(__kmp_stats_thread_ptr->getStatePointer(), \ state_name) /*! * \brief Resets all stats (counters to 0, timers to 0 elapsed ticks) * * \details Reset all stats for all threads.
* * @ingroup STATS_GATHERING */ #define KMP_RESET_STATS() __kmp_reset_stats() #if (KMP_DEVELOPER_STATS) #define KMP_COUNT_DEVELOPER_VALUE(n, v) KMP_COUNT_VALUE(n, v) #define KMP_COUNT_DEVELOPER_BLOCK(n) KMP_COUNT_BLOCK(n) #define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) KMP_TIME_PARTITIONED_BLOCK(n) #define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) KMP_PUSH_PARTITIONED_TIMER(n) #define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) KMP_POP_PARTITIONED_TIMER(n) #define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) \ KMP_EXCHANGE_PARTITIONED_TIMER(n) #else // Null definitions #define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0) #define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0) #define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0) #define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #endif #else // KMP_STATS_ENABLED // Null definitions #define KMP_COUNT_VALUE(n, v) ((void)0) #define KMP_COUNT_BLOCK(n) ((void)0) #define KMP_OUTPUT_STATS(heading_string) ((void)0) #define KMP_RESET_STATS() ((void)0) #define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0) #define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0) #define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0) #define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_INIT_PARTITIONED_TIMERS(name) ((void)0) #define KMP_TIME_PARTITIONED_BLOCK(name) ((void)0) #define KMP_PUSH_PARTITIONED_TIMER(name) ((void)0) #define KMP_POP_PARTITIONED_TIMER() ((void)0) #define KMP_SET_THREAD_STATE(state_name) ((void)0) #define KMP_GET_THREAD_STATE() ((void)0) #define KMP_SET_THREAD_STATE_BLOCK(state_name) ((void)0) #endif // KMP_STATS_ENABLED #endif // KMP_STATS_H
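The statistic class above keeps only minVal, maxVal, meanVal, m2, and sampleCount, and the header's opening comment cites the online (Welford) variance algorithm; addSample itself is declared here but defined elsewhere in the runtime. The following is a minimal standalone sketch of that update, not the runtime's actual definition; member names mirror the class so that getSD() matches sqrt(m2 / sampleCount).

// Standalone Welford-update sketch (assumed to match statistic::addSample's intent).
#include <cmath>
#include <cstdint>
#include <limits>

struct OnlineStat {
  double minVal = std::numeric_limits<double>::max();
  double maxVal = -std::numeric_limits<double>::max();
  double meanVal = 0.0; // running mean
  double m2 = 0.0;      // running sum of squared deviations from the mean
  uint64_t sampleCount = 0;

  void addSample(double sample) {
    sampleCount++;
    if (sample < minVal) minVal = sample;
    if (sample > maxVal) maxVal = sample;
    double delta = sample - meanVal;
    meanVal += delta / sampleCount;   // incremental mean update
    m2 += delta * (sample - meanVal); // uses the mean before and after the update
  }
  // Population standard deviation, as in statistic::getSD().
  double getSD() const { return std::sqrt(m2 / sampleCount); }
};

The appeal of this formulation is that each sample is folded in with O(1) work and no stored history, so every thread can keep a statistic per timer cheaply and merge them at exit.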
#ifndef KMP_STATS_H #define KMP_STATS_H /** @file kmp_stats.h * Functions for collecting statistics. */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "kmp_config.h" #include "kmp_debug.h" #if KMP_STATS_ENABLED /* Statistics accumulator. Accumulates the number of samples and computes min, max, mean, standard deviation on the fly. Online variance calculation algorithm from http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm */ #include "kmp_stats_timing.h" #include <limits> #include <math.h> #include <new> // placement new #include <stdint.h> #include <string> #include <vector> /* Enable developer statistics here if you want them. They are more detailed than is useful for application characterisation and are intended for the runtime library developer. */ #define KMP_DEVELOPER_STATS 0 /* Enable/Disable histogram output */ #define KMP_STATS_HIST 0 /*! * @ingroup STATS_GATHERING * \brief flags to describe the statistic (timer or counter) * */ enum stats_flags_e { noTotal = 1 << 0, //!< do not show a TOTAL_aggregation for this statistic onlyInMaster = 1 << 1, //!< statistic is valid only for master noUnits = 1 << 2, //!< statistic doesn't need units printed next to it notInMaster = 1 << 3, //!< statistic is valid only for non-master threads logEvent = 1 << 4 //!< statistic can be logged on the event timeline when //! KMP_STATS_EVENTS is on (valid only for timers) }; /*! * @ingroup STATS_GATHERING * \brief the states which a thread can be in * */ enum stats_state_e { IDLE, SERIAL_REGION, FORK_JOIN_BARRIER, PLAIN_BARRIER, TASKWAIT, TASKYIELD, TASKGROUP, IMPLICIT_TASK, EXPLICIT_TASK, TEAMS_REGION }; /*! * \brief Add new counters under KMP_FOREACH_COUNTER() macro in kmp_stats.h * * @param macro a user defined macro that takes three arguments - * macro(COUNTER_NAME, flags, arg) * @param arg a user defined argument to send to the user defined macro * * \details A counter counts the occurrence of some event. Each thread * accumulates its own count; at the end of execution the counts are aggregated, * treating each thread as a separate measurement. (Unless onlyInMaster is set, * in which case there's only a single measurement.) The min, mean, max are * therefore the values for the threads. Adding the counter here and then * putting a KMP_COUNT_BLOCK(name) at the point you want to count is all you * need to do. All of the tables and printing are generated from this macro.
* Format is "macro(name, flags, arg)" * * @ingroup STATS_GATHERING */ // clang-format off #define KMP_FOREACH_COUNTER(macro, arg) \ macro(OMP_PARALLEL,stats_flags_e::onlyInMaster|stats_flags_e::noTotal,arg) \ macro(OMP_NESTED_PARALLEL, 0, arg) \ macro(OMP_LOOP_STATIC, 0, arg) \ macro(OMP_LOOP_STATIC_STEAL, 0, arg) \ macro(OMP_LOOP_DYNAMIC, 0, arg) \ macro(OMP_DISTRIBUTE, 0, arg) \ macro(OMP_BARRIER, 0, arg) \ macro(OMP_CRITICAL, 0, arg) \ macro(OMP_SINGLE, 0, arg) \ macro(OMP_MASTER, 0, arg) \ macro(OMP_TEAMS, 0, arg) \ macro(OMP_set_lock, 0, arg) \ macro(OMP_test_lock, 0, arg) \ macro(REDUCE_wait, 0, arg) \ macro(REDUCE_nowait, 0, arg) \ macro(OMP_TASKYIELD, 0, arg) \ macro(OMP_TASKLOOP, 0, arg) \ macro(TASK_executed, 0, arg) \ macro(TASK_cancelled, 0, arg) \ macro(TASK_stolen, 0, arg) // clang-format on /*! * \brief Add new timers under KMP_FOREACH_TIMER() macro in kmp_stats.h * * @param macro a user defined macro that takes three arguments - * macro(TIMER_NAME, flags, arg) * @param arg a user defined argument to send to the user defined macro * * \details A timer collects multiple samples of some count in each thread and * then finally aggregates all of the samples from all of the threads. For most * timers the printing code also provides an aggregation over the thread totals. * These are printed as TOTAL_foo. The count is normally a time (in ticks), * hence the name "timer". (But can be any value, so we use this for "number of * arguments passed to fork" as well). For timers the threads are not * significant, it's the individual observations that count, so the statistics * are at that level. Format is "macro(name, flags, arg)" * * @ingroup STATS_GATHERING2 */ // clang-format off #define KMP_FOREACH_TIMER(macro, arg) \ macro (OMP_worker_thread_life, stats_flags_e::logEvent, arg) \ macro (OMP_parallel, stats_flags_e::logEvent, arg) \ macro (OMP_parallel_overhead, stats_flags_e::logEvent, arg) \ macro (OMP_teams, stats_flags_e::logEvent, arg) \ macro (OMP_teams_overhead, stats_flags_e::logEvent, arg) \ macro (OMP_loop_static, 0, arg) \ macro (OMP_loop_static_scheduling, 0, arg) \ macro (OMP_loop_dynamic, 0, arg) \ macro (OMP_loop_dynamic_scheduling, 0, arg) \ macro (OMP_distribute, 0, arg) \ macro (OMP_distribute_scheduling, 0, arg) \ macro (OMP_critical, 0, arg) \ macro (OMP_critical_wait, 0, arg) \ macro (OMP_single, 0, arg) \ macro (OMP_master, 0, arg) \ macro (OMP_task_immediate, 0, arg) \ macro (OMP_task_taskwait, 0, arg) \ macro (OMP_task_taskyield, 0, arg) \ macro (OMP_task_taskgroup, 0, arg) \ macro (OMP_task_join_bar, 0, arg) \ macro (OMP_task_plain_bar, 0, arg) \ macro (OMP_taskloop_scheduling, 0, arg) \ macro (OMP_plain_barrier, stats_flags_e::logEvent, arg) \ macro (OMP_idle, stats_flags_e::logEvent, arg) \ macro (OMP_fork_barrier, stats_flags_e::logEvent, arg) \ macro (OMP_join_barrier, stats_flags_e::logEvent, arg) \ macro (OMP_serial, stats_flags_e::logEvent, arg) \ macro (OMP_set_numthreads, stats_flags_e::noUnits | stats_flags_e::noTotal, \ arg) \ macro (OMP_PARALLEL_args, stats_flags_e::noUnits | stats_flags_e::noTotal, \ arg) \ macro (OMP_loop_static_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_loop_static_total_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_loop_dynamic_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_loop_dynamic_total_iterations, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (OMP_distribute_iterations, \ stats_flags_e::noUnits | 
stats_flags_e::noTotal, arg) \ KMP_FOREACH_DEVELOPER_TIMER(macro, arg) // clang-format on // OMP_worker_thread_life -- Time from thread becoming an OpenMP thread (either // initializing OpenMP or being created by a master) // until the thread is destroyed // OMP_parallel -- Time thread spends executing work directly // within a #pragma omp parallel // OMP_parallel_overhead -- Time thread spends setting up a parallel region // OMP_loop_static -- Time thread spends executing loop iterations from // a statically scheduled loop // OMP_loop_static_scheduling -- Time thread spends scheduling loop iterations // from a statically scheduled loop // OMP_loop_dynamic -- Time thread spends executing loop iterations from // a dynamically scheduled loop // OMP_loop_dynamic_scheduling -- Time thread spends scheduling loop iterations // from a dynamically scheduled loop // OMP_critical -- Time thread spends executing critical section // OMP_critical_wait -- Time thread spends waiting to enter // a critical section // OMP_single -- Time spent executing a "single" region // OMP_master -- Time spent executing a "master" region // OMP_task_immediate -- Time spent executing non-deferred tasks // OMP_task_taskwait -- Time spent executing tasks inside a taskwait // construct // OMP_task_taskyield -- Time spent executing tasks inside a taskyield // construct // OMP_task_taskgroup -- Time spent executing tasks inside a taskgroup // construct // OMP_task_join_bar -- Time spent executing tasks inside a join barrier // OMP_task_plain_bar -- Time spent executing tasks inside a barrier // construct // OMP_taskloop_scheduling -- Time spent scheduling tasks inside a taskloop // construct // OMP_plain_barrier -- Time spent in a #pragma omp barrier construct or // inside implicit barrier at end of worksharing // construct // OMP_idle -- Time worker threads spend waiting for next // parallel region // OMP_fork_barrier -- Time spent in the fork barrier surrounding a // parallel region // OMP_join_barrier -- Time spent in the join barrier surrounding a // parallel region // OMP_serial -- Time thread zero spends executing serial code // OMP_set_numthreads -- Values passed to omp_set_num_threads // OMP_PARALLEL_args -- Number of arguments passed to a parallel region // OMP_loop_static_iterations -- Number of iterations thread is assigned for // statically scheduled loops // OMP_loop_dynamic_iterations -- Number of iterations thread is assigned for // dynamically scheduled loops #if (KMP_DEVELOPER_STATS) // Timers which are of interest to runtime library developers, not end users. // These have to be explicitly enabled in addition to the other stats.
// KMP_fork_barrier -- time in __kmp_fork_barrier // KMP_join_barrier -- time in __kmp_join_barrier // KMP_barrier -- time in __kmp_barrier // KMP_end_split_barrier -- time in __kmp_end_split_barrier // KMP_setup_icv_copy -- time in __kmp_setup_icv_copy // KMP_icv_copy -- start/stop timer for any ICV copying // KMP_linear_gather -- time in __kmp_linear_barrier_gather // KMP_linear_release -- time in __kmp_linear_barrier_release // KMP_tree_gather -- time in __kmp_tree_barrier_gather // KMP_tree_release -- time in __kmp_tree_barrier_release // KMP_hyper_gather -- time in __kmp_hyper_barrier_gather // KMP_hyper_release -- time in __kmp_hyper_barrier_release // clang-format off #define KMP_FOREACH_DEVELOPER_TIMER(macro, arg) \ macro(KMP_fork_call, 0, arg) \ macro(KMP_join_call, 0, arg) \ macro(KMP_end_split_barrier, 0, arg) \ macro(KMP_hier_gather, 0, arg) \ macro(KMP_hier_release, 0, arg) \ macro(KMP_hyper_gather, 0, arg) \ macro(KMP_hyper_release, 0, arg) \ macro(KMP_linear_gather, 0, arg) \ macro(KMP_linear_release, 0, arg) \ macro(KMP_tree_gather, 0, arg) \ macro(KMP_tree_release, 0, arg) \ macro(USER_resume, 0, arg) \ macro(USER_suspend, 0, arg) \ macro(KMP_allocate_team, 0, arg) \ macro(KMP_setup_icv_copy, 0, arg) \ macro(USER_icv_copy, 0, arg) \ macro (FOR_static_steal_stolen, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \ macro (FOR_static_steal_chunks, \ stats_flags_e::noUnits | stats_flags_e::noTotal, arg) #else #define KMP_FOREACH_DEVELOPER_TIMER(macro, arg) #endif // clang-format on /*! * \brief Add new explicit timers under KMP_FOREACH_EXPLICIT_TIMER() macro. * * @param macro a user defined macro that takes three arguments - * macro(TIMER_NAME, flags, arg) * @param arg a user defined argument to send to the user defined macro * * \warning YOU MUST HAVE THE SAME NAMED TIMER UNDER KMP_FOREACH_TIMER() OR ELSE * BAD THINGS WILL HAPPEN! * * \details Explicit timers are ones where we need to allocate a timer itself * (as well as the accumulated timing statistics). We allocate these on a * per-thread basis, and explicitly start and stop them. Block timers just * allocate the timer itself on the stack, and use the destructor to notice * block exit; they don't need to be defined here. The name here should be the * same as that of a timer above. * * @ingroup STATS_GATHERING */ #define KMP_FOREACH_EXPLICIT_TIMER(macro, arg) KMP_FOREACH_TIMER(macro, arg) #define ENUMERATE(name, ignore, prefix) prefix##name, enum timer_e { KMP_FOREACH_TIMER(ENUMERATE, TIMER_) TIMER_LAST }; enum explicit_timer_e { KMP_FOREACH_EXPLICIT_TIMER(ENUMERATE, EXPLICIT_TIMER_) EXPLICIT_TIMER_LAST }; enum counter_e { KMP_FOREACH_COUNTER(ENUMERATE, COUNTER_) COUNTER_LAST }; #undef ENUMERATE /* * A logarithmic histogram. It accumulates the number of values in each power of * ten bin. So 1<=x<10, 10<=x<100, ... * Mostly useful where we have some big outliers and want to see information * about them. */ class logHistogram { enum { numBins = 31, /* Number of powers of 10. If this changes you need to change * the initializer for binMax */ /* * If you want to use this to analyse values that may be less than 1, (for * instance times in s), then the logOffset gives you negative powers. * In our case here, we're just looking at times in ticks, or counts, so we * can never see values with magnitude < 1 (other than zero), so we can set * it to 0. As above change the initializer if you change this. 
*/ logOffset = 0 }; uint32_t KMP_ALIGN_CACHE zeroCount; struct { uint32_t count; double total; } bins[numBins]; static double binMax[numBins]; #ifdef KMP_DEBUG uint64_t _total; void check() const { uint64_t t = zeroCount; for (int i = 0; i < numBins; i++) t += bins[i].count; KMP_DEBUG_ASSERT(t == _total); } #else void check() const {} #endif public: logHistogram() { reset(); } logHistogram(logHistogram const &o) { for (int i = 0; i < numBins; i++) bins[i] = o.bins[i]; #ifdef KMP_DEBUG _total = o._total; #endif } void reset() { zeroCount = 0; for (int i = 0; i < numBins; i++) { bins[i].count = 0; bins[i].total = 0; } #ifdef KMP_DEBUG _total = 0; #endif } uint32_t count(int b) const { return bins[b + logOffset].count; } double total(int b) const { return bins[b + logOffset].total; } static uint32_t findBin(double sample); logHistogram &operator+=(logHistogram const &o) { zeroCount += o.zeroCount; for (int i = 0; i < numBins; i++) { bins[i].count += o.bins[i].count; bins[i].total += o.bins[i].total; } #ifdef KMP_DEBUG _total += o._total; check(); #endif return *this; } void addSample(double sample); int minBin() const; int maxBin() const; std::string format(char) const; }; class statistic { double KMP_ALIGN_CACHE minVal; double maxVal; double meanVal; double m2; uint64_t sampleCount; double offset; bool collectingHist; logHistogram hist; public: statistic(bool doHist = bool(KMP_STATS_HIST)) { reset(); collectingHist = doHist; } statistic(statistic const &o) : minVal(o.minVal), maxVal(o.maxVal), meanVal(o.meanVal), m2(o.m2), sampleCount(o.sampleCount), offset(o.offset), collectingHist(o.collectingHist), hist(o.hist) {} statistic(double minv, double maxv, double meanv, uint64_t sc, double sd) : minVal(minv), maxVal(maxv), meanVal(meanv), m2(sd * sd * sc), sampleCount(sc), offset(0.0), collectingHist(false) {} bool haveHist() const { return collectingHist; } double getMin() const { return minVal; } double getMean() const { return meanVal; } double getMax() const { return maxVal; } uint64_t getCount() const { return sampleCount; } double getSD() const { return sqrt(m2 / sampleCount); } double getTotal() const { return sampleCount * meanVal; } logHistogram const *getHist() const { return &hist; } void setOffset(double d) { offset = d; } void reset() { minVal = std::numeric_limits<double>::max(); maxVal = -minVal; meanVal = 0.0; m2 = 0.0; sampleCount = 0; offset = 0.0; hist.reset(); } void addSample(double sample); void scale(double factor); void scaleDown(double f) { scale(1. 
/ f); } void forceCount(uint64_t count) { sampleCount = count; } statistic &operator+=(statistic const &other); std::string format(char unit, bool total = false) const; std::string formatHist(char unit) const { return hist.format(unit); } }; struct statInfo { const char *name; uint32_t flags; }; class timeStat : public statistic { static statInfo timerInfo[]; public: timeStat() : statistic() {} static const char *name(timer_e e) { return timerInfo[e].name; } static bool noTotal(timer_e e) { return timerInfo[e].flags & stats_flags_e::noTotal; } static bool masterOnly(timer_e e) { return timerInfo[e].flags & stats_flags_e::onlyInMaster; } static bool workerOnly(timer_e e) { return timerInfo[e].flags & stats_flags_e::notInMaster; } static bool noUnits(timer_e e) { return timerInfo[e].flags & stats_flags_e::noUnits; } static bool logEvent(timer_e e) { return timerInfo[e].flags & stats_flags_e::logEvent; } static void clearEventFlags() { for (int i = 0; i < TIMER_LAST; i++) { timerInfo[i].flags &= (~(stats_flags_e::logEvent)); } } }; // Where we need explicitly to start and end the timer, this version can be used. // Since these timers normally aren't nicely scoped and don't have a good place // to live on the stack of the thread, they're more work to use. class explicitTimer { timeStat *stat; timer_e timerEnumValue; tsc_tick_count startTime; tsc_tick_count pauseStartTime; tsc_tick_count::tsc_interval_t totalPauseTime; public: explicitTimer(timeStat *s, timer_e te) : stat(s), timerEnumValue(te), startTime(), pauseStartTime(0), totalPauseTime() {} // void setStat(timeStat *s) { stat = s; } void start(tsc_tick_count tick); void pause(tsc_tick_count tick) { pauseStartTime = tick; } void resume(tsc_tick_count tick) { totalPauseTime += (tick - pauseStartTime); } void stop(tsc_tick_count tick, kmp_stats_list *stats_ptr = nullptr); void reset() { startTime = 0; pauseStartTime = 0; totalPauseTime = 0; } timer_e get_type() const { return timerEnumValue; } }; // Where you need to partition a thread's clock ticks into separate states, // e.g., a partitionedTimers class with two timers of EXECUTING_TASK and // DOING_NOTHING would satisfy these conditions: // time(EXECUTING_TASK) + time(DOING_NOTHING) = total time thread is alive // No clock tick in the EXECUTING_TASK is a member of DOING_NOTHING and vice // versa class partitionedTimers { private: std::vector<explicitTimer> timer_stack; public: partitionedTimers(); void init(explicitTimer timer); void exchange(explicitTimer timer); void push(explicitTimer timer); void pop(); void windup(); }; // Special wrapper around the partitioned timers to aid timing code blocks. // It avoids the need for an explicit end; leaving the scope suffices. class blockPartitionedTimer { partitionedTimers *part_timers; public: blockPartitionedTimer(partitionedTimers *pt, explicitTimer timer) : part_timers(pt) { part_timers->push(timer); } ~blockPartitionedTimer() { part_timers->pop(); } }; // Special wrapper around the thread state to aid in keeping state in code // blocks. It avoids the need for an explicit end; leaving the scope // suffices. class blockThreadState { stats_state_e *state_pointer; stats_state_e old_state; public: blockThreadState(stats_state_e *thread_state_pointer, stats_state_e new_state) : state_pointer(thread_state_pointer), old_state(*thread_state_pointer) { *state_pointer = new_state; } ~blockThreadState() { *state_pointer = old_state; } }; // If all you want is a count, then you can use this...
// The individual per-thread counts will be aggregated into a statistic at // program exit. class counter { uint64_t value; static const statInfo counterInfo[]; public: counter() : value(0) {} void increment() { value++; } uint64_t getValue() const { return value; } void reset() { value = 0; } static const char *name(counter_e e) { return counterInfo[e].name; } static bool masterOnly(counter_e e) { return counterInfo[e].flags & stats_flags_e::onlyInMaster; } }; /* **************************************************************** Class to implement an event There are four components to an event: start time, stop time, nest_level, and timer_name. The start and stop time should be obvious (recorded in clock ticks). The nest_level relates to the bar width in the timeline graph. The timer_name is used to determine which timer event triggered this event. the interface to this class is through four read-only operations: 1) getStart() -- returns the start time as 64 bit integer 2) getStop() -- returns the stop time as 64 bit integer 3) getNestLevel() -- returns the nest level of the event 4) getTimerName() -- returns the timer name that triggered the event *MORE ON NEST_LEVEL* The nest level is used in the bar graph that represents the timeline. Its main purpose is for showing how events are nested inside each other. For example, say events A, B, and C are recorded. If the timeline looks like this: Begin -------------------------------------------------------------> Time | | | | | | A B C C B A start start start end end end Then A, B, C will have a nest level of 1, 2, 3 respectively. These values are then used to calculate the bar width so you can see that inside A, B has occurred, and inside B, C has occurred. Currently, this is shown with A's bar width being larger than B's bar width, and B's bar width being larger than C's bar width. **************************************************************** */ class kmp_stats_event { uint64_t start; uint64_t stop; int nest_level; timer_e timer_name; public: kmp_stats_event() : start(0), stop(0), nest_level(0), timer_name(TIMER_LAST) {} kmp_stats_event(uint64_t strt, uint64_t stp, int nst, timer_e nme) : start(strt), stop(stp), nest_level(nst), timer_name(nme) {} inline uint64_t getStart() const { return start; } inline uint64_t getStop() const { return stop; } inline int getNestLevel() const { return nest_level; } inline timer_e getTimerName() const { return timer_name; } }; /* **************************************************************** Class to implement a dynamically expandable array of events --------------------------------------------------------- | event 1 | event 2 | event 3 | event 4 | ... | event N | --------------------------------------------------------- An event is pushed onto the back of this array at every explicitTimer->stop() call. The event records the thread #, start time, stop time, and nest level related to the bar width. The event vector starts at size INIT_SIZE and grows (doubles in size) if needed. An implication of this behavior is that log(N) reallocations are needed (where N is number of events). If you want to avoid reallocations, then set INIT_SIZE to a large value.
the interface to this class is through six operations: 1) reset() -- sets the internal_size back to 0 but does not deallocate any memory 2) size() -- returns the number of valid elements in the vector 3) push_back(start, stop, nest, timer_name) -- pushes an event onto the back of the array 4) deallocate() -- frees all memory associated with the vector 5) sort() -- sorts the vector by start time 6) operator[index] or at(index) -- returns event reference at that index **************************************************************** */ class kmp_stats_event_vector { kmp_stats_event *events; int internal_size; int allocated_size; static const int INIT_SIZE = 1024; public: kmp_stats_event_vector() { events = (kmp_stats_event *)__kmp_allocate(sizeof(kmp_stats_event) * INIT_SIZE); internal_size = 0; allocated_size = INIT_SIZE; } ~kmp_stats_event_vector() {} inline void reset() { internal_size = 0; } inline int size() const { return internal_size; } void push_back(uint64_t start_time, uint64_t stop_time, int nest_level, timer_e name) { int i; if (internal_size == allocated_size) { kmp_stats_event *tmp = (kmp_stats_event *)__kmp_allocate( sizeof(kmp_stats_event) * allocated_size * 2); for (i = 0; i < internal_size; i++) tmp[i] = events[i]; __kmp_free(events); events = tmp; allocated_size *= 2; } events[internal_size] = kmp_stats_event(start_time, stop_time, nest_level, name); internal_size++; return; } void deallocate(); void sort(); const kmp_stats_event &operator[](int index) const { return events[index]; } kmp_stats_event &operator[](int index) { return events[index]; } const kmp_stats_event &at(int index) const { return events[index]; } kmp_stats_event &at(int index) { return events[index]; } }; /* **************************************************************** Class to implement a doubly-linked, circular, statistics list |---| ---> |---| ---> |---| ---> |---| ---> ... next | | | | | | | | |---| <--- |---| <--- |---| <--- |---| <--- ... prev Sentinel first second third Node node node node The Sentinel Node is the user handle on the list. The first node corresponds to thread 0's statistics. The second node corresponds to thread 1's statistics and so on... Each node has a _timers, _counters, and _explicitTimers array to hold that thread's statistics. The _explicitTimers point to the correct _timer and update its statistics at every stop() call. The explicitTimers' pointers are set up in the constructor. Each node also has an event vector to hold that thread's timing events. The event vector expands as necessary and records the start-stop times for each timer. The nestLevel variable is for plotting events and is related to the bar width in the timeline graph. Every thread will have a thread local pointer to its node in the list. The sentinel node is used by the master thread to store "dummy" statistics before __kmp_create_worker() is called. 
**************************************************************** */ class kmp_stats_list { int gtid; timeStat _timers[TIMER_LAST + 1]; counter _counters[COUNTER_LAST + 1]; explicitTimer thread_life_timer; partitionedTimers _partitionedTimers; int _nestLevel; // one per thread kmp_stats_event_vector _event_vector; kmp_stats_list *next; kmp_stats_list *prev; stats_state_e state; int thread_is_idle_flag; public: kmp_stats_list() : thread_life_timer(&_timers[TIMER_OMP_worker_thread_life], TIMER_OMP_worker_thread_life), _nestLevel(0), _event_vector(), next(this), prev(this), state(IDLE), thread_is_idle_flag(0) {} ~kmp_stats_list() {} inline timeStat *getTimer(timer_e idx) { return &_timers[idx]; } inline counter *getCounter(counter_e idx) { return &_counters[idx]; } inline partitionedTimers *getPartitionedTimers() { return &_partitionedTimers; } inline timeStat *getTimers() { return _timers; } inline counter *getCounters() { return _counters; } inline kmp_stats_event_vector &getEventVector() { return _event_vector; } inline void startLife() { thread_life_timer.start(tsc_tick_count::now()); } inline void endLife() { thread_life_timer.stop(tsc_tick_count::now(), this); } inline void resetEventVector() { _event_vector.reset(); } inline void incrementNestValue() { _nestLevel++; } inline int getNestValue() { return _nestLevel; } inline void decrementNestValue() { _nestLevel--; } inline int getGtid() const { return gtid; } inline void setGtid(int newgtid) { gtid = newgtid; } inline void setState(stats_state_e newstate) { state = newstate; } inline stats_state_e getState() const { return state; } inline stats_state_e *getStatePointer() { return &state; } inline bool isIdle() { return thread_is_idle_flag == 1; } inline void setIdleFlag() { thread_is_idle_flag = 1; } inline void resetIdleFlag() { thread_is_idle_flag = 0; } kmp_stats_list *push_back(int gtid); // returns newly created list node inline void push_event(uint64_t start_time, uint64_t stop_time, int nest_level, timer_e name) { _event_vector.push_back(start_time, stop_time, nest_level, name); } void deallocate(); class iterator; kmp_stats_list::iterator begin(); kmp_stats_list::iterator end(); int size(); class iterator { kmp_stats_list *ptr; friend kmp_stats_list::iterator kmp_stats_list::begin(); friend kmp_stats_list::iterator kmp_stats_list::end(); public: iterator(); ~iterator(); iterator operator++(); iterator operator++(int dummy); iterator operator--(); iterator operator--(int dummy); bool operator!=(const iterator &rhs); bool operator==(const iterator &rhs); kmp_stats_list *operator*() const; // dereference operator }; }; /* **************************************************************** Class to encapsulate all output functions and the environment variables This module holds filenames for various outputs (normal stats, events, plot file), as well as coloring information for the plot file. The filenames and flags variables are read from environment variables. These are read once by the constructor of the global variable __kmp_stats_output which calls init(). During this init() call, event flags for the timeStat::timerInfo[] global array are cleared if KMP_STATS_EVENTS is not true (on, 1, yes). The only interface function that is public is outputStats(heading). 
This function should print out everything it needs to, either to files or stderr, depending on the environment variables described below ENVIRONMENT VARIABLES: KMP_STATS_FILE -- if set, all statistics (not events) will be printed to this file, otherwise, print to stderr KMP_STATS_THREADS -- if set to "on", then will print per thread statistics to either KMP_STATS_FILE or stderr KMP_STATS_PLOT_FILE -- if set, print the ploticus plot file to this filename, otherwise, the plot file is sent to "events.plt" KMP_STATS_EVENTS -- if set to "on", then log events, otherwise, don't log events KMP_STATS_EVENTS_FILE -- if set, all events are outputted to this file, otherwise, output is sent to "events.dat" **************************************************************** */ class kmp_stats_output_module { public: struct rgb_color { float r; float g; float b; }; private: std::string outputFileName; static const char *eventsFileName; static const char *plotFileName; static int printPerThreadFlag; static int printPerThreadEventsFlag; static const rgb_color globalColorArray[]; static rgb_color timerColorInfo[]; void init(); static void setupEventColors(); static void printPloticusFile(); static void printHeaderInfo(FILE *statsOut); static void printTimerStats(FILE *statsOut, statistic const *theStats, statistic const *totalStats); static void printCounterStats(FILE *statsOut, statistic const *theStats); static void printCounters(FILE *statsOut, counter const *theCounters); static void printEvents(FILE *eventsOut, kmp_stats_event_vector *theEvents, int gtid); static rgb_color getEventColor(timer_e e) { return timerColorInfo[e]; } static void windupExplicitTimers(); bool eventPrintingEnabled() const { return printPerThreadEventsFlag; } public: kmp_stats_output_module() { init(); } void outputStats(const char *heading); }; #ifdef __cplusplus extern "C" { #endif void __kmp_stats_init(); void __kmp_stats_fini(); void __kmp_reset_stats(); void __kmp_output_stats(const char *); void __kmp_accumulate_stats_at_exit(void); // thread local pointer to stats node within list extern KMP_THREAD_LOCAL kmp_stats_list *__kmp_stats_thread_ptr; // head to stats list. extern kmp_stats_list *__kmp_stats_list; // lock for __kmp_stats_list extern kmp_tas_lock_t __kmp_stats_lock; // reference start time extern tsc_tick_count __kmp_stats_start_time; // interface to output extern kmp_stats_output_module __kmp_stats_output; #ifdef __cplusplus } #endif // Simple, standard interfaces that drop out completely if stats aren't enabled /*! * \brief Adds value to specified timer (name). * * @param name timer name as specified under the KMP_FOREACH_TIMER() macro * @param value double precision sample value to add to statistics for the timer * * \details Use KMP_COUNT_VALUE(name, value) macro to add a particular value to * a timer statistics. * * @ingroup STATS_GATHERING */ #define KMP_COUNT_VALUE(name, value) \ __kmp_stats_thread_ptr->getTimer(TIMER_##name)->addSample(value) /*! * \brief Increments specified counter (name). * * @param name counter name as specified under the KMP_FOREACH_COUNTER() macro * * \details Use KMP_COUNT_BLOCK(name, value) macro to increment a statistics * counter for the executing thread. * * @ingroup STATS_GATHERING */ #define KMP_COUNT_BLOCK(name) \ __kmp_stats_thread_ptr->getCounter(COUNTER_##name)->increment() /*! * \brief Outputs the current thread statistics and reset them. * * @param heading_string heading put above the final stats output * * \details Explicitly stops all timers and outputs all stats. 
Environment * variable `KMP_STATS_FILE=filename` redirects the stats from stderr to that * file, and setting `KMP_STATS_THREADS=on` additionally prints per-thread * statistics; both variables are described in the environment variable summary * above. Note that all statistics are reset when this macro is called. * * @ingroup STATS_GATHERING */ #define KMP_OUTPUT_STATS(heading_string) __kmp_output_stats(heading_string) /*! * \brief Initializes the partitioned timers to begin with name. * * @param name timer which you want this thread to begin with * * @ingroup STATS_GATHERING */ #define KMP_INIT_PARTITIONED_TIMERS(name) \ __kmp_stats_thread_ptr->getPartitionedTimers()->init(explicitTimer( \ __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name)) #define KMP_TIME_PARTITIONED_BLOCK(name) \ blockPartitionedTimer __PBLOCKTIME__( \ __kmp_stats_thread_ptr->getPartitionedTimers(), \ explicitTimer(__kmp_stats_thread_ptr->getTimer(TIMER_##name), \ TIMER_##name)) #define KMP_PUSH_PARTITIONED_TIMER(name) \ __kmp_stats_thread_ptr->getPartitionedTimers()->push(explicitTimer( \ __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name)) #define KMP_POP_PARTITIONED_TIMER() \ __kmp_stats_thread_ptr->getPartitionedTimers()->pop() #define KMP_EXCHANGE_PARTITIONED_TIMER(name) \ __kmp_stats_thread_ptr->getPartitionedTimers()->exchange(explicitTimer( \ __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name)) #define KMP_SET_THREAD_STATE(state_name) \ __kmp_stats_thread_ptr->setState(state_name) #define KMP_GET_THREAD_STATE() __kmp_stats_thread_ptr->getState() #define KMP_SET_THREAD_STATE_BLOCK(state_name) \ blockThreadState __BTHREADSTATE__(__kmp_stats_thread_ptr->getStatePointer(), \ state_name) /*! * \brief Resets all stats (counters to 0, timers to 0 elapsed ticks) * * \details Reset all stats for all threads.
* * @ingroup STATS_GATHERING */ #define KMP_RESET_STATS() __kmp_reset_stats() #if (KMP_DEVELOPER_STATS) #define KMP_COUNT_DEVELOPER_VALUE(n, v) KMP_COUNT_VALUE(n, v) #define KMP_COUNT_DEVELOPER_BLOCK(n) KMP_COUNT_BLOCK(n) #define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) KMP_TIME_PARTITIONED_BLOCK(n) #define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) KMP_PUSH_PARTITIONED_TIMER(n) #define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) KMP_POP_PARTITIONED_TIMER(n) #define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) \ KMP_EXCHANGE_PARTITIONED_TIMER(n) #else // Null definitions #define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0) #define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0) #define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0) #define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #endif #else // KMP_STATS_ENABLED // Null definitions #define KMP_COUNT_VALUE(n, v) ((void)0) #define KMP_COUNT_BLOCK(n) ((void)0) #define KMP_OUTPUT_STATS(heading_string) ((void)0) #define KMP_RESET_STATS() ((void)0) #define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0) #define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0) #define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0) #define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) ((void)0) #define KMP_INIT_PARTITIONED_TIMERS(name) ((void)0) #define KMP_TIME_PARTITIONED_BLOCK(name) ((void)0) #define KMP_PUSH_PARTITIONED_TIMER(name) ((void)0) #define KMP_POP_PARTITIONED_TIMER() ((void)0) #define KMP_SET_THREAD_STATE(state_name) ((void)0) #define KMP_GET_THREAD_STATE() ((void)0) #define KMP_SET_THREAD_STATE_BLOCK(state_name) ((void)0) #endif // KMP_STATS_ENABLED #endif // KMP_STATS_H
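A minimal call-site sketch for the macros above (illustrative only, not code taken from the runtime): it assumes KMP_STATS_ENABLED is set, and that a counter named OMP_PARALLEL and a timer named OMP_parallel have been registered through the KMP_FOREACH_COUNTER() and KMP_FOREACH_TIMER() lists earlier in this header. Because the disabled-stats fallbacks expand to ((void)0), the same call site compiles unchanged when stats are off.

/* Hypothetical call site inside the runtime (the file is compiled as C++). */
void __kmp_example_entry(void) {
  KMP_COUNT_BLOCK(OMP_PARALLEL);            /* bump this thread's counter      */
  KMP_PUSH_PARTITIONED_TIMER(OMP_parallel); /* start timing this region        */
  /* ... work being measured ... */
  KMP_POP_PARTITIONED_TIMER();              /* stop; ticks fold into the stat  */
}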
ourParallelKmeans.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <omp.h> typedef struct Point { double x; double y; } point; int main(int argc, char **argv) { printf("Maximum number of Threads = %d\n", omp_get_max_threads()); srand(69420); int stdin_input; int k = 2; stdin_input = 0; printf("Number of Clusters (as an integer bigger than 1):\n"); scanf("%d", &stdin_input); if (stdin_input < 2) { printf("Invalid number of Clusters, defaulting to 2\n"); } else { k = stdin_input; } int n = 10; stdin_input = 0; printf("Number of Points (as an integer bigger than 9):\n"); scanf("%d", &stdin_input); if (stdin_input < 10) { printf("Invalid number of Points, defaulting to 10\n"); } else { n = stdin_input; } int max_executions = 1; stdin_input = 0; printf("Number of Executions (as an integer bigger than 0):\n"); scanf("%d", &stdin_input); if (stdin_input < 1) { printf("Invalid number of Executions, defaulting to 1\n"); } else { max_executions = stdin_input; } point *points; points = (point *)malloc(sizeof(struct Point) * n); for (int i = 0; i < n; i++) { points[i].x = (double)rand() / (double)(RAND_MAX / 10); points[i].y = (double)rand() / (double)(RAND_MAX / 10); } point centroids[k], original_centroids[k]; for (int i = 0; i < k; i++) { centroids[i].x = original_centroids[i].x = (double)rand() / (double)(RAND_MAX / 10); centroids[i].y = original_centroids[i].y = (double)rand() / (double)(RAND_MAX / 10); } point **clusters; clusters = (point **)malloc(sizeof(point *) * k); for (int i = 0; i < k; i++) { clusters[i] = (point *)malloc(sizeof(struct Point) * n); } double meanExecTime = 0; int execution = 0; point previous_centroids[k]; int iterations = 0, changed = 1; while (execution < max_executions) { for (int i = 0; i < k; i++) { centroids[i].x = original_centroids[i].x; centroids[i].y = original_centroids[i].y; } changed = 1; iterations = 0; double b4 = omp_get_wtime(); for (; changed; iterations++) { int clusters_size[k]; for (int i = 0; i < k; i++) { clusters_size[i] = 0; } #pragma omp parallel for schedule(static) for (int i = 0; i < n; i++) { int cluster_closest_to = 0, point_write_position; double distance, smallest_distance; for (int j = 0; j < k; j++) { distance = sqrt(powf((centroids[j].x - points[i].x), 2) + powf((centroids[j].y - points[i].y), 2)); if (j == 0) { smallest_distance = distance; } else { if (distance < smallest_distance) { smallest_distance = distance; cluster_closest_to = j; } } } #pragma omp critical point_write_position = clusters_size[cluster_closest_to]++; clusters[cluster_closest_to][point_write_position] = points[i]; } int has_changed = 1; #pragma omp parallel for schedule(dynamic) for (int i = 0; i < k; i++) { double x = 0, y = 0; for (int j = 0; j < clusters_size[i]; j++) { x += clusters[i][j].x; y += clusters[i][j].y; } if (!(x == 0 && y == 0)) { previous_centroids[i] = centroids[i]; centroids[i].x = (double)x / clusters_size[i]; centroids[i].y = (double)y / clusters_size[i]; if (!(((previous_centroids[i].x - 0.00001f) < centroids[i].x && (previous_centroids[i].x + 0.00001f) > centroids[i].x) && ((previous_centroids[i].y - 0.00001f) < centroids[i].y && (previous_centroids[i].y + 0.00001f) > centroids[i].y))) { has_changed = 0; } } } changed = !has_changed; } double time_delta = (omp_get_wtime() - b4); printf("Time = %f seconds | execution = %d\n", time_delta, execution + 1); meanExecTime += time_delta; execution++; } printf("Average time of the %d executions: %f\n", execution, meanExecTime / execution); stdin_input = 0; printf("If you want to see 
the results please insert '1'\n"); scanf("%d", &stdin_input); if (stdin_input == 1) { for (int i = 0; i < k; i++) { printf("Centroid %d -> (%f,%f)\n", i, centroids[i].x, centroids[i].y); } printf("The algorithm converged in %d iterations\n", iterations); } return 0; }
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <omp.h> typedef struct Point { double x; double y; } point; int main(int argc, char **argv) { printf("Maximum number of Threads = %d\n", omp_get_max_threads()); srand(69420); int stdin_input; int k = 2; stdin_input = 0; printf("Number of Clusters (as an integer bigger than 1):\n"); scanf("%d", &stdin_input); if (stdin_input < 2) { printf("Invalid number of Clusters, defaulting to 2\n"); } else { k = stdin_input; } int n = 10; stdin_input = 0; printf("Number of Points (as an integer bigger than 9):\n"); scanf("%d", &stdin_input); if (stdin_input < 10) { printf("Invalid number of Points, defaulting to 10\n"); } else { n = stdin_input; } int max_executions = 1; stdin_input = 0; printf("Number of Executions (as an integer bigger than 0):\n"); scanf("%d", &stdin_input); if (stdin_input < 1) { printf("Invalid number of Executions, defaulting to 1\n"); } else { max_executions = stdin_input; } point *points; points = (point *)malloc(sizeof(struct Point) * n); for (int i = 0; i < n; i++) { points[i].x = (double)rand() / (double)(RAND_MAX / 10); points[i].y = (double)rand() / (double)(RAND_MAX / 10); } point centroids[k], original_centroids[k]; for (int i = 0; i < k; i++) { centroids[i].x = original_centroids[i].x = (double)rand() / (double)(RAND_MAX / 10); centroids[i].y = original_centroids[i].y = (double)rand() / (double)(RAND_MAX / 10); } point **clusters; clusters = (point **)malloc(sizeof(point *) * k); for (int i = 0; i < k; i++) { clusters[i] = (point *)malloc(sizeof(struct Point) * n); } double meanExecTime = 0; int execution = 0; point previous_centroids[k]; int iterations = 0, changed = 1; while (execution < max_executions) { for (int i = 0; i < k; i++) { centroids[i].x = original_centroids[i].x; centroids[i].y = original_centroids[i].y; } changed = 1; iterations = 0; double b4 = omp_get_wtime(); for (; changed; iterations++) { int clusters_size[k]; for (int i = 0; i < k; i++) { clusters_size[i] = 0; } for (int i = 0; i < n; i++) { int cluster_closest_to = 0, point_write_position; double distance, smallest_distance; for (int j = 0; j < k; j++) { distance = sqrt(powf((centroids[j].x - points[i].x), 2) + powf((centroids[j].y - points[i].y), 2)); if (j == 0) { smallest_distance = distance; } else { if (distance < smallest_distance) { smallest_distance = distance; cluster_closest_to = j; } } } point_write_position = clusters_size[cluster_closest_to]++; clusters[cluster_closest_to][point_write_position] = points[i]; } int has_changed = 1; for (int i = 0; i < k; i++) { double x = 0, y = 0; for (int j = 0; j < clusters_size[i]; j++) { x += clusters[i][j].x; y += clusters[i][j].y; } if (!(x == 0 && y == 0)) { previous_centroids[i] = centroids[i]; centroids[i].x = (double)x / clusters_size[i]; centroids[i].y = (double)y / clusters_size[i]; if (!(((previous_centroids[i].x - 0.00001f) < centroids[i].x && (previous_centroids[i].x + 0.00001f) > centroids[i].x) && ((previous_centroids[i].y - 0.00001f) < centroids[i].y && (previous_centroids[i].y + 0.00001f) > centroids[i].y))) { has_changed = 0; } } } changed = !has_changed; } double time_delta = (omp_get_wtime() - b4); printf("Time = %f seconds | execution = %d\n", time_delta, execution + 1); meanExecTime += time_delta; execution++; } printf("Average time of the %d executions: %f\n", execution, meanExecTime / execution); stdin_input = 0; printf("If you want to see the results please insert '1'\n"); scanf("%d", &stdin_input); if (stdin_input == 1) { for (int i = 0; i < 
k; i++) { printf("Centroid %d -> (%f,%f)\n", i, centroids[i].x, centroids[i].y); } printf("The algorithm converged in %d iterations\n", iterations); } return 0; }
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <omp.h> typedef struct Point { double x; double y; } point; int main(int argc, char **argv) { printf("Maximum number of Threads = %d\n", omp_get_max_threads()); srand(69420); int stdin_input; int k = 2; stdin_input = 0; printf("Number of Clusters (as an integer bigger than 1):\n"); scanf("%d", &stdin_input); if (stdin_input < 2) { printf("Invalid number of Clusters, defaulting to 2\n"); } else { k = stdin_input; } int n = 10; stdin_input = 0; printf("Number of Points (as an integer bigger than 9):\n"); scanf("%d", &stdin_input); if (stdin_input < 10) { printf("Invalid number of Points, defaulting to 10\n"); } else { n = stdin_input; } int max_executions = 1; stdin_input = 0; printf("Number of Executions (as an integer bigger than 0):\n"); scanf("%d", &stdin_input); if (stdin_input < 1) { printf("Invalid number of Executions, defaulting to 1\n"); } else { max_executions = stdin_input; } point *points; points = (point *)malloc(sizeof(struct Point) * n); for (int i = 0; i < n; i++) { points[i].x = (double)rand() / (double)(RAND_MAX / 10); points[i].y = (double)rand() / (double)(RAND_MAX / 10); } point centroids[k], original_centroids[k]; for (int i = 0; i < k; i++) { centroids[i].x = original_centroids[i].x = (double)rand() / (double)(RAND_MAX / 10); centroids[i].y = original_centroids[i].y = (double)rand() / (double)(RAND_MAX / 10); } point **clusters; clusters = (point **)malloc(sizeof(point *) * k); for (int i = 0; i < k; i++) { clusters[i] = (point *)malloc(sizeof(struct Point) * n); } double meanExecTime = 0; int execution = 0; point previous_centroids[k]; int iterations = 0, changed = 1; while (execution < max_executions) { for (int i = 0; i < k; i++) { centroids[i].x = original_centroids[i].x; centroids[i].y = original_centroids[i].y; } changed = 1; iterations = 0; double b4 = omp_get_wtime(); for (; changed; iterations++) { int clusters_size[k]; for (int i = 0; i < k; i++) { clusters_size[i] = 0; } #pragma omp parallel for schedule(static) for (int i = 0; i < n; i++) { int cluster_closest_to = 0, point_write_position; double distance, smallest_distance; for (int j = 0; j < k; j++) { distance = sqrt(powf((centroids[j].x - points[i].x), 2) + powf((centroids[j].y - points[i].y), 2)); if (j == 0) { smallest_distance = distance; } else { if (distance < smallest_distance) { smallest_distance = distance; cluster_closest_to = j; } } } #pragma omp critical point_write_position = clusters_size[cluster_closest_to]++; clusters[cluster_closest_to][point_write_position] = points[i]; } int has_changed = 1; #pragma omp parallel for schedule(dynamic) for (int i = 0; i < k; i++) { double x = 0, y = 0; for (int j = 0; j < clusters_size[i]; j++) { x += clusters[i][j].x; y += clusters[i][j].y; } if (!(x == 0 && y == 0)) { previous_centroids[i] = centroids[i]; centroids[i].x = (double)x / clusters_size[i]; centroids[i].y = (double)y / clusters_size[i]; if (!(((previous_centroids[i].x - 0.00001f) < centroids[i].x && (previous_centroids[i].x + 0.00001f) > centroids[i].x) && ((previous_centroids[i].y - 0.00001f) < centroids[i].y && (previous_centroids[i].y + 0.00001f) > centroids[i].y))) { has_changed = 0; } } } changed = !has_changed; } double time_delta = (omp_get_wtime() - b4); printf("Time = %f seconds | execution = %d\n", time_delta, execution + 1); meanExecTime += time_delta; execution++; } printf("Average time of the %d executions: %f\n", execution, meanExecTime / execution); stdin_input = 0; printf("If you want to see 
the results please insert '1'\n"); scanf("%d", &stdin_input); if (stdin_input == 1) { for (int i = 0; i < k; i++) { printf("Centroid %d -> (%f,%f)\n", i, centroids[i].x, centroids[i].y); } printf("The algorithm converged in %d iterations\n", iterations); } return 0; }
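In the parallel variants above, every point assignment funnels through one unnamed #pragma omp critical just to reserve a slot via clusters_size[cluster_closest_to]++, which serializes the hottest loop. A sketch of a lighter-weight drop-in replacement for that loop, reusing the surrounding declarations and assuming OpenMP 3.1 or later; closest_centroid() is a hypothetical helper standing in for the inner distance loop. The atomic capture reserves the slot without a lock, and the subsequent write is race-free because each (cluster, position) pair is claimed by exactly one thread.

#pragma omp parallel for schedule(static)
for (int i = 0; i < n; i++) {
    int c = closest_centroid(points[i], centroids, k); /* hypothetical helper */
    int pos;
#pragma omp atomic capture
    pos = clusters_size[c]++;     /* reserve a unique slot without a lock */
    clusters[c][pos] = points[i]; /* safe: pos is unique within cluster c */
}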
GB_unaryop__abs_uint64_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint64_int64 // op(A') function: GB_tran__abs_uint64_int64 // C type: uint64_t // A type: int64_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint64_int64 ( uint64_t *Cx, // Cx and Ax may be aliased int64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint64_int64 // op(A') function: GB_tran__abs_uint64_int64 // C type: uint64_t // A type: int64_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint64_int64 ( uint64_t *Cx, // Cx and Ax may be aliased int64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint64_int64 // op(A') function: GB_tran__abs_uint64_int64 // C type: uint64_t // A type: int64_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint64_int64 ( uint64_t *Cx, // Cx and Ax may be aliased int64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
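Since the kernel above is assembled entirely from macros, it can be hard to read; the following is a manual expansion of the apply loop in GB_unop__abs_uint64_int64 (a reading aid, not code that exists in the Generated/ folder). Note that for this type pair the unary op itself is the identity (cij = aij), so all of the work is the int64_t-to-uint64_t cast.

    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int64_t aij = Ax [p] ;          /* GB_GETA    */
        uint64_t z = (uint64_t) aij ;   /* GB_CASTING */
        Cx [p] = z ;                    /* GB_OP      */
    }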
papi.c
#include "XSbench_header.h" void counter_init( int *eventset, int *num_papi_events ) { char error_str[PAPI_MAX_STR_LEN]; // int events[] = {PAPI_TOT_INS,PAPI_BR_INS,PAPI_SR_INS}; int events[] = {PAPI_TOT_CYC,PAPI_L3_TCM}; int stat; #if OMP == 1 int thread = omp_get_thread_num(); #else int thread = 0; #endif if( thread == 0 ) printf("Initializing PAPI counters...\n"); *num_papi_events = sizeof(events) / sizeof(int); if ((stat = PAPI_thread_init((long unsigned int (*)(void)) omp_get_thread_num)) != PAPI_OK){ PAPI_perror("PAPI_thread_init"); exit(1); } if ( (stat= PAPI_create_eventset(eventset)) != PAPI_OK){ PAPI_perror("PAPI_create_eventset"); exit(1); } for( int i = 0; i < *num_papi_events; i++ ){ if ((stat=PAPI_add_event(*eventset,events[i])) != PAPI_OK){ PAPI_perror("PAPI_add_event"); exit(1); } } if ((stat=PAPI_start(*eventset)) != PAPI_OK){ PAPI_perror("PAPI_start"); exit(1); } } // Stops the papi counters and prints results void counter_stop( int * eventset, int num_papi_events ) { int * events = malloc(num_papi_events * sizeof(int)); int n = num_papi_events; PAPI_list_events( *eventset, events, &n ); PAPI_event_info_t info; long_long * values = malloc( num_papi_events * sizeof(long_long)); PAPI_stop(*eventset, values); int thread = omp_get_thread_num(); #pragma omp critical (papi) { printf("Thread %d\n", thread); for( int i = 0; i < num_papi_events; i++ ) { PAPI_get_event_info(events[i], &info); printf("%-15lld\t%s\t%s\n", values[i],info.symbol,info.long_descr); } free(events); free(values); } }
#include "XSbench_header.h" void counter_init(int *eventset, int *num_papi_events) { char error_str[PAPI_MAX_STR_LEN]; //int events[] = {PAPI_TOT_INS, PAPI_BR_INS, PAPI_SR_INS}; int events[] = {PAPI_TOT_CYC, PAPI_L3_TCM}; int stat; #if OMP == 1 int thread = omp_get_thread_num(); #else int thread = 0; #endif if (thread == 0) printf("Initializing PAPI counters...\n"); *num_papi_events = sizeof(events) / sizeof(int); if ((stat = PAPI_thread_init((long unsigned int (*) (void))omp_get_thread_num)) != PAPI_OK) { PAPI_perror("PAPI_thread_init"); exit(1); } if ((stat = PAPI_create_eventset(eventset)) != PAPI_OK) { PAPI_perror("PAPI_create_eventset"); exit(1); } for (int i = 0; i < *num_papi_events; i++) { if ((stat = PAPI_add_event(*eventset, events[i])) != PAPI_OK) { PAPI_perror("PAPI_add_event"); exit(1); } } if ((stat = PAPI_start(*eventset)) != PAPI_OK) { PAPI_perror("PAPI_start"); exit(1); } } //Stops the papi counters and prints results void counter_stop(int *eventset, int num_papi_events) { int *events = malloc(num_papi_events * sizeof(int)); int n = num_papi_events; PAPI_list_events(*eventset, events, &n); PAPI_event_info_t info; long_long *values = malloc(num_papi_events * sizeof(long_long)); PAPI_stop(*eventset, values); int thread = omp_get_thread_num(); printf("Thread %d\n", thread); for (int i = 0; i < num_papi_events; i++) { PAPI_get_event_info(events[i], &info); printf("%-15lld\t%s\t%s\n", values[i], info.symbol, info.long_descr); } free(events); free(values); }
#include "XSbench_header.h" void counter_init(int *eventset, int *num_papi_events) { char error_str[PAPI_MAX_STR_LEN]; //int events[] = {PAPI_TOT_INS, PAPI_BR_INS, PAPI_SR_INS}; int events[] = {PAPI_TOT_CYC, PAPI_L3_TCM}; int stat; #if OMP == 1 int thread = omp_get_thread_num(); #else int thread = 0; #endif if (thread == 0) printf("Initializing PAPI counters...\n"); *num_papi_events = sizeof(events) / sizeof(int); if ((stat = PAPI_thread_init((long unsigned int (*) (void))omp_get_thread_num)) != PAPI_OK) { PAPI_perror("PAPI_thread_init"); exit(1); } if ((stat = PAPI_create_eventset(eventset)) != PAPI_OK) { PAPI_perror("PAPI_create_eventset"); exit(1); } for (int i = 0; i < *num_papi_events; i++) { if ((stat = PAPI_add_event(*eventset, events[i])) != PAPI_OK) { PAPI_perror("PAPI_add_event"); exit(1); } } if ((stat = PAPI_start(*eventset)) != PAPI_OK) { PAPI_perror("PAPI_start"); exit(1); } } //Stops the papi counters and prints results void counter_stop(int *eventset, int num_papi_events) { int *events = malloc(num_papi_events * sizeof(int)); int n = num_papi_events; PAPI_list_events(*eventset, events, &n); PAPI_event_info_t info; long_long *values = malloc(num_papi_events * sizeof(long_long)); PAPI_stop(*eventset, values); int thread = omp_get_thread_num(); #pragma omp critical (papi) { printf("Thread %d\n", thread); for (int i = 0; i < num_papi_events; i++) { PAPI_get_event_info(events[i], &info); printf("%-15lld\t%s\t%s\n", values[i], info.symbol, info.long_descr); } free(events); free(values); } }
splay.c
/* Copyright 2007, 2008 Daniel Zerbino ([email protected]) This file is part of Velvet. Velvet is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Velvet is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Velvet; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> #include <stdio.h> #ifdef _OPENMP #include <omp.h> #endif #include "globals.h" #include "recycleBin.h" #include "kmer.h" #include "utility.h" #define CHUNKSIZE 10000 static RecycleBin *treeMemory = NULL; struct splayNode_st { Kmer kmer; Coordinate position; struct splayNode_st *left; struct splayNode_st *right; IDnum seqID; } ATTRIBUTE_PACKED; typedef struct splayNode_st SplayNode; typedef struct splayNode_st SplayTree; #ifdef _OPENMP void initSplayTreeMemory(void) { int n; n = omp_get_max_threads(); #pragma omp critical if (treeMemory == NULL) treeMemory = newRecycleBinArray(n, sizeof(SplayNode), CHUNKSIZE); } #endif static SplayNode *allocateSplayNode() { #ifdef _OPENMP #ifdef DEBUG if (treeMemory == NULL) { velvetLog("The memory for splay trees seems uninitialised, " "this is probably a bug, aborting.\n"); abort(); } #endif return allocatePointer(getRecycleBinInArray(treeMemory, omp_get_thread_num())); #else if (treeMemory == NULL) treeMemory = newRecycleBin(sizeof(SplayNode), CHUNKSIZE); return allocatePointer(treeMemory); #endif } void destroyAllSplayTrees() { #ifdef _OPENMP destroyRecycleBinArray(treeMemory); #else destroyRecycleBin(treeMemory); #endif treeMemory = NULL; } /* This function can be called only if K2 has a left child */ /* Perform a rotate between a node (K2) and its left child */ /* Update heights, then return new root */ static SplayNode *SingleRotateWithLeft(SplayNode * K2) { SplayNode *K1; K1 = K2->left; K2->left = K1->right; K1->right = K2; return K1; /* New root */ } /* This function can be called only if K1 has a right child */ /* Perform a rotate between a node (K1) and its right child */ /* Update heights, then return new root */ static SplayNode *SingleRotateWithRight(SplayNode * K1) { SplayNode *K2; K2 = K1->right; K1->right = K2->left; K2->left = K1; return K2; /* New root */ } /* Top-down splay procedure, */ /* not requiring kmer to be in tree */ static SplayTree *Splay(Kmer * kmer, SplayTree * T) { SplayNode Header; SplayNode *LeftTreeMax, *RightTreeMin; if (T == NULL) return NULL; Header.left = Header.right = NULL; LeftTreeMax = RightTreeMin = &Header; while (compareKmers(kmer, &(T->kmer))) { if (compareKmers(kmer, &(T->kmer)) < 0) { if (T->left == NULL) break; if (compareKmers(kmer, &(T->left->kmer)) < 0) T = SingleRotateWithLeft(T); if (T->left == NULL) break; /* Link right */ RightTreeMin->left = T; RightTreeMin = T; T = T->left; } else { if (T->right == NULL) break; if (compareKmers(kmer, &(T->right->kmer)) > 0) T = SingleRotateWithRight(T); if (T->right == NULL) break; /* Link left */ LeftTreeMax->right = T; LeftTreeMax = T; T = T->right; } } /* while kmer != T->kmer */ /* Reassemble */ LeftTreeMax->right = T->left; RightTreeMin->left = T->right; T->left = Header.right; T->right = Header.left; return 
T; } Kmer * findInTree(Kmer * X, SplayTree ** T) { *T = Splay(X, *T); return &((*T)->kmer); } void insertIntoTree(Kmer * kmer, SplayTree ** T) { SplayNode *newNode; if (*T == NULL) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->left = newNode->right = NULL; *T = newNode; return; } *T = Splay(kmer, *T); if (compareKmers(kmer, &((*T)->kmer)) < 0) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->left = (*T)->left; newNode->right = *T; (*T)->left = NULL; *T = newNode; } else if (compareKmers(&((*T)->kmer), kmer) < 0) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->right = (*T)->right; newNode->left = *T; (*T)->right = NULL; *T = newNode; } } boolean findOrInsertOccurenceInSplayTree(Kmer * kmer, IDnum * seqID, Coordinate * position, SplayTree ** T) { SplayNode *newNode; if (*T == NULL) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->seqID = *seqID; newNode->position = *position; newNode->left = newNode->right = NULL; *T = newNode; return false; } *T = Splay(kmer, *T); if (compareKmers(kmer, &((*T)->kmer)) < 0) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->seqID = *seqID; newNode->position = *position; newNode->left = (*T)->left; newNode->right = *T; (*T)->left = NULL; *T = newNode; return false; } else if (compareKmers(kmer, &((*T)->kmer)) > 0) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->seqID = *seqID; newNode->position = *position; newNode->right = (*T)->right; newNode->left = *T; (*T)->right = NULL; *T = newNode; return false; } else { *seqID = (*T)->seqID; *position = (*T)->position; return true; } }
#include <stdlib.h> #include <stdio.h> #include "globals.h" #include "recycleBin.h" #include "kmer.h" #include "utility.h" #define CHUNKSIZE 10000 static RecycleBin *treeMemory = NULL; struct splayNode_st { Kmer kmer; Coordinate position; struct splayNode_st *left; struct splayNode_st *right; IDnum seqID; } ATTRIBUTE_PACKED; typedef struct splayNode_st SplayNode; typedef struct splayNode_st SplayTree; static SplayNode * allocateSplayNode() { if (treeMemory == NULL) treeMemory = newRecycleBin(sizeof(SplayNode), CHUNKSIZE); return allocatePointer(treeMemory); } void destroyAllSplayTrees() { destroyRecycleBin(treeMemory); treeMemory = NULL; } /* This function can be called only if K2 has a left child */ /* Perform a rotate between a node (K2) and its left child */ /* Update heights, then return new root */ static SplayNode * SingleRotateWithLeft(SplayNode * K2) { SplayNode *K1; K1 = K2->left; K2->left = K1->right; K1->right = K2; return K1; /* New root */ } /* This function can be called only if K1 has a right child */ /* Perform a rotate between a node (K1) and its right child */ /* Update heights, then return new root */ static SplayNode * SingleRotateWithRight(SplayNode * K1) { SplayNode *K2; K2 = K1->right; K1->right = K2->left; K2->left = K1; return K2; /* New root */ } /* Top-down splay procedure, */ /* not requiring kmer to be in tree */ static SplayTree * Splay(Kmer * kmer, SplayTree * T) { SplayNode Header; SplayNode *LeftTreeMax, *RightTreeMin; if (T == NULL) return NULL; Header.left = Header.right = NULL; LeftTreeMax = RightTreeMin = &Header; while (compareKmers(kmer, &(T->kmer))) { if (compareKmers(kmer, &(T->kmer)) < 0) { if (T->left == NULL) break; if (compareKmers(kmer, &(T->left->kmer)) < 0) T = SingleRotateWithLeft(T); if (T->left == NULL) break; /* Link right */ RightTreeMin->left = T; RightTreeMin = T; T = T->left; } else { if (T->right == NULL) break; if (compareKmers(kmer, &(T->right->kmer)) > 0) T = SingleRotateWithRight(T); if (T->right == NULL) break; /* Link left */ LeftTreeMax->right = T; LeftTreeMax = T; T = T->right; } } /* while kmer != T->kmer */ /* Reassemble */ LeftTreeMax->right = T->left; RightTreeMin->left = T->right; T->left = Header.right; T->right = Header.left; return T; } Kmer * findInTree(Kmer * X, SplayTree ** T) { *T = Splay(X, *T); return &((*T)->kmer); } void insertIntoTree(Kmer * kmer, SplayTree ** T) { SplayNode *newNode; if (*T == NULL) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->left = newNode->right = NULL; *T = newNode; return; } *T = Splay(kmer, *T); if (compareKmers(kmer, &((*T)->kmer)) < 0) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->left = (*T)->left; newNode->right = *T; (*T)->left = NULL; *T = newNode; } else if (compareKmers(&((*T)->kmer), kmer) < 0) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->right = (*T)->right; newNode->left = *T; (*T)->right = NULL; *T = newNode; } } boolean findOrInsertOccurenceInSplayTree(Kmer * kmer, IDnum * seqID, Coordinate * position, SplayTree ** T) { SplayNode *newNode; if (*T == NULL) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->seqID = *seqID; newNode->position = *position; newNode->left = newNode->right = NULL; *T = newNode; return false; } *T = Splay(kmer, *T); if (compareKmers(kmer, &((*T)->kmer)) < 0) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->seqID = *seqID; newNode->position = *position; newNode->left = (*T)->left; newNode->right = *T; (*T)->left = NULL; *T = newNode; return false; } else if (compareKmers(kmer, &((*T)->kmer)) > 0) { newNode = allocateSplayNode();
copyKmers(&(newNode->kmer), kmer); newNode->seqID = *seqID; newNode->position = *position; newNode->right = (*T)->right; newNode->left = *T; (*T)->right = NULL; *T = newNode; return false; } else { *seqID = (*T)->seqID; *position = (*T)->position; return true; } }
#include <stdlib.h> #include <stdio.h> #ifdef _OPENMP #include <omp.h> #endif #include "globals.h" #include "recycleBin.h" #include "kmer.h" #include "utility.h" #define CHUNKSIZE 10000 static RecycleBin *treeMemory = NULL; struct splayNode_st { Kmer kmer; Coordinate position; struct splayNode_st *left; struct splayNode_st *right; IDnum seqID; } ATTRIBUTE_PACKED; typedef struct splayNode_st SplayNode; typedef struct splayNode_st SplayTree; #ifdef _OPENMP void initSplayTreeMemory(void) { int n; n = omp_get_max_threads(); #pragma omp critical if (treeMemory == NULL) treeMemory = newRecycleBinArray(n, sizeof(SplayNode), CHUNKSIZE); } #endif static SplayNode * allocateSplayNode() { #ifdef _OPENMP #ifdef DEBUG if (treeMemory == NULL) { velvetLog("The memory for splay trees seems uninitialised, " "this is probably a bug, aborting.\n"); abort(); } #endif return allocatePointer(getRecycleBinInArray(treeMemory, omp_get_thread_num())); #else if (treeMemory == NULL) treeMemory = newRecycleBin(sizeof(SplayNode), CHUNKSIZE); return allocatePointer(treeMemory); #endif } void destroyAllSplayTrees() { #ifdef _OPENMP destroyRecycleBinArray(treeMemory); #else destroyRecycleBin(treeMemory); #endif treeMemory = NULL; } /* This function can be called only if K2 has a left child */ /* Perform a rotate between a node (K2) and its left child */ /* Update heights, then return new root */ static SplayNode * SingleRotateWithLeft(SplayNode * K2) { SplayNode *K1; K1 = K2->left; K2->left = K1->right; K1->right = K2; return K1; /* New root */ } /* This function can be called only if K1 has a right child */ /* Perform a rotate between a node (K1) and its right child */ /* Update heights, then return new root */ static SplayNode * SingleRotateWithRight(SplayNode * K1) { SplayNode *K2; K2 = K1->right; K1->right = K2->left; K2->left = K1; return K2; /* New root */ } /* Top-down splay procedure, */ /* not requiring kmer to be in tree */ static SplayTree * Splay(Kmer * kmer, SplayTree * T) { SplayNode Header; SplayNode *LeftTreeMax, *RightTreeMin; if (T == NULL) return NULL; Header.left = Header.right = NULL; LeftTreeMax = RightTreeMin = &Header; while (compareKmers(kmer, &(T->kmer))) { if (compareKmers(kmer, &(T->kmer)) < 0) { if (T->left == NULL) break; if (compareKmers(kmer, &(T->left->kmer)) < 0) T = SingleRotateWithLeft(T); if (T->left == NULL) break; /* Link right */ RightTreeMin->left = T; RightTreeMin = T; T = T->left; } else { if (T->right == NULL) break; if (compareKmers(kmer, &(T->right->kmer)) > 0) T = SingleRotateWithRight(T); if (T->right == NULL) break; /* Link left */ LeftTreeMax->right = T; LeftTreeMax = T; T = T->right; } } /* while kmer != T->kmer */ /* Reassemble */ LeftTreeMax->right = T->left; RightTreeMin->left = T->right; T->left = Header.right; T->right = Header.left; return T; } Kmer * findInTree(Kmer * X, SplayTree ** T) { *T = Splay(X, *T); return &((*T)->kmer); } void insertIntoTree(Kmer * kmer, SplayTree ** T) { SplayNode *newNode; if (*T == NULL) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->left = newNode->right = NULL; *T = newNode; return; } *T = Splay(kmer, *T); if (compareKmers(kmer, &((*T)->kmer)) < 0) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->left = (*T)->left; newNode->right = *T; (*T)->left = NULL; *T = newNode; } else if (compareKmers(&((*T)->kmer), kmer) < 0) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->right = (*T)->right; newNode->left = *T; (*T)->right = NULL; *T = newNode; } } boolean 
findOrInsertOccurenceInSplayTree(Kmer * kmer, IDnum * seqID, Coordinate * position, SplayTree ** T) { SplayNode *newNode; if (*T == NULL) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->seqID = *seqID; newNode->position = *position; newNode->left = newNode->right = NULL; *T = newNode; return false; } *T = Splay(kmer, *T); if (compareKmers(kmer, &((*T)->kmer)) < 0) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->seqID = *seqID; newNode->position = *position; newNode->left = (*T)->left; newNode->right = *T; (*T)->left = NULL; *T = newNode; return false; } else if (compareKmers(kmer, &((*T)->kmer)) > 0) { newNode = allocateSplayNode(); copyKmers(&(newNode->kmer), kmer); newNode->seqID = *seqID; newNode->position = *position; newNode->right = (*T)->right; newNode->left = *T; (*T)->right = NULL; *T = newNode; return false; } else { *seqID = (*T)->seqID; *position = (*T)->position; return true; } }
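The OpenMP variant keys its allocator on thread identity: initSplayTreeMemory() sizes the RecycleBin array to omp_get_max_threads() (its lazy initialization is guarded by an unnamed critical, so concurrent first calls are safe), and allocateSplayNode() indexes the array with omp_get_thread_num(). A sketch of the intended call pattern, where the tree variable and the kmer source are placeholders rather than Velvet's actual driver code:

#ifdef _OPENMP
initSplayTreeMemory();            /* one-time setup, callable from any thread   */
#pragma omp parallel
{
    SplayTree *tree = NULL;       /* each thread grows only trees it owns       */
    Kmer kmer;
    /* ... obtain this thread's next kmer ... */
    insertIntoTree(&kmer, &tree); /* node comes from this thread's recycle bin  */
}
#endif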
GB_unop__one_uint8_uint8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__one_uint8_uint8 // op(A') function: GB_unop_tran__one_uint8_uint8 // C type: uint8_t // A type: uint8_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CAST(z, aij) \ ; ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ ; ; \ /* Cx [pC] = op (cast (aij)) */ \ ; ; \ Cx [pC] = 1 ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__one_uint8_uint8 ( uint8_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__one_uint8_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__one_uint8_uint8 // op(A') function: GB_unop_tran__one_uint8_uint8 // C type: uint8_t // A type: uint8_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CAST(z, aij) \ ; ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ ; ; \ /* Cx [pC] = op (cast (aij)) */ \ ; ; \ Cx [pC] = 1 ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__one_uint8_uint8 ( uint8_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { ; ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__one_uint8_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__one_uint8_uint8 // op(A') function: GB_unop_tran__one_uint8_uint8 // C type: uint8_t // A type: uint8_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CAST(z, aij) \ ; ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ ; ; \ /* Cx [pC] = op (cast (aij)) */ \ ; ; \ Cx [pC] = 1 ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__one_uint8_uint8 ( uint8_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__one_uint8_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
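Both kernels in this file compile down to stubs returning GrB_NO_VALUE when GB_DISABLE is true, i.e. when the operator or the uint8 type has been compiled out through the GxB_NO_* flags in GB_control.h. A sketch of the dispatch idea this enables (illustrative only; generic_apply is a hypothetical stand-in for GraphBLAS's real function-pointer-based fallback):

    GrB_Info info = GB_unop_apply__one_uint8_uint8 (Cx, Ax, anz, nthreads) ;
    if (info == GrB_NO_VALUE)
    {
        /* the hard-coded kernel was compiled out: fall back to a generic apply */
        info = generic_apply (Cx, Ax, anz, nthreads) ;
    }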
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
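For reference, a minimal usage sketch of the timing helper above, isolated from the benchmark harness. The prototype is repeated so the snippet compiles on its own, and the usleep call is only a stand-in for a timed kernel:

#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y); /* as defined above */

int main(void)
{
    struct timeval start, end, diff;
    gettimeofday(&start, 0);
    usleep(250 * 1000);                    /* stand-in for one timed TEST iteration */
    gettimeofday(&end, 0);
    timeval_subtract(&diff, &end, &start); /* diff = end - start, with the usec carry handled */
    printf("elapsed: %f s\n", diff.tv_sec + diff.tv_usec * 1.0e-6);
    return 0;
}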
/* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx = 64 + 8, Ny = 64 + 8, Nz = 64 + 8, Nt = 10; /* assumed defaults so the sizes are never read uninitialized */ if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source-to-source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 2048; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz - 4; i++) { for (j = 4; j < Ny - 4; j++) { for (k = 4; k < Nx - 4; k++) { A[(t + 1) % 2][i][j][k] = coef[0][i][j][k] * A[(t) % 2][i][j][k] + coef[1][i][j][k] * (A[(t) % 2][i - 1][j][k] + A[(t) % 2][i + 1][j][k]) + coef[2][i][j][k] * (A[(t) % 2][i][j - 1][k] + A[(t) % 2][i][j + 1][k]) + coef[3][i][j][k] * (A[(t) % 2][i][j][k - 1] + A[(t) % 2][i][j][k + 1]) + coef[4][i][j][k] * (A[(t) % 2][i - 2][j][k] + A[(t) % 2][i + 
2][j][k]) + coef[5][i][j][k] * (A[(t) % 2][i][j - 2][k] + A[(t) % 2][i][j + 2][k]) + coef[6][i][j][k] * (A[(t) % 2][i][j][k - 2] + A[(t) % 2][i][j][k + 2]) + coef[7][i][j][k] * (A[(t) % 2][i - 3][j][k] + A[(t) % 2][i + 3][j][k]) + coef[8][i][j][k] * (A[(t) % 2][i][j - 3][k] + A[(t) % 2][i][j + 3][k]) + coef[9][i][j][k] * (A[(t) % 2][i][j][k - 3] + A[(t) % 2][i][j][k + 3]) + coef[10][i][j][k] * (A[(t) % 2][i - 4][j][k] + A[(t) % 2][i + 4][j][k]) + coef[11][i][j][k] * (A[(t) % 2][i][j - 4][k] + A[(t) % 2][i][j + 4][k]) + coef[12][i][j][k] * (A[(t) % 2][i][j][k - 4] + A[(t) % 2][i][j][k + 4]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
/* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx = 64 + 8, Ny = 64 + 8, Nz = 64 + 8, Nt = 10; /* assumed defaults so the sizes are never read uninitialized */ if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source-to-source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 2048; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz - 4; i++) { for (j = 4; j < Ny - 4; j++) { for (k = 4; k < Nx - 4; k++) { A[(t + 1) % 2][i][j][k] = coef[0][i][j][k] * A[(t) % 2][i][j][k] + coef[1][i][j][k] * (A[(t) % 2][i - 1][j][k] + A[(t) % 2][i + 1][j][k]) + coef[2][i][j][k] * (A[(t) % 2][i][j - 1][k] + A[(t) % 2][i][j + 1][k]) + coef[3][i][j][k] * (A[(t) % 
2][i][j][k - 1] + A[(t) % 2][i][j][k + 1]) + coef[4][i][j][k] * (A[(t) % 2][i - 2][j][k] + A[(t) % 2][i + 2][j][k]) + coef[5][i][j][k] * (A[(t) % 2][i][j - 2][k] + A[(t) % 2][i][j + 2][k]) + coef[6][i][j][k] * (A[(t) % 2][i][j][k - 2] + A[(t) % 2][i][j][k + 2]) + coef[7][i][j][k] * (A[(t) % 2][i - 3][j][k] + A[(t) % 2][i + 3][j][k]) + coef[8][i][j][k] * (A[(t) % 2][i][j - 3][k] + A[(t) % 2][i][j + 3][k]) + coef[9][i][j][k] * (A[(t) % 2][i][j][k - 3] + A[(t) % 2][i][j][k + 3]) + coef[10][i][j][k] * (A[(t) % 2][i - 4][j][k] + A[(t) % 2][i + 4][j][k]) + coef[11][i][j][k] * (A[(t) % 2][i][j - 4][k] + A[(t) % 2][i][j + 4][k]) + coef[12][i][j][k] * (A[(t) % 2][i][j][k - 4] + A[(t) % 2][i][j][k + 4]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
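Note that all three variants above leave the stencil nest itself serial inside #pragma scop / #pragma endscop: the OpenMP pragmas differ only around the LIKWID markers, and loop-level parallelism is expected to come from a later source-to-source pass (e.g. PLUTO). For comparison, a hand-parallelized sketch of one time step over the same two-buffer array is given here; the function name, the private clause, and the static schedule are assumptions, not part of the benchmark:

#ifdef _OPENMP
#include <omp.h>
#endif

/* One Jacobi-style time step of the 25-point stencil: reads only buffer
   A[t % 2] and writes only A[(t + 1) % 2], so i iterations are independent. */
static void stencil_step(int t, int Nz, int Ny, int Nx, double ****A, double ****coef)
{
    int i, j, k;
#pragma omp parallel for private(j, k) schedule(static)
    for (i = 4; i < Nz - 4; i++) {
        for (j = 4; j < Ny - 4; j++) {
            for (k = 4; k < Nx - 4; k++) {
                A[(t + 1) % 2][i][j][k] =
                    coef[0][i][j][k] * A[t % 2][i][j][k] +
                    coef[1][i][j][k] * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k]) +
                    coef[2][i][j][k] * (A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k]) +
                    coef[3][i][j][k] * (A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]) +
                    coef[4][i][j][k] * (A[t % 2][i - 2][j][k] + A[t % 2][i + 2][j][k]) +
                    coef[5][i][j][k] * (A[t % 2][i][j - 2][k] + A[t % 2][i][j + 2][k]) +
                    coef[6][i][j][k] * (A[t % 2][i][j][k - 2] + A[t % 2][i][j][k + 2]) +
                    coef[7][i][j][k] * (A[t % 2][i - 3][j][k] + A[t % 2][i + 3][j][k]) +
                    coef[8][i][j][k] * (A[t % 2][i][j - 3][k] + A[t % 2][i][j + 3][k]) +
                    coef[9][i][j][k] * (A[t % 2][i][j][k - 3] + A[t % 2][i][j][k + 3]) +
                    coef[10][i][j][k] * (A[t % 2][i - 4][j][k] + A[t % 2][i + 4][j][k]) +
                    coef[11][i][j][k] * (A[t % 2][i][j - 4][k] + A[t % 2][i][j + 4][k]) +
                    coef[12][i][j][k] * (A[t % 2][i][j][k - 4] + A[t % 2][i][j][k + 4]);
            }
        }
    }
}

The time loop must stay outside the parallel region (each step consumes the previous step's output), which is why only the spatial loops are parallelized.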
GB_binop__eq_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint32) // A*D function (colscale): GB (_AxD__eq_uint32) // D*A function (rowscale): GB (_DxB__eq_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__eq_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__eq_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint32) // C=scalar+B GB (_bind1st__eq_uint32) // C=scalar+B' GB (_bind1st_tran__eq_uint32) // C=A+scalar GB (_bind2nd__eq_uint32) // C=A'+scalar GB (_bind2nd_tran__eq_uint32) // C type: bool // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_UINT32 || GxB_NO_EQ_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) alpha_scalar_in)) 
; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__eq_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__eq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) 
{ // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint32) // A*D function (colscale): GB (_AxD__eq_uint32) // D*A function (rowscale): GB (_DxB__eq_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__eq_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__eq_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint32) // C=scalar+B GB (_bind1st__eq_uint32) // C=scalar+B' GB (_bind1st_tran__eq_uint32) // C=A+scalar GB (_bind2nd__eq_uint32) // C=A'+scalar GB (_bind2nd_tran__eq_uint32) // C type: bool // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_UINT32 || GxB_NO_EQ_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) alpha_scalar_in)) 
; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__eq_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__eq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint32) // A*D function (colscale): GB (_AxD__eq_uint32) // D*A function (rowscale): GB (_DxB__eq_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__eq_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__eq_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint32) // C=scalar+B GB (_bind1st__eq_uint32) // C=scalar+B' GB (_bind1st_tran__eq_uint32) // C=A+scalar GB (_bind2nd__eq_uint32) // C=A'+scalar GB (_bind2nd_tran__eq_uint32) // C type: bool // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_UINT32 || GxB_NO_EQ_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) alpha_scalar_in)) 
; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__eq_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__eq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) 
{ // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
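The only difference between the two formatted variants above is the #pragma omp parallel for on the flat apply loops in the bind1st/bind2nd kernels. Stripped of the GB_* macros, the bind2nd pattern reduces to the following self-contained sketch (the function and parameter names are illustrative, not GraphBLAS API):

#include <stdbool.h>
#include <stdint.h>

/* Cx [p] = (Ax [p] == y) for every entry present; a NULL bitmap Ab means all
   anz entries exist. Mirrors GB (_bind2nd__eq_uint32) without GBB/GBX. */
static void bind2nd_eq_uint32(bool *Cx, const uint32_t *Ax, uint32_t y,
                              const int8_t *Ab, int64_t anz, int nthreads)
{
    int64_t p;
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
    {
        if (Ab != NULL && !Ab[p]) continue; /* entry not present in the bitmap */
        Cx[p] = (Ax[p] == y);
    }
}

A static schedule matches the generated code: every iteration does the same constant amount of work, so equal-sized chunks balance well and keep each thread's writes contiguous.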
pacset_rf_regressor.h
#ifndef PACSET_RF_REG #define PACSET_RF_REG #include <vector> #include <unordered_set> #include <fstream> #include <chrono> #include <iostream> #include "pacset_base_model.h" #include "packer.h" #include "config.h" #include "json_reader.h" #include "utils.h" #include "node.h" #include "MemoryMapped.h" #define NUM_FILES 10 #define BLOCK_LOGGING 1 template <typename T, typename F> class PacsetRandomForestRegressor: public PacsetBaseModel<T, F> { public: inline void setMembers(const std::vector<int> &bin_sizes, const std::vector<int> &bin_node_sizes, const std::vector<std::vector<int>> &bin_start){ PacsetBaseModel<T, F>::bin_sizes.clear(); std::copy(bin_sizes.begin(), bin_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_sizes)); std::copy(bin_node_sizes.begin(), bin_node_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_node_sizes)); for (auto i: bin_start) PacsetBaseModel<T, F>::bin_start.push_back(i); } inline void setBinNodeSizes(int pos, int siz){ PacsetBaseModel<T, F>::bin_node_sizes[pos] = siz; } inline void loadModel() { JSONReader<T, F> J; //J.convertSklToBins(PacsetBaseModel<T, F>::bins, J.convertSklToBinsRapidJson(PacsetBaseModel<T, F>::bins, PacsetBaseModel<T, F>::bin_sizes, PacsetBaseModel<T, F>::bin_start, PacsetBaseModel<T, F>::bin_node_sizes); } inline void pack(){ std::string layout = Config::getValue("layout"); auto bin = PacsetBaseModel<T, F>::bins[0]; int num_bins = std::stoi(Config::getValue("numthreads")); for(int i=0; i<num_bins; ++i){ Packer<T, F> packer_obj(layout); if(Config::getValue("intertwine") != std::string("notfound")) packer_obj.setDepthIntertwined(std::atoi(Config::getValue("intertwine").c_str())); //should pack in place packer_obj.pack(PacsetBaseModel<T, F>::bins[i], PacsetBaseModel<T, F>::bin_sizes[i], PacsetBaseModel<T, F>::bin_start[i] ); setBinNodeSizes(i, PacsetBaseModel<T, F>::bins[i].size()); } } inline int mmapAndPredict(const std::vector<T>& observation, std::vector<double> &preds, int obsnum) { int num_classes = std::stoi(Config::getValue("numclasses")); int num_threads = std::stoi(Config::getValue("numthreads")); int num_bins = PacsetBaseModel<T, F>::bin_sizes.size(); std::vector<double> elapsed_arr; std::string modelfname = Config::getValue("modelfilename"); MemoryMapped mmapped_obj(modelfname.c_str(), 0); Node<T, F> *data = (Node<T, F>*)mmapped_obj.getData(); std::unordered_set<int> blocks_accessed; int next_node = 0; int offset = 0; double leaf_sum = 0; std::vector<int> offsets; int curr_offset = 0; int total_num_trees = 0; for (auto val: PacsetBaseModel<T, F>::bin_node_sizes){ offsets.push_back(curr_offset); curr_offset += val; } /* reduction added: leaf_sum and total_num_trees were previously updated by all threads without synchronization */ #pragma omp parallel for num_threads(num_threads) reduction(+: leaf_sum, total_num_trees) for(int bin_counter=0; bin_counter<num_bins; ++bin_counter){ int block_number = 0; Node<T, F> *bin = data + offsets[bin_counter]; std::vector<int> curr_node(PacsetBaseModel<T, F>::bin_sizes[bin_counter]); std::vector<double> last_node(PacsetBaseModel<T, F>::bin_sizes[bin_counter]); int i, feature_num=0, number_not_in_leaf=0; T feature_val; int siz = PacsetBaseModel<T, F>::bin_sizes[bin_counter]; total_num_trees += siz; for(i=0; i<siz; ++i){ curr_node[i] = PacsetBaseModel<T, F>::bin_start[bin_counter][i]; //bin[curr_node[i]].printNode(); __builtin_prefetch(&bin[curr_node[i]], 0, 3); #ifdef BLOCK_LOGGING block_number = (curr_node[i] + offsets[bin_counter]) / BLOCK_SIZE; /* per-bin offsets[bin_counter] replaces the former shared block_offset accumulator, which was racy and iteration-order dependent */ #pragma omp critical blocks_accessed.insert(block_number); #endif } do{ number_not_in_leaf = 0; for( i=0; i<siz; ++i){ if(curr_node[i] > 0){ feature_num = bin[curr_node[i]].getFeature(); feature_val = observation[feature_num]; if(bin[curr_node[i]].getLeft() == -1){ last_node[i] = bin[curr_node[i]].getThreshold(); curr_node[i] = -1; } else { curr_node[i] = bin[curr_node[i]].nextNode(feature_val); __builtin_prefetch(&bin[curr_node[i]], 0, 3); ++number_not_in_leaf; } } } }while(number_not_in_leaf); double sum=0; for(i=0; i<siz; ++i){ sum += last_node[i]; } leaf_sum += sum; } preds.clear(); preds.push_back((double)leaf_sum); preds.push_back((double)total_num_trees); #ifdef BLOCK_LOGGING return blocks_accessed.size(); #else return 0; #endif } inline void predict(const std::vector<std::vector<T>>& observation, std::vector<int>& preds, std::vector<int>&results, bool mmap) { } inline void predict(const std::vector<std::vector<T>>& observation, std::vector<double>& preds, std::vector<double>&results, bool mmap) { //Predicts the regression value for a vector of observations //by calling mmapAndPredict per observation and dividing the //summed leaf values by the number of trees // int num_classes = std::stoi(Config::getValue("numclasses")); int num_bins; std::vector<double> elapsed_arr; int blocks; std::vector<int> num_blocks; int ct=1; for(auto single_obs : observation){ auto start = std::chrono::steady_clock::now(); if (mmap) blocks = mmapAndPredict(single_obs, preds, ct); else{ /* no separate in-memory path is implemented; both branches mmap */ blocks = mmapAndPredict(single_obs, preds, ct); } num_blocks.push_back(blocks); results.push_back((double)preds[0] / (double)preds[1] ); auto end = std::chrono::steady_clock::now(); ct+=1; } } inline void serialize() { auto bins = PacsetBaseModel<T, F>::bins; int num_classes = std::stoi(Config::getValue("numclasses")); int num_bins = bins.size(); std::vector<int> bin_sizes = PacsetBaseModel<T, F>::bin_sizes; std::vector<int> bin_node_sizes = PacsetBaseModel<T, F>::bin_node_sizes; std::vector<std::vector<int>> bin_start = PacsetBaseModel<T, F>::bin_start; std::string format = Config::getValue("format"); //Write the metadata needed to reconstruct bins and for prediction //TODO: change filename std::string filename; if(Config::getValue("metadatafilename") == std::string("notfound")) filename = "metadata.txt"; else filename = Config::getValue("metadatafilename"); std::fstream fout; fout.open(filename, std::ios::out ); //Number of classes fout<<num_classes<<"\n"; //Number of bins fout<<num_bins<<"\n"; //Number of trees in each bin for(auto i: bin_sizes){ fout<<i<<"\n"; } //Number of nodes in each bin for(auto i: bin_node_sizes){ fout<<i<<"\n"; } //start position of each bin for(auto bin: bin_start){ for(auto tree_start: bin){ fout<<tree_start<<"\n"; } } fout.close(); if(format == std::string("notfound") || format == std::string("binary")){ std::string modelfname = Config::getValue("packfilename"); std::string filename; if(modelfname != std::string("notfound")) filename = modelfname; else filename = "packedmodel.bin"; //Write the nodes fout.open(filename, std::ios::binary | std::ios::out ); Node<T, F> node_to_write; for(auto bin: bins){ for(auto node: bin){ node_to_write = node; fout.write((char*)&node_to_write, sizeof(node_to_write)); } } fout.close(); } else{ //Write the nodes std::string modelfname = Config::getValue("packfilename"); std::string filename; if(modelfname != std::string("notfound")) filename = modelfname; else filename = "packedmodel.txt"; std::cout<<"filename: "<<filename <<"\n"; fout.open(filename, std::ios::out ); for(auto bin: bins){ for(auto node: bin){ fout<<node.getLeft()<<", "<<node.getRight() <<", "<<node.getFeature()<<", "<<node.getThreshold()<<"\n"; } } fout.close(); } } 
inline void deserialize(){ //Read the metadata needed to reconstruct bins and for prediction //TODO: change filename int num_classes, num_bins; std::string filename = Config::getValue("metadatafilename"); //std::string filename = "metadata.txt"; std::fstream f; f.open(filename, std::ios::in ); //Number of classes f>>num_classes; Config::setConfigItem("numclasses", std::to_string(num_classes)); //Number of bins f>>num_bins; Config::setConfigItem("numthreads", std::to_string(num_bins)); std::vector<int> num_trees_bin; std::vector<int> num_nodes_bin; std::vector<std::vector<int>> bin_tree_start; int val; //Number of trees in each bin for(int i=0; i<num_bins; ++i){ f>>val; num_trees_bin.push_back(val); } //Number of nodes in each bin for(int i=0; i<num_bins; ++i){ f>>val; num_nodes_bin.push_back(val); } std::vector<int> temp; //start position of each bin for(int i=0; i<num_bins; ++i){ for(int j=0; j<num_trees_bin[i]; ++j){ f>>val; temp.push_back(val); } bin_tree_start.push_back(temp); temp.clear(); } f.close(); setMembers(num_trees_bin, num_nodes_bin, bin_tree_start); } }; #endif
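The regressor's output is simply the forest mean: mmapAndPredict leaves the summed leaf values in preds[0] and the tree count in preds[1], and predict divides the two per observation. A minimal standalone sketch of that final step (the function name is illustrative, not part of PacSet):

#include <vector>
#include <numeric>

// Forest regression: the prediction is the mean of the leaf values
// reached in every tree, i.e. leaf_sum / total_num_trees.
double forest_mean(const std::vector<double>& leaf_values) {
    double leaf_sum =
        std::accumulate(leaf_values.begin(), leaf_values.end(), 0.0);
    return leaf_sum / static_cast<double>(leaf_values.size());
}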
#ifndef PACSET_RF_REG #define PACSET_RF_REG #include <vector> #include <unordered_set> #include <fstream> #include "pacset_base_model.h" #include "packer.h" #include "config.h" #include "json_reader.h" #include "utils.h" #include "node.h" #include "MemoryMapped.h" #define NUM_FILES 10 #define BLOCK_LOGGING 1 template <typename T, typename F> class PacsetRandomForestRegressor: public PacsetBaseModel<T, F> { public: inline void setMembers(const std::vector<int> &bin_sizes, const std::vector<int> &bin_node_sizes, const std::vector<std::vector<int>> &bin_start){ PacsetBaseModel<T, F>::bin_sizes.clear(); std::copy(bin_sizes.begin(), bin_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_sizes)); std::copy(bin_node_sizes.begin(), bin_node_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_node_sizes)); for (auto i: bin_start) PacsetBaseModel<T, F>::bin_start.push_back(i); } inline void setBinNodeSizes(int pos, int siz){ PacsetBaseModel<T, F>::bin_node_sizes[pos] = siz; } inline void loadModel() { JSONReader<T, F> J; //J.convertSklToBins(PacsetBaseModel<T, F>::bins, J.convertSklToBinsRapidJson(PacsetBaseModel<T, F>::bins, PacsetBaseModel<T, F>::bin_sizes, PacsetBaseModel<T, F>::bin_start, PacsetBaseModel<T, F>::bin_node_sizes); } inline void pack(){ std::string layout = Config::getValue("layout"); auto bin = PacsetBaseModel<T, F>::bins[0]; int num_bins = std::stoi(Config::getValue("numthreads")); for(int i=0; i<num_bins; ++i){ Packer<T, F> packer_obj(layout); if(Config::getValue("intertwine") != std::string("notfound")) packer_obj.setDepthIntertwined(std::atoi(Config::getValue("intertwine").c_str())); //should pack in place packer_obj.pack(PacsetBaseModel<T, F>::bins[i], PacsetBaseModel<T, F>::bin_sizes[i], PacsetBaseModel<T, F>::bin_start[i] ); setBinNodeSizes(i, PacsetBaseModel<T, F>::bins[i].size()); } } inline int mmapAndPredict(const std::vector<T>& observation, std::vector<double> &preds, int obsnum) { int num_classes = std::stoi(Config::getValue("numclasses")); int num_threads = std::stoi(Config::getValue("numthreads")); int num_bins = PacsetBaseModel<T, F>::bin_sizes.size(); std::vector<double> elapsed_arr; std::string modelfname = Config::getValue("modelfilename"); MemoryMapped mmapped_obj(modelfname.c_str(), 0); Node<T, F> *data = (Node<T, F>*)mmapped_obj.getData(); std::unordered_set<int> blocks_accessed; int next_node = 0; int block_offset = 0; int offset = 0; double leaf_sum = 0; std::vector<int> offsets; int curr_offset = 0; int total_num_trees = 0; for (auto val: PacsetBaseModel<T, F>::bin_node_sizes){ offsets.push_back(curr_offset); curr_offset += val; } for(int bin_counter=0; bin_counter<num_bins; ++bin_counter){ int block_number = 0; Node<T, F> *bin = data + offsets[bin_counter]; std::vector<int> curr_node(PacsetBaseModel<T, F>::bin_sizes[bin_counter]); std::vector<double> last_node(PacsetBaseModel<T, F>::bin_sizes[bin_counter]); int i, feature_num=0, number_not_in_leaf=0; T feature_val; int siz = PacsetBaseModel<T, F>::bin_sizes[bin_counter]; total_num_trees += siz; for(i=0; i<siz; ++i){ curr_node[i] = PacsetBaseModel<T, F>::bin_start[bin_counter][i]; //bin[curr_node[i]].printNode(); __builtin_prefetch(&bin[curr_node[i]], 0, 3); #ifdef BLOCK_LOGGING block_number = (curr_node[i] + block_offset) / BLOCK_SIZE; blocks_accessed.insert(block_number); #endif } do{ number_not_in_leaf = 0; for( i=0; i<siz; ++i){ if(curr_node[i] > 0){ feature_num = bin[curr_node[i]].getFeature(); feature_val = observation[feature_num]; if(bin[curr_node[i]].getLeft() == -1){ last_node[i] = 
bin[curr_node[i]].getThreshold(); curr_node[i] = -1; } else { curr_node[i] = bin[curr_node[i]].nextNode(feature_val); __builtin_prefetch(&bin[curr_node[i]], 0, 3); ++number_not_in_leaf; } } } }while(number_not_in_leaf); double sum=0; for(i=0; i<siz; ++i){ sum += last_node[i]; } leaf_sum +=sum; block_offset += PacsetBaseModel<T, F>::bin_node_sizes[bin_counter]; } preds.clear(); preds.push_back((double)leaf_sum); preds.push_back((double)total_num_trees); #ifdef BLOCK_LOGGING return blocks_accessed.size(); #else return 0; #endif } inline void predict(const std::vector<std::vector<T>>& observation, std::vector<int>& preds, std::vector<int>&results, bool mmap) { } inline void predict(const std::vector<std::vector<T>>& observation, std::vector<double>& preds, std::vector<double>&results, bool mmap) { //Predicts the class for a vector of observations //By calling predict for a single observation and //tallying the observations // int num_classes = std::stoi(Config::getValue("numclasses")); int num_bins; std::vector<double> elapsed_arr; int blocks; std::vector<int> num_blocks; int ct=1; for(auto single_obs : observation){ auto start = std::chrono::steady_clock::now(); if (mmap) blocks = mmapAndPredict(single_obs, preds, ct); else{ blocks = mmapAndPredict(single_obs, preds, ct); } num_blocks.push_back(blocks); results.push_back((double)preds[0] / (double)preds[1] ); auto end = std::chrono::steady_clock::now(); ct+=1; } } inline void serialize() { auto bins = PacsetBaseModel<T, F>::bins; int num_classes = std::stoi(Config::getValue("numclasses")); int num_bins = bins.size(); std::vector<int> bin_sizes = PacsetBaseModel<T, F>::bin_sizes; std::vector<int> bin_node_sizes = PacsetBaseModel<T, F>::bin_node_sizes; std::vector<std::vector<int>> bin_start = PacsetBaseModel<T, F>::bin_start; std::string format = Config::getValue("format"); //Write the metadata needed to reconstruct bins and for prediction //TODO: change filename std::string filename; if(Config::getValue("metadatafilename") == std::string("notfound")) filename = "metadata.txt"; else filename = Config::getValue("metadatafilename"); std::fstream fout; fout.open(filename, std::ios::out ); //Number of classes fout<<num_classes<<"\n"; //Number of bins fout<<num_bins<<"\n"; //Number of trees in each bin for(auto i: bin_sizes){ fout<<i<<"\n"; } //Number of nodes in each bin for(auto i: bin_node_sizes){ fout<<i<<"\n"; } //start position of each bin for(auto bin: bin_start){ for(auto tree_start: bin){ fout<<tree_start<<"\n"; } } fout.close(); if(format == std::string("notfound") || format == std::string("binary")){ std::string modelfname = Config::getValue("packfilename"); std::string filename; if(modelfname != std::string("notfound")) filename = modelfname; else filename = "packedmodel.bin"; //Write the nodes fout.open(filename, std::ios::binary | std::ios::out ); Node<T, F> node_to_write; for(auto bin: bins){ for(auto node: bin){ node_to_write = node; fout.write((char*)&node_to_write, sizeof(node_to_write)); } } fout.close(); } else{ //Write the nodes std::string modelfname = Config::getValue("packfilename"); std::string filename; if(modelfname != std::string("notfound")) filename = modelfname; else filename = "packedmodel.txt"; std::cout<<"filename: "<<filename <<"\n"; fout.open(filename, std::ios::out ); for(auto bin: bins){ for(auto node: bin){ fout<<node.getLeft()<<", "<<node.getRight() <<", "<<node.getFeature()<<", "<<node.getThreshold()<<"\n"; } } fout.close(); } } inline void deserialize(){ //Write the metadata needed to reconstruct bins and 
for prediction //TODO: change filename int num_classes, num_bins; std::string filename = Config::getValue("metadatafilename"); //std::string filename = "metadata.txt"; std::fstream f; f.open(filename, std::ios::in ); //Number of classes f>>num_classes; Config::setConfigItem("numclasses", std::to_string(num_classes)); //Number of bins f>>num_bins; Config::setConfigItem("numthreads", std::to_string(num_bins)); std::vector<int> num_trees_bin; std::vector<int> num_nodes_bin; std::vector<std::vector<int>> bin_tree_start; int val; //Number of trees in each bin for(int i=0; i<num_bins; ++i){ f>>val; num_trees_bin.push_back(val); } //Number of nodes in each bin for(int i=0; i<num_bins; ++i){ f>>val; num_nodes_bin.push_back(val); } std::vector<int> temp; //start position of each bin for(int i=0; i<num_bins; ++i){ for(int j=0; j<num_trees_bin[i]; ++j){ f>>val; temp.push_back(val); } bin_tree_start.push_back(temp); temp.clear(); } f.close(); setMembers(num_trees_bin, num_nodes_bin, bin_tree_start); } }; #endif
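Tree walking in mmapAndPredict depends on two conventions of Node<T, F> (defined in node.h): a left-child index of -1 marks a leaf, whose threshold slot holds the leaf's regression value, and nextNode() returns the array index of the next node by comparing one feature against the split threshold. A minimal node honoring those conventions; the comparison direction is an assumption, the real nextNode may branch the other way:

#include <cstdint>

template <typename T, typename F>
struct MiniNode {
    int left;      // child index; -1 marks a leaf
    int right;     // child index
    F   feature;   // feature tested at this split
    T   threshold; // split threshold, or the leaf value when left == -1
    // Assumed split rule: go left on <=, right otherwise.
    int nextNode(T feature_val) const {
        return (feature_val <= threshold) ? left : right;
    }
};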
#ifndef PACSET_RF_REG #define PACSET_RF_REG #include <vector> #include <unordered_set> #include <fstream> #include "pacset_base_model.h" #include "packer.h" #include "config.h" #include "json_reader.h" #include "utils.h" #include "node.h" #include "MemoryMapped.h" #define NUM_FILES 10 #define BLOCK_LOGGING 1 template <typename T, typename F> class PacsetRandomForestRegressor: public PacsetBaseModel<T, F> { public: inline void setMembers(const std::vector<int> &bin_sizes, const std::vector<int> &bin_node_sizes, const std::vector<std::vector<int>> &bin_start){ PacsetBaseModel<T, F>::bin_sizes.clear(); std::copy(bin_sizes.begin(), bin_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_sizes)); std::copy(bin_node_sizes.begin(), bin_node_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_node_sizes)); for (auto i: bin_start) PacsetBaseModel<T, F>::bin_start.push_back(i); } inline void setBinNodeSizes(int pos, int siz){ PacsetBaseModel<T, F>::bin_node_sizes[pos] = siz; } inline void loadModel() { JSONReader<T, F> J; //J.convertSklToBins(PacsetBaseModel<T, F>::bins, J.convertSklToBinsRapidJson(PacsetBaseModel<T, F>::bins, PacsetBaseModel<T, F>::bin_sizes, PacsetBaseModel<T, F>::bin_start, PacsetBaseModel<T, F>::bin_node_sizes); } inline void pack(){ std::string layout = Config::getValue("layout"); auto bin = PacsetBaseModel<T, F>::bins[0]; int num_bins = std::stoi(Config::getValue("numthreads")); for(int i=0; i<num_bins; ++i){ Packer<T, F> packer_obj(layout); if(Config::getValue("intertwine") != std::string("notfound")) packer_obj.setDepthIntertwined(std::atoi(Config::getValue("intertwine").c_str())); //should pack in place packer_obj.pack(PacsetBaseModel<T, F>::bins[i], PacsetBaseModel<T, F>::bin_sizes[i], PacsetBaseModel<T, F>::bin_start[i] ); setBinNodeSizes(i, PacsetBaseModel<T, F>::bins[i].size()); } } inline int mmapAndPredict(const std::vector<T>& observation, std::vector<double> &preds, int obsnum) { int num_classes = std::stoi(Config::getValue("numclasses")); int num_threads = std::stoi(Config::getValue("numthreads")); int num_bins = PacsetBaseModel<T, F>::bin_sizes.size(); std::vector<double> elapsed_arr; std::string modelfname = Config::getValue("modelfilename"); MemoryMapped mmapped_obj(modelfname.c_str(), 0); Node<T, F> *data = (Node<T, F>*)mmapped_obj.getData(); std::unordered_set<int> blocks_accessed; int next_node = 0; int block_offset = 0; int offset = 0; double leaf_sum = 0; std::vector<int> offsets; int curr_offset = 0; int total_num_trees = 0; for (auto val: PacsetBaseModel<T, F>::bin_node_sizes){ offsets.push_back(curr_offset); curr_offset += val; } #pragma omp parallel for num_threads(num_threads) for(int bin_counter=0; bin_counter<num_bins; ++bin_counter){ int block_number = 0; Node<T, F> *bin = data + offsets[bin_counter]; std::vector<int> curr_node(PacsetBaseModel<T, F>::bin_sizes[bin_counter]); std::vector<double> last_node(PacsetBaseModel<T, F>::bin_sizes[bin_counter]); int i, feature_num=0, number_not_in_leaf=0; T feature_val; int siz = PacsetBaseModel<T, F>::bin_sizes[bin_counter]; total_num_trees += siz; for(i=0; i<siz; ++i){ curr_node[i] = PacsetBaseModel<T, F>::bin_start[bin_counter][i]; //bin[curr_node[i]].printNode(); __builtin_prefetch(&bin[curr_node[i]], 0, 3); #ifdef BLOCK_LOGGING block_number = (curr_node[i] + block_offset) / BLOCK_SIZE; #pragma omp critical blocks_accessed.insert(block_number); #endif } do{ number_not_in_leaf = 0; for( i=0; i<siz; ++i){ if(curr_node[i] > 0){ feature_num = bin[curr_node[i]].getFeature(); feature_val = 
observation[feature_num]; if(bin[curr_node[i]].getLeft() == -1){ last_node[i] = bin[curr_node[i]].getThreshold(); curr_node[i] = -1; } else { curr_node[i] = bin[curr_node[i]].nextNode(feature_val); __builtin_prefetch(&bin[curr_node[i]], 0, 3); ++number_not_in_leaf; } } } }while(number_not_in_leaf); double sum=0; for(i=0; i<siz; ++i){ sum += last_node[i]; } leaf_sum +=sum; block_offset += PacsetBaseModel<T, F>::bin_node_sizes[bin_counter]; } preds.clear(); preds.push_back((double)leaf_sum); preds.push_back((double)total_num_trees); #ifdef BLOCK_LOGGING return blocks_accessed.size(); #else return 0; #endif } inline void predict(const std::vector<std::vector<T>>& observation, std::vector<int>& preds, std::vector<int>&results, bool mmap) { } inline void predict(const std::vector<std::vector<T>>& observation, std::vector<double>& preds, std::vector<double>&results, bool mmap) { //Predicts the class for a vector of observations //By calling predict for a single observation and //tallying the observations // int num_classes = std::stoi(Config::getValue("numclasses")); int num_bins; std::vector<double> elapsed_arr; int blocks; std::vector<int> num_blocks; int ct=1; for(auto single_obs : observation){ auto start = std::chrono::steady_clock::now(); if (mmap) blocks = mmapAndPredict(single_obs, preds, ct); else{ blocks = mmapAndPredict(single_obs, preds, ct); } num_blocks.push_back(blocks); results.push_back((double)preds[0] / (double)preds[1] ); auto end = std::chrono::steady_clock::now(); ct+=1; } } inline void serialize() { auto bins = PacsetBaseModel<T, F>::bins; int num_classes = std::stoi(Config::getValue("numclasses")); int num_bins = bins.size(); std::vector<int> bin_sizes = PacsetBaseModel<T, F>::bin_sizes; std::vector<int> bin_node_sizes = PacsetBaseModel<T, F>::bin_node_sizes; std::vector<std::vector<int>> bin_start = PacsetBaseModel<T, F>::bin_start; std::string format = Config::getValue("format"); //Write the metadata needed to reconstruct bins and for prediction //TODO: change filename std::string filename; if(Config::getValue("metadatafilename") == std::string("notfound")) filename = "metadata.txt"; else filename = Config::getValue("metadatafilename"); std::fstream fout; fout.open(filename, std::ios::out ); //Number of classes fout<<num_classes<<"\n"; //Number of bins fout<<num_bins<<"\n"; //Number of trees in each bin for(auto i: bin_sizes){ fout<<i<<"\n"; } //Number of nodes in each bin for(auto i: bin_node_sizes){ fout<<i<<"\n"; } //start position of each bin for(auto bin: bin_start){ for(auto tree_start: bin){ fout<<tree_start<<"\n"; } } fout.close(); if(format == std::string("notfound") || format == std::string("binary")){ std::string modelfname = Config::getValue("packfilename"); std::string filename; if(modelfname != std::string("notfound")) filename = modelfname; else filename = "packedmodel.bin"; //Write the nodes fout.open(filename, std::ios::binary | std::ios::out ); Node<T, F> node_to_write; for(auto bin: bins){ for(auto node: bin){ node_to_write = node; fout.write((char*)&node_to_write, sizeof(node_to_write)); } } fout.close(); } else{ //Write the nodes std::string modelfname = Config::getValue("packfilename"); std::string filename; if(modelfname != std::string("notfound")) filename = modelfname; else filename = "packedmodel.txt"; std::cout<<"filename: "<<filename <<"\n"; fout.open(filename, std::ios::out ); for(auto bin: bins){ for(auto node: bin){ fout<<node.getLeft()<<", "<<node.getRight() <<", "<<node.getFeature()<<", "<<node.getThreshold()<<"\n"; } } fout.close(); } } 
inline void deserialize(){ //Read the metadata needed to reconstruct bins and for prediction //TODO: change filename int num_classes, num_bins; std::string filename = Config::getValue("metadatafilename"); //std::string filename = "metadata.txt"; std::fstream f; f.open(filename, std::ios::in ); //Number of classes f>>num_classes; Config::setConfigItem("numclasses", std::to_string(num_classes)); //Number of bins f>>num_bins; Config::setConfigItem("numthreads", std::to_string(num_bins)); std::vector<int> num_trees_bin; std::vector<int> num_nodes_bin; std::vector<std::vector<int>> bin_tree_start; int val; //Number of trees in each bin for(int i=0; i<num_bins; ++i){ f>>val; num_trees_bin.push_back(val); } //Number of nodes in each bin for(int i=0; i<num_bins; ++i){ f>>val; num_nodes_bin.push_back(val); } std::vector<int> temp; //start position of each bin for(int i=0; i<num_bins; ++i){ for(int j=0; j<num_trees_bin[i]; ++j){ f>>val; temp.push_back(val); } bin_tree_start.push_back(temp); temp.clear(); } f.close(); setMembers(num_trees_bin, num_nodes_bin, bin_tree_start); } }; #endif
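The only change in this third copy is the #pragma omp parallel for over bins plus a critical section around blocks_accessed.insert. The scalar accumulations leaf_sum += sum, total_num_trees += siz, and block_offset += ... remain plain shared updates inside the parallel region, so they race; block_offset is additionally order-dependent, although the same value is already available as offsets[bin_counter]. A hedged sketch of making the two sums race-free with an OpenMP reduction, a sketch of the fix, not the library's code:

#include <vector>
#include <cstdio>
#include <omp.h>

// Race-free accumulation over bins via an OpenMP reduction.
// bin_sums[b] stands in for the per-bin leaf sum computed by the traversal.
int main() {
    std::vector<double> bin_sums  = {3.5, 1.25, 2.0, 0.75};
    std::vector<int>    bin_trees = {10, 10, 10, 10};
    double leaf_sum = 0.0;
    long long total_trees = 0;
    #pragma omp parallel for reduction(+ : leaf_sum, total_trees)
    for (int b = 0; b < (int)bin_sums.size(); ++b) {
        leaf_sum    += bin_sums[b];   // was a racy shared += in the original
        total_trees += bin_trees[b];
    }
    std::printf("prediction = %f\n", leaf_sum / (double)total_trees);
    return 0;
}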
GB_binop__second_uint64.c
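The next file is one instance of GraphBLAS's generated-kernel scheme: for every (operator, type) pair an auto-generated file defines macros such as GB_BINOP, GB_CTYPE, and GB_GETA/GB_GETB, then #includes shared template sources (GB_add_template.c, GB_emult_template.c, ...), so one loop body is stamped out hard-coded for each combination. The pattern in miniature, a toy illustration rather than the real template code:

#include <cstdio>

// Toy version of the macro-parameterized template mechanism. Each generated
// file sets the macros; here OP mimics GB_BINOP for SECOND: z = y.
#define CTYPE unsigned long long
#define OP(z, x, y) (z) = (y)

static void kernel_second(CTYPE* c, const CTYPE* a, const CTYPE* b, int n) {
    for (int i = 0; i < n; i++) {
        OP(c[i], a[i], b[i]);   // expands to c[i] = b[i]
    }
}

int main() {
    CTYPE a[3] = {1, 2, 3}, b[3] = {7, 8, 9}, c[3];
    kernel_second(c, a, b, 3);
    std::printf("%llu %llu %llu\n", c[0], c[1], c[2]);  // prints 7 8 9
    return 0;
}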
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__second_uint64 // A.*B function (eWiseMult): GB_AemultB__second_uint64 // A*D function (colscale): GB_AxD__second_uint64 // D*A function (rowscale): GB_DxB__second_uint64 // C+=B function (dense accum): GB_Cdense_accumB__second_uint64 // C+=b function (dense accum): GB_Cdense_accumb__second_uint64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_uint64 // C=scalar+B (none) // C=scalar+B' (none) // C=A+scalar GB_bind2nd__second_uint64 // C=A'+scalar GB_bind2nd_tran__second_uint64 // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = bij #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = y ; // op is second #define GB_OP_IS_SECOND \ 1 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SECOND || GxB_NO_UINT64 || GxB_NO_SECOND_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__second_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__second_uint64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__second_uint64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__second_uint64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__second_uint64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__second_uint64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const 
int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__second_uint64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = Bx [p] ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__second_uint64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = aij ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, 
int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB_bind2nd_tran__second_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
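For the SECOND operator most of this machinery degenerates: GB_BINOP(z, x, y, i, j) is z = y and GB_GETA expands to nothing, so the A operand's values are never read (hence the stray "; ;" in the generated loops). Stripped of the macros, GB_bind2nd reduces to a bitmap-guarded fill with the bound scalar. A simplified sketch of the expanded loop, not the generated code itself; here Ab is the optional bitmap of present entries and NULL means all present:

#include <cstdint>
#include <cstddef>

// What GB_bind2nd__second_uint64 boils down to once the SECOND macros
// expand: every present entry of C becomes the bound scalar y.
void bind2nd_second_uint64(uint64_t* Cx, const int8_t* Ab,
                           uint64_t y, int64_t anz) {
    for (int64_t p = 0; p < anz; p++) {
        if (Ab != NULL && !Ab[p]) continue;  // skip entries absent in bitmap
        Cx[p] = y;
    }
}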
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__second_uint64 // A.*B function (eWiseMult): GB_AemultB__second_uint64 // A*D function (colscale): GB_AxD__second_uint64 // D*A function (rowscale): GB_DxB__second_uint64 // C+=B function (dense accum): GB_Cdense_accumB__second_uint64 // C+=b function (dense accum): GB_Cdense_accumb__second_uint64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_uint64 // C=scalar+B (none) // C=scalar+B' (none) // C=A+scalar GB_bind2nd__second_uint64 // C=A'+scalar GB_bind2nd_tran__second_uint64 // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = bij #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = y ; // op is second #define GB_OP_IS_SECOND \ 1 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SECOND || GxB_NO_UINT64 || GxB_NO_SECOND_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__second_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__second_uint64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__second_uint64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__second_uint64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__second_uint64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__second_uint64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const 
int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__second_uint64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = Bx [p] ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__second_uint64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = aij ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses 
GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB_bind2nd_tran__second_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
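This middle copy is the same file with the OpenMP pragmas removed; deleting #pragma omp lines only serializes the loops, since a compiler built without OpenMP ignores them anyway. The other compile-time switch in the file is GB_DISABLE: when any of the GxB_NO_* symbols is defined, each kernel collapses to return GrB_NO_VALUE and the caller falls back to a generic path. A condensed sketch of that pattern with stand-in names:

#include <cstdio>

enum Info { SUCCESS = 0, NO_VALUE = 1 };  // stand-ins for GrB_SUCCESS / GrB_NO_VALUE

// #define GB_DISABLE 1   // uncomment to mimic GxB_NO_SECOND_UINT64 etc.

Info specialized_kernel() {
#if defined(GB_DISABLE) && GB_DISABLE
    return NO_VALUE;       // tell the caller this kernel was compiled out
#else
    /* ... hard-coded fast path ... */
    return SUCCESS;
#endif
}

int main() {
    if (specialized_kernel() == NO_VALUE) {
        std::puts("falling back to generic kernel");
    }
    return 0;
}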
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__second_uint64 // A.*B function (eWiseMult): GB_AemultB__second_uint64 // A*D function (colscale): GB_AxD__second_uint64 // D*A function (rowscale): GB_DxB__second_uint64 // C+=B function (dense accum): GB_Cdense_accumB__second_uint64 // C+=b function (dense accum): GB_Cdense_accumb__second_uint64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_uint64 // C=scalar+B (none) // C=scalar+B' (none) // C=A+scalar GB_bind2nd__second_uint64 // C=A'+scalar GB_bind2nd_tran__second_uint64 // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = bij #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = y ; // op is second #define GB_OP_IS_SECOND \ 1 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SECOND || GxB_NO_UINT64 || GxB_NO_SECOND_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__second_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__second_uint64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__second_uint64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__second_uint64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__second_uint64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__second_uint64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const 
int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__second_uint64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = Bx [p] ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__second_uint64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = aij ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, 
int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB_bind2nd_tran__second_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
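The pragmas restored in this third copy use schedule(static), which fits these loops well: every iteration does a constant amount of work (test a bitmap bit, store one value), so equal contiguous chunks cost nothing to compute and keep each thread's writes to Cx cache-friendly; a dynamic schedule would add synchronization for no benefit. A minimal self-contained example of the same pattern:

#include <omp.h>
#include <cstdint>
#include <vector>
#include <cstdio>

int main() {
    const int64_t anz = 1 << 20;
    const int nthreads = 4;   // illustrative thread count
    std::vector<uint64_t> Cx(anz);
    const uint64_t y = 42;
    int64_t p;
    // static schedule: contiguous equal chunks, no runtime load balancing
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++) {
        Cx[p] = y;
    }
    std::printf("Cx[0]=%llu Cx[last]=%llu\n",
                (unsigned long long)Cx[0], (unsigned long long)Cx[anz - 1]);
    return 0;
}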
edge_vol_int.c
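The file that follows optionally backs its buffers with 1 GiB or 2 MiB huge pages: edge_hp_malloc rounds the request up to whole pages and calls mmap with MAP_HUGETLB, falling back to libxsmm_aligned_malloc when neither EDGE_HP_1G nor EDGE_HP_2M is defined. A Linux-only sketch of the 2 MiB variant; unlike the file below, which exits if mmap fails, this sketch falls back to plain malloc:

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Round up to whole 2 MiB pages and request HUGETLB-backed anonymous memory.
void* hp_malloc_2m(size_t nbytes) {
    const size_t page = 2UL * 1024 * 1024;
    size_t rounded = ((nbytes + page - 1) / page) * page;
    void* p = mmap(NULL, rounded, PROT_READ | PROT_WRITE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0);
    if (p == MAP_FAILED) {   // no huge pages reserved on this system
        std::fprintf(stderr, "huge-page mmap failed, falling back\n");
        return std::malloc(nbytes);
    }
    return p;
}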
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/libxsmm/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) ******************************************************************************/ #include "edge_proxy_common.h" #include <libxsmm_intrinsics_x86.h> #include <libxsmm.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) # include <omp.h> #endif /*#define EDGE_HP_1G*/ /*#define HANDLE_AMOK*/ #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) #include <sys/mman.h> #include <linux/mman.h> #endif LIBXSMM_INLINE void* edge_hp_malloc( size_t nbytes, size_t alignment ) { void* ret_ptr = NULL; #if defined(EDGE_HP_1G) size_t num_large_pages = nbytes / (1073741824L); if ( nbytes > num_large_pages*1073741824L ) { num_large_pages++; } nbytes = (size_t) num_large_pages * 1073741824L; printf("trying to allocate %ld 1G pages\n", num_large_pages); /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 );*/ ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 ); if ( (ret_ptr == (void *)(-1)) ) { fprintf(stderr,"1G mmap call failed\n"); exit(1); } #elif defined(EDGE_HP_2M) size_t num_large_pages = nbytes / (2097152UL); if ( nbytes > num_large_pages*2097152UL ) { num_large_pages++; } nbytes = (size_t) num_large_pages * 2097152UL; printf("trying to allocate %ld 2M pages\n", num_large_pages); /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 );*/ ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 ); if ( (ret_ptr == (void *)(-1)) ) { fprintf(stderr,"2M mmap call failed\n"); exit(1); } #else ret_ptr = libxsmm_aligned_malloc( nbytes, alignment ); #endif return ret_ptr; } LIBXSMM_INLINE void edge_hp_free( void* ptr, size_t nbytes ) { LIBXSMM_UNUSED( nbytes ); #if defined(EDGE_HP_1G) /* to be implemented */ #elif defined(EDGE_HP_2M) /* to be implemented */ #else libxsmm_free( ptr ); #endif } #if defined(__AVX512F__) LIBXSMM_INLINE void matMulFusedAC( unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c ) { unsigned int l_m, l_n, l_k; const __m512d beta = _mm512_set1_pd( i_beta ); LIBXSMM_UNUSED(i_r); for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = (i_beta != 0.0) ? 
_mm512_mul_pd( _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ), beta ) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc); } } for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ); for( l_k = 0; l_k < i_k; l_k++ ) { const __m512d alpha = _mm512_set1_pd( i_b[l_k*i_ldB + l_n] ); vc = _mm512_fmadd_pd( alpha, _mm512_loadu_pd( &(i_a[l_m*i_ldA*8 + l_k*8 + 0]) ), vc); } _mm512_storeu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc ); } } } LIBXSMM_INLINE void matMulFusedBC( unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c ) { unsigned int l_m, l_n, l_k; const __m512d beta = _mm512_set1_pd( i_beta ); LIBXSMM_UNUSED(i_r); for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = (i_beta != 0.0) ? _mm512_mul_pd( _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ), beta ) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc); } } for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ); for( l_k = 0; l_k < i_k; l_k++ ) { const __m512d alpha = _mm512_set1_pd( i_a[l_m*i_ldA + l_k] ); vc = _mm512_fmadd_pd( alpha, _mm512_loadu_pd( &(i_b[l_k*i_ldB*8 + l_n*8 + 0]) ), vc); } _mm512_storeu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc ); } } } #endif LIBXSMM_INLINE void amok_detect( const double* i_runtimes, size_t* io_amoks, const size_t i_workers ) { double time_avg; size_t i; time_avg = 0.0; for (i = 0; i < i_workers; i++) { if ( io_amoks[8*i] == 0 ) { time_avg += i_runtimes[8*i]; } } time_avg = time_avg/((double)(i_workers-io_amoks[8*i_workers])); /* let detect amoks */ for (i = 0; i < i_workers; i++) { if ( io_amoks[8*i] == 0 ) { if ( i_runtimes[8*i] > time_avg*1.07 ) { /* this is the amok condition */ io_amoks[8*i_workers]++; io_amoks[8*i] = 1; } } } } LIBXSMM_INLINE void amok_balance( const size_t* i_amoks, const size_t i_workers, const size_t i_worksize, const size_t i_mytid, size_t* io_chunk, size_t* io_mystart, size_t* io_myend ) { size_t l_chunk, l_start, l_end; size_t l_cur_amoks = i_amoks[8*i_workers]; size_t l_non_amoks = i_workers - l_cur_amoks; l_chunk = (i_worksize % l_non_amoks == 0) ? (i_worksize / l_non_amoks) : ((i_worksize / l_non_amoks) + 1); if (i_amoks[8*i_mytid] != 0) { l_start = 0; l_end = 0; } else { size_t l_tid_offset = 0; size_t l_z; for ( l_z = 0; l_z < i_mytid; l_z++) { if ( i_amoks[8*l_z] != 0 ) { l_tid_offset++; } } l_tid_offset = i_mytid - l_tid_offset; l_start = (l_tid_offset * l_chunk < i_worksize) ? (l_tid_offset * l_chunk) : i_worksize; l_end = ((l_tid_offset+1) * l_chunk < i_worksize) ? 
((l_tid_offset+1) * l_chunk) : i_worksize; } *io_chunk = l_chunk; *io_mystart = l_start; *io_myend = l_end; } int main(int argc, char* argv[]) { char* mat_a = 0; unsigned int *mat_a_rowptr, *mat_a_colidx; unsigned int mat_a_rowcount, mat_a_colcount, mat_a_nnz; double* mat_a_values; libxsmm_dmmfunction a_kernel; char* mat_b = 0; unsigned int *mat_b_rowptr, *mat_b_colidx; unsigned int mat_b_rowcount, mat_b_colcount, mat_b_nnz; double* mat_b_values; libxsmm_dmmfunction b_kernel; char* mat_c = 0; unsigned int *mat_c_rowptr, *mat_c_colidx; unsigned int mat_c_rowcount, mat_c_colcount, mat_c_nnz; double* mat_c_values; libxsmm_dmmfunction c_kernel; char* mat_st = 0; unsigned int *mat_st_rowptr, *mat_st_colidx; unsigned int mat_st_rowcount, mat_st_colcount, mat_st_nnz; double* mat_st_values; libxsmm_dmmfunction st_kernel; int num_modes = 9; int num_quants = 9; size_t num_elems = 0; size_t num_cfr = 8; size_t num_reps = 1; size_t elem_size; /* OpenMP: signed induction variables */ int i, j; const libxsmm_gemm_descriptor *l_xgemm_desc_stiff = 0, *l_xgemm_desc_star = 0; libxsmm_descriptor_blob l_xgemm_blob_stiff, l_xgemm_blob_star; const libxsmm_gemm_prefetch_type prefetch = LIBXSMM_GEMM_PREFETCH_NONE; const int flags = LIBXSMM_GEMM_FLAGS('N', 'N'); const double alpha = 1, beta = 1; double flops_vol; double* q; double* qt; double* qs; double* star; double* global; unsigned long long l_start, l_end; double l_total; unsigned int l_num_threads; unsigned int l_star_ent = num_quants*num_quants; double* l_total_thread; double* l_cur_thread_time; double time_max; double time_min; double time_avg; size_t* amoks; /* read cmd */ if ((argc > 1 && !strncmp(argv[1], "-h", 3)) || (argc != 8)) { printf("Usage: %s stif1 stif2 stif3 star nModes nElems nReps\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* some empty lines at the beginning */ printf("\n"); i = 1; if (argc > (int)i) mat_a = argv[i++]; if (argc > (int)i) mat_b = argv[i++]; if (argc > (int)i) mat_c = argv[i++]; if (argc > (int)i) mat_st = argv[i++]; if (argc > (int)i) num_modes = atoi(argv[i++]); if (argc > (int)i) num_elems = atoi(argv[i++]); if (argc > (int)i) num_reps = atoi(argv[i++]); elem_size = num_modes*num_quants*num_cfr; #if defined(_OPENMP) #pragma omp parallel { #pragma omp master { l_num_threads = omp_get_num_threads(); } } #else l_num_threads = 1; #endif l_total_thread = (double*)malloc(8*l_num_threads*sizeof(double)); l_cur_thread_time = (double*)malloc(8*l_num_threads*sizeof(double)); amoks = (size_t*)malloc(8*(l_num_threads+1)*sizeof(size_t)); for ( i = 0; i < 8*((int)l_num_threads+1); i++ ) { amoks[i] = 0; } /* read matrices */ printf("reading sparse matrices... "); edge_sparse_csr_reader_double( mat_a, &mat_a_rowptr, &mat_a_colidx, &mat_a_values, &mat_a_rowcount, &mat_a_colcount, &mat_a_nnz ); edge_sparse_csr_reader_double( mat_b, &mat_b_rowptr, &mat_b_colidx, &mat_b_values, &mat_b_rowcount, &mat_b_colcount, &mat_b_nnz ); edge_sparse_csr_reader_double( mat_c, &mat_c_rowptr, &mat_c_colidx, &mat_c_values, &mat_c_rowcount, &mat_c_colcount, &mat_c_nnz ); edge_sparse_csr_reader_double( mat_st, &mat_st_rowptr, &mat_st_colidx, &mat_st_values, &mat_st_rowcount, &mat_st_colcount, &mat_st_nnz ); printf("done!\n\n"); /* generate kernels */ printf("generating code... 
"); l_xgemm_desc_stiff = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_stiff, num_quants, num_modes, num_modes, num_modes, 0, num_modes, alpha, beta, flags, prefetch); l_xgemm_desc_star = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_star, num_quants, num_modes, num_quants, 0, num_modes, num_modes, alpha, beta, flags, prefetch); a_kernel = libxsmm_create_packed_spxgemm_csr( l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_a_rowptr, mat_a_colidx, (const void*)mat_a_values ).dmm; b_kernel = libxsmm_create_packed_spxgemm_csr( l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_b_rowptr, mat_b_colidx, (const void*)mat_b_values ).dmm; c_kernel = libxsmm_create_packed_spxgemm_csr( l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_c_rowptr, mat_c_colidx, (const void*)mat_c_values ).dmm; st_kernel = libxsmm_create_packed_spxgemm_csr( l_xgemm_desc_star, (unsigned int)num_cfr, mat_st_rowptr, mat_st_colidx, (const void*)mat_st_values ).dmm; if ( a_kernel == 0 ) { printf("a kernel could not be built -> exit!"); exit(-1); } if ( b_kernel == 0 ) { printf("b kernel could not be built -> exit!"); exit(-1); } if ( b_kernel == 0 ) { printf("c kernel could not be built -> exit!"); exit(-1); } if ( st_kernel == 0 ) { printf("st kernel could not be built -> exit!"); exit(-1); } printf("done!\n\n"); /* copying code to 1 GB page */ #if 0 #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) printf("copying code to 1GB page...\n"); onegcode = (void*)edge_hp_malloc( 5*1024*1024, 2097152 ); memcpy( onegcode, (void*) a_kernel, 1505 ); memcpy( onegcode+(1*1024*1024)+64, (void*) b_kernel, 2892 ); memcpy( onegcode+(2*1024*1024)+128, (void*) c_kernel, 3249 ); memcpy( onegcode+(3*1024*1024)+196, (void*)st_kernel, 11010 ); a_kernel = (libxsmm_dmmfunction)onegcode; b_kernel = (libxsmm_dmmfunction)(onegcode+(1*1024*1024)+64); c_kernel = (libxsmm_dmmfunction)(onegcode+(2*1024*1024)+128); st_kernel = (libxsmm_dmmfunction)(onegcode+(3*1024*1024)+196); printf("...done\n\n"); #endif #endif /* create unknowns and t-unknowns */ printf("allocating and initializing fake data... 
\n"); /* DoFs */ printf(" q: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) ); q = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152); /* tDofs */ printf(" qt: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) ); qt = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152); /* star matrices */ printf(" star: %f MiB\n", ((double)(num_elems*3*l_star_ent*sizeof(double))) / ( 1024.0*1024.0 ) ); star = (double*)edge_hp_malloc( num_elems*3*l_star_ent*sizeof(double), 2097152); /* stiffness matrices */ printf("global: %f MiB\n", ((double)(3*num_modes*num_modes*sizeof(double))) / ( 1024.0*1024 ) ); global = (double*)edge_hp_malloc( 3*num_modes*num_modes*sizeof(double), 2097152); /* per thread scratch */ printf(" t: %f MiB\n", ((double)(l_num_threads*num_modes*num_quants*num_cfr*sizeof(double)))/ ( 1024.0*1024.0) ); qs = (double*)edge_hp_malloc( l_num_threads*num_modes*num_quants*num_cfr*sizeof(double), 2097152); for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { q[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { qt[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)l_num_threads; i++) { for (j = 0; j < (int)elem_size; j++) { qs[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)mat_st_nnz*3; j++) { star[(i*3*mat_st_nnz)+j] = libxsmm_rng_f64(); } } for (i = 0; i < 3; i++) { for (j = 0; j < num_modes*num_modes; j++) { global[(i*num_modes*num_modes)+j] = libxsmm_rng_f64(); } } printf("allocation done!\n\n"); printf("running benchmark...\n"); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i, j) #endif { #if defined(_OPENMP) int mytid = omp_get_thread_num(); #else int mytid = 0; #endif libxsmm_timer_tickint mystart, myend; #if defined(HANDLE_AMOK) size_t cur_amoks = 0; size_t non_amoks = l_num_threads; #endif size_t l_el_chunk = 0; size_t l_el_start = 0; size_t l_el_end = 0; /* initial work distribution */ amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end ); for (i = 0; i < (int)num_reps; i++) { #if defined(HANDLE_AMOK) /* did we had an amok? 
*/ if (cur_amoks != amoks[8*l_num_threads]) { cur_amoks = amoks[8*l_num_threads]; non_amoks = l_num_threads - cur_amoks; /* re-balance work */ amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end ); } #endif mystart = libxsmm_timer_tick(); for (j = (int)l_el_start; j < (int)l_el_end; j++) { #if 1 st_kernel( star+(j*3*mat_st_nnz) , qt+(j*elem_size), qs+(mytid*elem_size) ); a_kernel( qs+(mytid*elem_size), global , q+(j*elem_size) ); st_kernel( star+(j*3*mat_st_nnz)+mat_st_nnz , qt+(j*elem_size), qs+(mytid*elem_size) ); b_kernel( qs+(mytid*elem_size), global+(num_modes*num_modes) , q+(j*elem_size) ); st_kernel( star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); c_kernel( qs+(mytid*elem_size), global+(2*num_modes*num_modes), q+(j*elem_size) ); #else matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global, q+(j*elem_size) ); matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+mat_st_nnz, qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(num_modes*num_modes) , q+(j*elem_size) ); matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(2*num_modes*num_modes), q+(j*elem_size) ); #endif } myend = libxsmm_timer_tick(); l_cur_thread_time[8*mytid] = libxsmm_timer_duration( mystart, myend ); l_total_thread[8*mytid] += libxsmm_timer_duration( mystart, myend ); #if defined(_OPENMP) #pragma omp barrier #endif #if defined(HANDLE_AMOK) /* checking for amoks is centralized business */ if (mytid == 0) { /* amok check */ amok_detect( l_cur_thread_time, amoks, l_num_threads ); } #if defined(_OPENMP) #pragma omp barrier #endif #endif } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("...done!\n\n"); /* some timing stats */ time_max = 0.0; time_min = 80000000; time_avg = 0.0; for (i = 0; i < (int)l_num_threads; i++) { if( amoks[8*i] == 0 ) { if( l_total_thread[8*i] > time_max) time_max = l_total_thread[8*i]; if( l_total_thread[8*i] < time_min) time_min = l_total_thread[8*i]; time_avg += l_total_thread[8*i]; } } time_avg = time_avg/((double)(l_num_threads-amoks[8*l_num_threads])); flops_vol = (double)num_quants * (double)mat_a_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_quants * (double)mat_b_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_quants * (double)mat_c_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_modes * (double)mat_st_nnz * (double)num_cfr * 6.0; /* 3 star matrix mul */ printf("%fs time for vol (asm), min %f, max %f, avg %f, #amoks %llu, amok-threads ", l_total, time_min, time_max, time_avg, (unsigned long long)amoks[8*l_num_threads]); for ( i = 0; i < (int)l_num_threads; i++ ) { if ( amoks[8*i] != 0 ) { printf("%i,", i); } } printf("\n"); printf("%f GFLOPS for vol (asm)\n", ((double)num_elems * (double)num_reps * flops_vol) / (l_total * 1.0e9)); printf("%f GiB/s for vol (asm)\n", (double)((double)num_elems * (double)elem_size * 8.0 * 3.0 * 
(double)num_reps) / (l_total * 1024.0*1024.0*1024.0) ); printf("done!\n\n"); /* some empty lines at the end */ printf("\n\n"); return 0; }
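The amok handling above is easiest to see in isolation: amok_detect() marks any worker whose last iteration ran more than 7% over the average runtime, and amok_balance() then re-chunks the element range over the remaining healthy workers only. Below is a minimal standalone sketch of that math; the runtimes, worker count, and work size are made up, and the 8-stride cache-line padding of the real arrays is dropped for brevity.

#include <stdio.h>
#include <stddef.h>

int main(void) {
  double runtime[4] = { 1.00, 1.02, 1.31, 0.99 }; /* thread 2 is the amok */
  size_t amok[4] = { 0, 0, 0, 0 };
  size_t i, n_amok = 0, worksize = 1000;

  /* detection: compare each runtime against 1.07x the average */
  double avg = 0.0;
  for (i = 0; i < 4; i++) avg += runtime[i];
  avg /= 4.0;
  for (i = 0; i < 4; i++) {
    if (runtime[i] > avg * 1.07) { amok[i] = 1; n_amok++; } /* amok condition */
  }

  /* re-balance: round-up chunking over the healthy workers only */
  size_t workers = 4 - n_amok;
  size_t chunk = (worksize % workers == 0) ? worksize / workers
                                           : worksize / workers + 1;
  size_t healthy = 0;
  for (i = 0; i < 4; i++) {
    if (amok[i]) { printf("tid %zu: amok, no work\n", i); continue; }
    size_t start = healthy * chunk;
    size_t end = ((healthy + 1) * chunk < worksize) ? (healthy + 1) * chunk
                                                    : worksize;
    printf("tid %zu: [%zu, %zu)\n", i, start, end);
    healthy++;
  }
  return 0;
}

With the values above, thread 2 exceeds 1.07 times the average (1.08) and is excluded; the 1000 elements are then split into chunks of 334 over the three remaining threads.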
/* * Alexander Heinecke (Intel Corp.) **************************************************************************** */ #include "edge_proxy_common.h" #include <libxsmm_intrinsics_x86.h> #include <libxsmm.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> /* #define EDGE_HP_1G */ /* #define HANDLE_AMOK */ #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) #include <sys/mman.h> #include <linux/mman.h> #endif LIBXSMM_INLINE void * edge_hp_malloc(size_t nbytes, size_t alignment) { void *ret_ptr = NULL; #if defined(EDGE_HP_1G) size_t num_large_pages = nbytes / (1073741824L); if (nbytes > num_large_pages * 1073741824L) { num_large_pages++; } nbytes = (size_t) num_large_pages *1073741824L; printf("trying to allocate %ld 1G pages\n", num_large_pages); /* * ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, * MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 ); */ ret_ptr = mmap(NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0); if ((ret_ptr == (void *)(-1))) { fprintf(stderr, "1G mmap call failed\n"); exit(1); } #elif defined(EDGE_HP_2M) size_t num_large_pages = nbytes / (2097152UL); if (nbytes > num_large_pages * 2097152UL) { num_large_pages++; } nbytes = (size_t) num_large_pages *2097152UL; printf("trying to allocate %ld 2M pages\n", num_large_pages); /* * ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, * MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 ); */ ret_ptr = mmap(NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0); if ((ret_ptr == (void *)(-1))) { fprintf(stderr, "2M mmap call failed\n"); exit(1); } #else ret_ptr = libxsmm_aligned_malloc(nbytes, alignment); #endif return ret_ptr; } LIBXSMM_INLINE void edge_hp_free(void *ptr, size_t nbytes) { LIBXSMM_UNUSED(nbytes); #if defined(EDGE_HP_1G) /* to be implemented */ #elif defined(EDGE_HP_2M) /* to be implemented */ #else libxsmm_free(ptr); #endif } #if defined(__AVX512F__) LIBXSMM_INLINE void matMulFusedAC(unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c) { unsigned int l_m, l_n, l_k; const __m512d beta = _mm512_set1_pd(i_beta); LIBXSMM_UNUSED(i_r); for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = (i_beta != 0.0) ? _mm512_mul_pd(_mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])), beta) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = _mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])); for (l_k = 0; l_k < i_k; l_k++) { const __m512d alpha = _mm512_set1_pd(i_b[l_k * i_ldB + l_n]); vc = _mm512_fmadd_pd(alpha, _mm512_loadu_pd(&(i_a[l_m * i_ldA * 8 + l_k * 8 + 0])), vc); } _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } } LIBXSMM_INLINE void matMulFusedBC(unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c) { unsigned int l_m, l_n, l_k; const __m512d beta = _mm512_set1_pd(i_beta); LIBXSMM_UNUSED(i_r); for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = (i_beta != 0.0) ? 
_mm512_mul_pd(_mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])), beta) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = _mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])); for (l_k = 0; l_k < i_k; l_k++) { const __m512d alpha = _mm512_set1_pd(i_a[l_m * i_ldA + l_k]); vc = _mm512_fmadd_pd(alpha, _mm512_loadu_pd(&(i_b[l_k * i_ldB * 8 + l_n * 8 + 0])), vc); } _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } } #endif LIBXSMM_INLINE void amok_detect(const double *i_runtimes, size_t * io_amoks, const size_t i_workers) { double time_avg; size_t i; time_avg = 0.0; for (i = 0; i < i_workers; i++) { if (io_amoks[8 * i] == 0) { time_avg += i_runtimes[8 * i]; } } time_avg = time_avg / ((double)(i_workers - io_amoks[8 * i_workers])); /* let detect amoks */ for (i = 0; i < i_workers; i++) { if (io_amoks[8 * i] == 0) { if (i_runtimes[8 * i] > time_avg * 1.07) { /* this is the amok condition */ io_amoks[8 * i_workers]++; io_amoks[8 * i] = 1; } } } } LIBXSMM_INLINE void amok_balance(const size_t * i_amoks, const size_t i_workers, const size_t i_worksize, const size_t i_mytid, size_t * io_chunk, size_t * io_mystart, size_t * io_myend) { size_t l_chunk, l_start, l_end; size_t l_cur_amoks = i_amoks[8 * i_workers]; size_t l_non_amoks = i_workers - l_cur_amoks; l_chunk = (i_worksize % l_non_amoks == 0) ? (i_worksize / l_non_amoks) : ((i_worksize / l_non_amoks) + 1); if (i_amoks[8 * i_mytid] != 0) { l_start = 0; l_end = 0; } else { size_t l_tid_offset = 0; size_t l_z; for (l_z = 0; l_z < i_mytid; l_z++) { if (i_amoks[8 * l_z] != 0) { l_tid_offset++; } } l_tid_offset = i_mytid - l_tid_offset; l_start = (l_tid_offset * l_chunk < i_worksize) ? (l_tid_offset * l_chunk) : i_worksize; l_end = ((l_tid_offset + 1) * l_chunk < i_worksize) ? 
((l_tid_offset + 1) * l_chunk) : i_worksize; } *io_chunk = l_chunk; *io_mystart = l_start; *io_myend = l_end; } int main(int argc, char *argv[]) { char *mat_a = 0; unsigned int *mat_a_rowptr, *mat_a_colidx; unsigned int mat_a_rowcount, mat_a_colcount, mat_a_nnz; double *mat_a_values; libxsmm_dmmfunction a_kernel; char *mat_b = 0; unsigned int *mat_b_rowptr, *mat_b_colidx; unsigned int mat_b_rowcount, mat_b_colcount, mat_b_nnz; double *mat_b_values; libxsmm_dmmfunction b_kernel; char *mat_c = 0; unsigned int *mat_c_rowptr, *mat_c_colidx; unsigned int mat_c_rowcount, mat_c_colcount, mat_c_nnz; double *mat_c_values; libxsmm_dmmfunction c_kernel; char *mat_st = 0; unsigned int *mat_st_rowptr, *mat_st_colidx; unsigned int mat_st_rowcount, mat_st_colcount, mat_st_nnz; double *mat_st_values; libxsmm_dmmfunction st_kernel; int num_modes = 9; int num_quants = 9; size_t num_elems = 0; size_t num_cfr = 8; size_t num_reps = 1; size_t elem_size; /* OpenMP: signed induction variables */ int i, j; const libxsmm_gemm_descriptor *l_xgemm_desc_stiff = 0, *l_xgemm_desc_star = 0; libxsmm_descriptor_blob l_xgemm_blob_stiff, l_xgemm_blob_star; const libxsmm_gemm_prefetch_type prefetch = LIBXSMM_GEMM_PREFETCH_NONE; const int flags = LIBXSMM_GEMM_FLAGS('N', 'N'); const double alpha = 1, beta = 1; double flops_vol; double *q; double *qt; double *qs; double *star; double *global; unsigned long long l_start, l_end; double l_total; unsigned int l_num_threads; unsigned int l_star_ent = num_quants * num_quants; double *l_total_thread; double *l_cur_thread_time; double time_max; double time_min; double time_avg; size_t *amoks; /* read cmd */ if ((argc > 1 && !strncmp(argv[1], "-h", 3)) || (argc != 8)) { printf("Usage: %s stif1 stif2 stif3 star nModes nElems nReps\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* some empty lines at the beginning */ printf("\n"); i = 1; if (argc > (int)i) mat_a = argv[i++]; if (argc > (int)i) mat_b = argv[i++]; if (argc > (int)i) mat_c = argv[i++]; if (argc > (int)i) mat_st = argv[i++]; if (argc > (int)i) num_modes = atoi(argv[i++]); if (argc > (int)i) num_elems = atoi(argv[i++]); if (argc > (int)i) num_reps = atoi(argv[i++]); elem_size = num_modes * num_quants * num_cfr; l_num_threads = 1; /* serial build: single worker */ l_total_thread = (double *)malloc(8 * l_num_threads * sizeof(double)); l_cur_thread_time = (double *)malloc(8 * l_num_threads * sizeof(double)); amoks = (size_t *) malloc(8 * (l_num_threads + 1) * sizeof(size_t)); for (i = 0; i < 8 * ((int)l_num_threads + 1); i++) { amoks[i] = 0; } /* read matrices */ printf("reading sparse matrices... "); edge_sparse_csr_reader_double(mat_a, &mat_a_rowptr, &mat_a_colidx, &mat_a_values, &mat_a_rowcount, &mat_a_colcount, &mat_a_nnz); edge_sparse_csr_reader_double(mat_b, &mat_b_rowptr, &mat_b_colidx, &mat_b_values, &mat_b_rowcount, &mat_b_colcount, &mat_b_nnz); edge_sparse_csr_reader_double(mat_c, &mat_c_rowptr, &mat_c_colidx, &mat_c_values, &mat_c_rowcount, &mat_c_colcount, &mat_c_nnz); edge_sparse_csr_reader_double(mat_st, &mat_st_rowptr, &mat_st_colidx, &mat_st_values, &mat_st_rowcount, &mat_st_colcount, &mat_st_nnz); printf("done!\n\n"); /* generate kernels */ printf("generating code... 
"); l_xgemm_desc_stiff = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_stiff, num_quants, num_modes, num_modes, num_modes, 0, num_modes, alpha, beta, flags, prefetch); l_xgemm_desc_star = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_star, num_quants, num_modes, num_quants, 0, num_modes, num_modes, alpha, beta, flags, prefetch); a_kernel = libxsmm_create_packed_spxgemm_csr(l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_a_rowptr, mat_a_colidx, (const void *)mat_a_values).dmm; b_kernel = libxsmm_create_packed_spxgemm_csr(l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_b_rowptr, mat_b_colidx, (const void *)mat_b_values).dmm; c_kernel = libxsmm_create_packed_spxgemm_csr(l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_c_rowptr, mat_c_colidx, (const void *)mat_c_values).dmm; st_kernel = libxsmm_create_packed_spxgemm_csr(l_xgemm_desc_star, (unsigned int)num_cfr, mat_st_rowptr, mat_st_colidx, (const void *)mat_st_values).dmm; if (a_kernel == 0) { printf("a kernel could not be built -> exit!"); exit(-1); } if (b_kernel == 0) { printf("b kernel could not be built -> exit!"); exit(-1); } if (b_kernel == 0) { printf("c kernel could not be built -> exit!"); exit(-1); } if (st_kernel == 0) { printf("st kernel could not be built -> exit!"); exit(-1); } printf("done!\n\n"); /* copying code to 1 GB page */ #if 0 #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) printf("copying code to 1GB page...\n"); onegcode = (void *)edge_hp_malloc(5 * 1024 * 1024, 2097152); memcpy(onegcode, (void *)a_kernel, 1505); memcpy(onegcode + (1 * 1024 * 1024) + 64, (void *)b_kernel, 2892); memcpy(onegcode + (2 * 1024 * 1024) + 128, (void *)c_kernel, 3249); memcpy(onegcode + (3 * 1024 * 1024) + 196, (void *)st_kernel, 11010); a_kernel = (libxsmm_dmmfunction) onegcode; b_kernel = (libxsmm_dmmfunction) (onegcode + (1 * 1024 * 1024) + 64); c_kernel = (libxsmm_dmmfunction) (onegcode + (2 * 1024 * 1024) + 128); st_kernel = (libxsmm_dmmfunction) (onegcode + (3 * 1024 * 1024) + 196); printf("...done\n\n"); #endif #endif /* create unknowns and t-unknowns */ printf("allocating and initializing fake data... 
\n"); /* DoFs */ printf(" q: %f MiB\n", ((double)(num_elems * num_modes * num_quants * num_cfr * sizeof(double))) / (1024.0 * 1024.0)); q = (double *)edge_hp_malloc(num_elems * num_modes * num_quants * num_cfr * sizeof(double), 2097152); /* tDofs */ printf(" qt: %f MiB\n", ((double)(num_elems * num_modes * num_quants * num_cfr * sizeof(double))) / (1024.0 * 1024.0)); qt = (double *)edge_hp_malloc(num_elems * num_modes * num_quants * num_cfr * sizeof(double), 2097152); /* star matrices */ printf(" star: %f MiB\n", ((double)(num_elems * 3 * l_star_ent * sizeof(double))) / (1024.0 * 1024.0)); star = (double *)edge_hp_malloc(num_elems * 3 * l_star_ent * sizeof(double), 2097152); /* stiffness matrices */ printf("global: %f MiB\n", ((double)(3 * num_modes * num_modes * sizeof(double))) / (1024.0 * 1024)); global = (double *)edge_hp_malloc(3 * num_modes * num_modes * sizeof(double), 2097152); /* per thread scratch */ printf(" t: %f MiB\n", ((double)(l_num_threads * num_modes * num_quants * num_cfr * sizeof(double))) / (1024.0 * 1024.0)); qs = (double *)edge_hp_malloc(l_num_threads * num_modes * num_quants * num_cfr * sizeof(double), 2097152); for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { q[i * elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { qt[i * elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)l_num_threads; i++) { for (j = 0; j < (int)elem_size; j++) { qs[i * elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)mat_st_nnz * 3; j++) { star[(i * 3 * mat_st_nnz) + j] = libxsmm_rng_f64(); } } for (i = 0; i < 3; i++) { for (j = 0; j < num_modes * num_modes; j++) { global[(i * num_modes * num_modes) + j] = libxsmm_rng_f64(); } } printf("allocation done!\n\n"); printf("running benchmark...\n"); l_start = libxsmm_timer_tick(); { libxsmm_timer_tickint mystart, myend; #if defined(HANDLE_AMOK) size_t cur_amoks = 0; size_t non_amoks = l_num_threads; #endif size_t l_el_chunk = 0; size_t l_el_start = 0; size_t l_el_end = 0; /* initial work distribution */ amok_balance(amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end); for (i = 0; i < (int)num_reps; i++) { #if defined(HANDLE_AMOK) /* did we had an amok? 
*/ if (cur_amoks != amoks[8 * l_num_threads]) { cur_amoks = amoks[8 * l_num_threads]; non_amoks = l_num_threads - cur_amoks; /* re-balance work */ amok_balance(amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end); } #endif mystart = libxsmm_timer_tick(); for (j = (int)l_el_start; j < (int)l_el_end; j++) { #if 1 st_kernel(star + (j * 3 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); a_kernel(qs + (mytid * elem_size), global, q + (j * elem_size)); st_kernel(star + (j * 3 * mat_st_nnz) + mat_st_nnz, qt + (j * elem_size), qs + (mytid * elem_size)); b_kernel(qs + (mytid * elem_size), global +(num_modes * num_modes), q + (j * elem_size)); st_kernel(star + (j * 3 * mat_st_nnz) + (2 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); c_kernel(qs + (mytid * elem_size), global +(2 * num_modes * num_modes), q + (j * elem_size)); #else matMulFusedBC(8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star + (j * 3 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); matMulFusedAC(8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs + (mytid * elem_size), global, q + (j * elem_size)); matMulFusedBC(8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star + (j * 3 * mat_st_nnz) + mat_st_nnz, qt + (j * elem_size), qs + (mytid * elem_size)); matMulFusedAC(8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs + (mytid * elem_size), global +(num_modes * num_modes), q + (j * elem_size)); matMulFusedBC(8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star + (j * 3 * mat_st_nnz) + (2 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); matMulFusedAC(8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs + (mytid * elem_size), global +(2 * num_modes * num_modes), q + (j * elem_size)); #endif } myend = libxsmm_timer_tick(); l_cur_thread_time[8 * mytid] = libxsmm_timer_duration(mystart, myend); l_total_thread[8 * mytid] += libxsmm_timer_duration(mystart, myend); #if defined(HANDLE_AMOK) /* checking for amoks is centralized business */ if (mytid == 0) { /* amok check */ amok_detect(l_cur_thread_time, amoks, l_num_threads); } #endif } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("...done!\n\n"); /* some timing stats */ time_max = 0.0; time_min = 80000000; time_avg = 0.0; for (i = 0; i < (int)l_num_threads; i++) { if (amoks[8 * i] == 0) { if (l_total_thread[8 * i] > time_max) time_max = l_total_thread[8 * i]; if (l_total_thread[8 * i] < time_min) time_min = l_total_thread[8 * i]; time_avg += l_total_thread[8 * i]; } } time_avg = time_avg / ((double)(l_num_threads - amoks[8 * l_num_threads])); flops_vol = (double)num_quants *(double)mat_a_nnz *(double)num_cfr *2.0; flops_vol += (double)num_quants *(double)mat_b_nnz *(double)num_cfr *2.0; flops_vol += (double)num_quants *(double)mat_c_nnz *(double)num_cfr *2.0; flops_vol += (double)num_modes *(double)mat_st_nnz *(double)num_cfr *6.0; /* 3 star matrix mul */ printf("%fs time for vol (asm), min %f, max %f, avg %f, #amoks %llu, amok-threads ", l_total, time_min, time_max, time_avg, (unsigned long long)amoks[8 * l_num_threads]); for (i = 0; i < (int)l_num_threads; i++) { if (amoks[8 * i] != 0) { printf("%i,", i); } } printf("\n"); printf("%f GFLOPS for vol (asm)\n", ((double)num_elems * (double)num_reps * flops_vol) / (l_total * 1.0e9)); printf("%f GiB/s for vol (asm)\n", (double)((double)num_elems * 
(double)elem_size * 8.0 * 3.0 * (double)num_reps) / (l_total * 1024.0 * 1024.0 * 1024.0)); printf("done!\n\n"); /* some empty lines at the end */ printf("\n\n"); return 0; }
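The performance model printed at the end of main() is plain arithmetic: every stiffness kernel contributes 2 flops (multiply plus add) per nonzero, per quantity, per fused CFR lane, and the star term carries a factor of 6, i.e. 2 flops across the 3 star matrices. A worked sketch of that accounting follows; all nnz counts, element counts, and timings are made up for illustration.

#include <stdio.h>

int main(void) {
  double nq = 9.0, modes = 9.0, cfr = 8.0;              /* default sizes */
  double nnz_a = 81.0, nnz_b = 81.0, nnz_c = 81.0;      /* illustrative */
  double nnz_st = 27.0;                                 /* illustrative */
  /* 2 flops per nnz per quantity per CFR lane for each stiffness kernel,
   * plus 2 flops x 3 star matrices for the star term */
  double flops_vol = nq * (nnz_a + nnz_b + nnz_c) * cfr * 2.0
                   + modes * nnz_st * cfr * 6.0;
  double elems = 1.0e6, reps = 10.0, seconds = 2.0;     /* illustrative */
  printf("flops per element-update: %f\n", flops_vol);
  printf("%f GFLOPS\n", elems * reps * flops_vol / (seconds * 1.0e9));
  return 0;
}

The GiB/s line in the benchmark follows the same pattern, counting 8 bytes per double and 3 streamed arrays (q, qt, and the scratch) per element update.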
/* * Alexander Heinecke (Intel Corp.) **************************************************************************** */ #include "edge_proxy_common.h" #include <libxsmm_intrinsics_x86.h> #include <libxsmm.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) #include <omp.h> #endif /* #define EDGE_HP_1G */ /* #define HANDLE_AMOK */ #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) #include <sys/mman.h> #include <linux/mman.h> #endif LIBXSMM_INLINE void * edge_hp_malloc(size_t nbytes, size_t alignment) { void *ret_ptr = NULL; #if defined(EDGE_HP_1G) size_t num_large_pages = nbytes / (1073741824L); if (nbytes > num_large_pages * 1073741824L) { num_large_pages++; } nbytes = (size_t) num_large_pages *1073741824L; printf("trying to allocate %ld 1G pages\n", num_large_pages); /* * ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, * MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 ); */ ret_ptr = mmap(NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0); if ((ret_ptr == (void *)(-1))) { fprintf(stderr, "1G mmap call failed\n"); exit(1); } #elif defined(EDGE_HP_2M) size_t num_large_pages = nbytes / (2097152UL); if (nbytes > num_large_pages * 2097152UL) { num_large_pages++; } nbytes = (size_t) num_large_pages *2097152UL; printf("trying to allocate %ld 2M pages\n", num_large_pages); /* * ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, * MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 ); */ ret_ptr = mmap(NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0); if ((ret_ptr == (void *)(-1))) { fprintf(stderr, "2M mmap call failed\n"); exit(1); } #else ret_ptr = libxsmm_aligned_malloc(nbytes, alignment); #endif return ret_ptr; } LIBXSMM_INLINE void edge_hp_free(void *ptr, size_t nbytes) { LIBXSMM_UNUSED(nbytes); #if defined(EDGE_HP_1G) /* to be implemented */ #elif defined(EDGE_HP_2M) /* to be implemented */ #else libxsmm_free(ptr); #endif } #if defined(__AVX512F__) LIBXSMM_INLINE void matMulFusedAC(unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c) { unsigned int l_m, l_n, l_k; const __m512d beta = _mm512_set1_pd(i_beta); LIBXSMM_UNUSED(i_r); for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = (i_beta != 0.0) ? _mm512_mul_pd(_mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])), beta) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = _mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])); for (l_k = 0; l_k < i_k; l_k++) { const __m512d alpha = _mm512_set1_pd(i_b[l_k * i_ldB + l_n]); vc = _mm512_fmadd_pd(alpha, _mm512_loadu_pd(&(i_a[l_m * i_ldA * 8 + l_k * 8 + 0])), vc); } _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } } LIBXSMM_INLINE void matMulFusedBC(unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c) { unsigned int l_m, l_n, l_k; const __m512d beta = _mm512_set1_pd(i_beta); LIBXSMM_UNUSED(i_r); for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = (i_beta != 0.0) ? 
_mm512_mul_pd(_mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])), beta) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = _mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])); for (l_k = 0; l_k < i_k; l_k++) { const __m512d alpha = _mm512_set1_pd(i_a[l_m * i_ldA + l_k]); vc = _mm512_fmadd_pd(alpha, _mm512_loadu_pd(&(i_b[l_k * i_ldB * 8 + l_n * 8 + 0])), vc); } _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } } #endif LIBXSMM_INLINE void amok_detect(const double *i_runtimes, size_t * io_amoks, const size_t i_workers) { double time_avg; size_t i; time_avg = 0.0; for (i = 0; i < i_workers; i++) { if (io_amoks[8 * i] == 0) { time_avg += i_runtimes[8 * i]; } } time_avg = time_avg / ((double)(i_workers - io_amoks[8 * i_workers])); /* let detect amoks */ for (i = 0; i < i_workers; i++) { if (io_amoks[8 * i] == 0) { if (i_runtimes[8 * i] > time_avg * 1.07) { /* this is the amok condition */ io_amoks[8 * i_workers]++; io_amoks[8 * i] = 1; } } } } LIBXSMM_INLINE void amok_balance(const size_t * i_amoks, const size_t i_workers, const size_t i_worksize, const size_t i_mytid, size_t * io_chunk, size_t * io_mystart, size_t * io_myend) { size_t l_chunk, l_start, l_end; size_t l_cur_amoks = i_amoks[8 * i_workers]; size_t l_non_amoks = i_workers - l_cur_amoks; l_chunk = (i_worksize % l_non_amoks == 0) ? (i_worksize / l_non_amoks) : ((i_worksize / l_non_amoks) + 1); if (i_amoks[8 * i_mytid] != 0) { l_start = 0; l_end = 0; } else { size_t l_tid_offset = 0; size_t l_z; for (l_z = 0; l_z < i_mytid; l_z++) { if (i_amoks[8 * l_z] != 0) { l_tid_offset++; } } l_tid_offset = i_mytid - l_tid_offset; l_start = (l_tid_offset * l_chunk < i_worksize) ? (l_tid_offset * l_chunk) : i_worksize; l_end = ((l_tid_offset + 1) * l_chunk < i_worksize) ? 
((l_tid_offset + 1) * l_chunk) : i_worksize; } *io_chunk = l_chunk; *io_mystart = l_start; *io_myend = l_end; } int main(int argc, char *argv[]) { char *mat_a = 0; unsigned int *mat_a_rowptr, *mat_a_colidx; unsigned int mat_a_rowcount, mat_a_colcount, mat_a_nnz; double *mat_a_values; libxsmm_dmmfunction a_kernel; char *mat_b = 0; unsigned int *mat_b_rowptr, *mat_b_colidx; unsigned int mat_b_rowcount, mat_b_colcount, mat_b_nnz; double *mat_b_values; libxsmm_dmmfunction b_kernel; char *mat_c = 0; unsigned int *mat_c_rowptr, *mat_c_colidx; unsigned int mat_c_rowcount, mat_c_colcount, mat_c_nnz; double *mat_c_values; libxsmm_dmmfunction c_kernel; char *mat_st = 0; unsigned int *mat_st_rowptr, *mat_st_colidx; unsigned int mat_st_rowcount, mat_st_colcount, mat_st_nnz; double *mat_st_values; libxsmm_dmmfunction st_kernel; int num_modes = 9; int num_quants = 9; size_t num_elems = 0; size_t num_cfr = 8; size_t num_reps = 1; size_t elem_size; /* OpenMP: signed induction variables */ int i, j; const libxsmm_gemm_descriptor *l_xgemm_desc_stiff = 0, *l_xgemm_desc_star = 0; libxsmm_descriptor_blob l_xgemm_blob_stiff, l_xgemm_blob_star; const libxsmm_gemm_prefetch_type prefetch = LIBXSMM_GEMM_PREFETCH_NONE; const int flags = LIBXSMM_GEMM_FLAGS('N', 'N'); const double alpha = 1, beta = 1; double flops_vol; double *q; double *qt; double *qs; double *star; double *global; unsigned long long l_start, l_end; double l_total; unsigned int l_num_threads; unsigned int l_star_ent = num_quants * num_quants; double *l_total_thread; double *l_cur_thread_time; double time_max; double time_min; double time_avg; size_t *amoks; /* read cmd */ if ((argc > 1 && !strncmp(argv[1], "-h", 3)) || (argc != 8)) { printf("Usage: %s stif1 stif2 stif3 star nModes nElems nReps\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* some empty lines at the beginning */ printf("\n"); i = 1; if (argc > (int)i) mat_a = argv[i++]; if (argc > (int)i) mat_b = argv[i++]; if (argc > (int)i) mat_c = argv[i++]; if (argc > (int)i) mat_st = argv[i++]; if (argc > (int)i) num_modes = atoi(argv[i++]); if (argc > (int)i) num_elems = atoi(argv[i++]); if (argc > (int)i) num_reps = atoi(argv[i++]); elem_size = num_modes * num_quants * num_cfr; #if defined(_OPENMP) #pragma omp parallel { #pragma omp master { l_num_threads = omp_get_num_threads(); } } #else l_num_threads = 1; #endif l_total_thread = (double *)malloc(8 * l_num_threads * sizeof(double)); l_cur_thread_time = (double *)malloc(8 * l_num_threads * sizeof(double)); amoks = (size_t *) malloc(8 * (l_num_threads + 1) * sizeof(size_t)); for (i = 0; i < 8 * ((int)l_num_threads + 1); i++) { amoks[i] = 0; } /* read matrices */ printf("reading sparse matrices... "); edge_sparse_csr_reader_double(mat_a, &mat_a_rowptr, &mat_a_colidx, &mat_a_values, &mat_a_rowcount, &mat_a_colcount, &mat_a_nnz); edge_sparse_csr_reader_double(mat_b, &mat_b_rowptr, &mat_b_colidx, &mat_b_values, &mat_b_rowcount, &mat_b_colcount, &mat_b_nnz); edge_sparse_csr_reader_double(mat_c, &mat_c_rowptr, &mat_c_colidx, &mat_c_values, &mat_c_rowcount, &mat_c_colcount, &mat_c_nnz); edge_sparse_csr_reader_double(mat_st, &mat_st_rowptr, &mat_st_colidx, &mat_st_values, &mat_st_rowcount, &mat_st_colcount, &mat_st_nnz); printf("done!\n\n"); /* generate kernels */ printf("generating code... 
"); l_xgemm_desc_stiff = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_stiff, num_quants, num_modes, num_modes, num_modes, 0, num_modes, alpha, beta, flags, prefetch); l_xgemm_desc_star = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_star, num_quants, num_modes, num_quants, 0, num_modes, num_modes, alpha, beta, flags, prefetch); a_kernel = libxsmm_create_packed_spxgemm_csr(l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_a_rowptr, mat_a_colidx, (const void *)mat_a_values).dmm; b_kernel = libxsmm_create_packed_spxgemm_csr(l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_b_rowptr, mat_b_colidx, (const void *)mat_b_values).dmm; c_kernel = libxsmm_create_packed_spxgemm_csr(l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_c_rowptr, mat_c_colidx, (const void *)mat_c_values).dmm; st_kernel = libxsmm_create_packed_spxgemm_csr(l_xgemm_desc_star, (unsigned int)num_cfr, mat_st_rowptr, mat_st_colidx, (const void *)mat_st_values).dmm; if (a_kernel == 0) { printf("a kernel could not be built -> exit!"); exit(-1); } if (b_kernel == 0) { printf("b kernel could not be built -> exit!"); exit(-1); } if (b_kernel == 0) { printf("c kernel could not be built -> exit!"); exit(-1); } if (st_kernel == 0) { printf("st kernel could not be built -> exit!"); exit(-1); } printf("done!\n\n"); /* copying code to 1 GB page */ #if 0 #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) printf("copying code to 1GB page...\n"); onegcode = (void *)edge_hp_malloc(5 * 1024 * 1024, 2097152); memcpy(onegcode, (void *)a_kernel, 1505); memcpy(onegcode + (1 * 1024 * 1024) + 64, (void *)b_kernel, 2892); memcpy(onegcode + (2 * 1024 * 1024) + 128, (void *)c_kernel, 3249); memcpy(onegcode + (3 * 1024 * 1024) + 196, (void *)st_kernel, 11010); a_kernel = (libxsmm_dmmfunction) onegcode; b_kernel = (libxsmm_dmmfunction) (onegcode + (1 * 1024 * 1024) + 64); c_kernel = (libxsmm_dmmfunction) (onegcode + (2 * 1024 * 1024) + 128); st_kernel = (libxsmm_dmmfunction) (onegcode + (3 * 1024 * 1024) + 196); printf("...done\n\n"); #endif #endif /* create unknowns and t-unknowns */ printf("allocating and initializing fake data... 
\n"); /* DoFs */ printf(" q: %f MiB\n", ((double)(num_elems * num_modes * num_quants * num_cfr * sizeof(double))) / (1024.0 * 1024.0)); q = (double *)edge_hp_malloc(num_elems * num_modes * num_quants * num_cfr * sizeof(double), 2097152); /* tDofs */ printf(" qt: %f MiB\n", ((double)(num_elems * num_modes * num_quants * num_cfr * sizeof(double))) / (1024.0 * 1024.0)); qt = (double *)edge_hp_malloc(num_elems * num_modes * num_quants * num_cfr * sizeof(double), 2097152); /* star matrices */ printf(" star: %f MiB\n", ((double)(num_elems * 3 * l_star_ent * sizeof(double))) / (1024.0 * 1024.0)); star = (double *)edge_hp_malloc(num_elems * 3 * l_star_ent * sizeof(double), 2097152); /* stiffness matrices */ printf("global: %f MiB\n", ((double)(3 * num_modes * num_modes * sizeof(double))) / (1024.0 * 1024)); global = (double *)edge_hp_malloc(3 * num_modes * num_modes * sizeof(double), 2097152); /* per thread scratch */ printf(" t: %f MiB\n", ((double)(l_num_threads * num_modes * num_quants * num_cfr * sizeof(double))) / (1024.0 * 1024.0)); qs = (double *)edge_hp_malloc(l_num_threads * num_modes * num_quants * num_cfr * sizeof(double), 2097152); for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { q[i * elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { qt[i * elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)l_num_threads; i++) { for (j = 0; j < (int)elem_size; j++) { qs[i * elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)mat_st_nnz * 3; j++) { star[(i * 3 * mat_st_nnz) + j] = libxsmm_rng_f64(); } } for (i = 0; i < 3; i++) { for (j = 0; j < num_modes * num_modes; j++) { global[(i * num_modes * num_modes) + j] = libxsmm_rng_f64(); } } printf("allocation done!\n\n"); printf("running benchmark...\n"); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) #pragma omp parallel private(i, j) #endif { #if defined(_OPENMP) int mytid = omp_get_thread_num(); #else int mytid = 0; #endif libxsmm_timer_tickint mystart, myend; #if defined(HANDLE_AMOK) size_t cur_amoks = 0; size_t non_amoks = l_num_threads; #endif size_t l_el_chunk = 0; size_t l_el_start = 0; size_t l_el_end = 0; /* initial work distribution */ amok_balance(amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end); for (i = 0; i < (int)num_reps; i++) { #if defined(HANDLE_AMOK) /* did we had an amok? 
*/ if (cur_amoks != amoks[8 * l_num_threads]) { cur_amoks = amoks[8 * l_num_threads]; non_amoks = l_num_threads - cur_amoks; /* re-balance work */ amok_balance(amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end); } #endif mystart = libxsmm_timer_tick(); for (j = (int)l_el_start; j < (int)l_el_end; j++) { #if 1 st_kernel(star + (j * 3 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); a_kernel(qs + (mytid * elem_size), global, q + (j * elem_size)); st_kernel(star + (j * 3 * mat_st_nnz) + mat_st_nnz, qt + (j * elem_size), qs + (mytid * elem_size)); b_kernel(qs + (mytid * elem_size), global +(num_modes * num_modes), q + (j * elem_size)); st_kernel(star + (j * 3 * mat_st_nnz) + (2 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); c_kernel(qs + (mytid * elem_size), global +(2 * num_modes * num_modes), q + (j * elem_size)); #else matMulFusedBC(8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star + (j * 3 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); matMulFusedAC(8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs + (mytid * elem_size), global, q + (j * elem_size)); matMulFusedBC(8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star + (j * 3 * mat_st_nnz) + mat_st_nnz, qt + (j * elem_size), qs + (mytid * elem_size)); matMulFusedAC(8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs + (mytid * elem_size), global +(num_modes * num_modes), q + (j * elem_size)); matMulFusedBC(8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star + (j * 3 * mat_st_nnz) + (2 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); matMulFusedAC(8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs + (mytid * elem_size), global +(2 * num_modes * num_modes), q + (j * elem_size)); #endif } myend = libxsmm_timer_tick(); l_cur_thread_time[8 * mytid] = libxsmm_timer_duration(mystart, myend); l_total_thread[8 * mytid] += libxsmm_timer_duration(mystart, myend); #if defined(_OPENMP) #pragma omp barrier #endif #if defined(HANDLE_AMOK) /* checking for amoks is centralized business */ if (mytid == 0) { /* amok check */ amok_detect(l_cur_thread_time, amoks, l_num_threads); } #if defined(_OPENMP) #pragma omp barrier #endif #endif } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("...done!\n\n"); /* some timing stats */ time_max = 0.0; time_min = 80000000; time_avg = 0.0; for (i = 0; i < (int)l_num_threads; i++) { if (amoks[8 * i] == 0) { if (l_total_thread[8 * i] > time_max) time_max = l_total_thread[8 * i]; if (l_total_thread[8 * i] < time_min) time_min = l_total_thread[8 * i]; time_avg += l_total_thread[8 * i]; } } time_avg = time_avg / ((double)(l_num_threads - amoks[8 * l_num_threads])); flops_vol = (double)num_quants *(double)mat_a_nnz *(double)num_cfr *2.0; flops_vol += (double)num_quants *(double)mat_b_nnz *(double)num_cfr *2.0; flops_vol += (double)num_quants *(double)mat_c_nnz *(double)num_cfr *2.0; flops_vol += (double)num_modes *(double)mat_st_nnz *(double)num_cfr *6.0; /* 3 star matrix mul */ printf("%fs time for vol (asm), min %f, max %f, avg %f, #amoks %llu, amok-threads ", l_total, time_min, time_max, time_avg, (unsigned long long)amoks[8 * l_num_threads]); for (i = 0; i < (int)l_num_threads; i++) { if (amoks[8 * i] != 0) { printf("%i,", i); } } printf("\n"); printf("%f GFLOPS for vol (asm)\n", ((double)num_elems * (double)num_reps * 
flops_vol) / (l_total * 1.0e9)); printf("%f GiB/s for vol (asm)\n", (double)((double)num_elems * (double)elem_size * 8.0 * 3.0 * (double)num_reps) / (l_total * 1024.0 * 1024.0 * 1024.0)); printf("done!\n\n"); /* some empty lines at the end */ printf("\n\n"); return 0; }
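Two OpenMP idioms in the benchmark above are worth isolating: the parallel/master block that captures omp_get_num_threads() once before the timed region, and the stride-8 indexing (l_total_thread[8*mytid]) that gives every thread its own 64-byte cache line, so per-thread timing updates do not false-share. A minimal sketch of both, assuming a compiler invoked with -fopenmp; all names and sizes are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(void) {
  unsigned int nthreads = 1;
#pragma omp parallel
  {
#pragma omp master
    nthreads = (unsigned int)omp_get_num_threads(); /* team size, queried once */
  }

  /* one cache line (8 doubles = 64 bytes) per thread; only slot 8*tid is used */
  double *per_thread = (double *)calloc(8 * (size_t)nthreads, sizeof(double));

#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    per_thread[8 * tid] += 1.0; /* neighbors sit on different cache lines */
  }

  printf("threads: %u, slot 0: %f\n", nthreads, per_thread[0]);
  free(per_thread);
  return 0;
}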
example2.c
// calculation example of electric field intensity distributions #include "emf_mie_ms.h" int main(int argc,char *argv[]) { MSPD msp; FILE *fp1,*fp2; double complex e[3],h[3]; double rang,dr,r[3],*ie,*ih; int max,i,j,ca; read_dat_ms(argv[1],&msp); // read data file print_data_ms(&msp); // print data max=200; rang=4.0*msp.bm.lambda_0; dr=rang*2/(double)(max-1); ca=2; ie=(double *)m_alloc2(max,sizeof(double),"example2.c,ie"); ih=(double *)m_alloc2(max,sizeof(double),"example2.c,ih"); // x=0 plane if((fp1=fopen("Ie_yz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","# y z electric_field_intensity"); if((fp2=fopen("Ih_yz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","# y z magnetic_field_intensity"); r[0]=0.0; for(i=0;i<max;i++){ r[1]=-rang+(double)i*dr; #pragma omp parallel for schedule(dynamic) firstprivate(r) private(e,h) // omp parallel for(j=0;j<max;j++){ r[2]=-rang+(double)j*dr; total_EH_ms(e,h,r,&msp); // total field ie[j]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[j]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); } for(j=0;j<max;j++){ r[2]=-rang+(double)j*dr; fprintf(fp1,"%g %g %15.14e\n",r[1],r[2],ie[j]); fprintf(fp2,"%g %g %15.14e\n",r[1],r[2],ih[j]); } fprintf(fp1,"\n"); fprintf(fp2,"\n"); } fclose(fp1); fclose(fp2); // y=0 plane if((fp1=fopen("Ie_xz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","# x z electric_field_intensity"); if((fp2=fopen("Ih_xz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","# x z magnetic_field_intensity"); r[1]=0.0; for(i=0;i<ca*max;i++){ r[0]=-rang+(double)i*dr; #pragma omp parallel for schedule(dynamic) firstprivate(r) private(e,h) // omp parallel for(j=0;j<max;j++){ r[2]=-rang+(double)j*dr; total_EH_ms(e,h,r,&msp); ie[j]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[j]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); } for(j=0;j<max;j++){ r[2]=-rang+(double)j*dr; fprintf(fp1,"%g %g %15.14e\n",r[0],r[2],ie[j]); fprintf(fp2,"%g %g %15.14e\n",r[0],r[2],ih[j]); } fprintf(fp1,"\n"); fprintf(fp2,"\n"); } fclose(fp1); fclose(fp2); // z=0 plane if((fp1=fopen("Ie_xy.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","# x y electric_field_intensity"); if((fp2=fopen("Ih_xy.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","# x y magnetic_field_intensity"); r[2]=0.0; for(i=0;i<ca*max;i++){ r[0]=-rang+(double)i*dr; #pragma omp parallel for schedule(dynamic) firstprivate(r) private(e,h) // omp parallel for(j=0;j<max;j++){ r[1]=-rang+(double)j*dr; total_EH_ms(e,h,r,&msp); ie[j]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[j]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); } for(j=0;j<max;j++){ r[1]=-rang+(double)j*dr; fprintf(fp1,"%g %g %15.14e\n",r[0],r[1],ie[j]); fprintf(fp2,"%g %g %15.14e\n",r[0],r[1],ih[j]); } fprintf(fp1,"\n"); fprintf(fp2,"\n"); } fclose(fp1); fclose(fp2); printf("Intensity plot is finished\n"); free(ie); free(ih); free_ms(&msp); return 0; }
// calculation example of electric field intensity distributions #include "emf_mie_ms.h" int main(int argc, char *argv[]) { MSPD msp; FILE *fp1, *fp2; double complex e[3], h[3]; double rang, dr, r[3], *ie, *ih; int max, i, j, ca; read_dat_ms(argv[1], &msp); //read data file print_data_ms(&msp); //print data max = 200; rang = 4.0 * msp.bm.lambda_0; dr = rang * 2 / (double)(max - 1); ca = 2; ie = (double *)m_alloc2(max, sizeof(double), "example2.c,ie"); ih = (double *)m_alloc2(max, sizeof(double), "example2.c,ih"); //x = 0 plane if ((fp1 = fopen("Ie_yz.txt", "wt")) == NULL) { printf("Can not open the file.\n"); exit(1); } fprintf(fp1, "%s\n", "# y z electric_field_intensity"); if ((fp2 = fopen("Ih_yz.txt", "wt")) == NULL) { printf("Can not open the file.\n"); exit(1); } fprintf(fp2, "%s\n", "# y z magnetic_field_intensity"); r[0] = 0.0; for (i = 0; i < max; i++) { r[1] = -rang + (double)i *dr; for (j = 0; j < max; j++) { r[2] = -rang + (double)j *dr; total_EH_ms(e, h, r, &msp); //total field ie[j] = creal(e[0] * conj(e[0])) + creal(e[1] * conj(e[1])) + creal(e[2] * conj(e[2])); ih[j] = creal(h[0] * conj(h[0])) + creal(h[1] * conj(h[1])) + creal(h[2] * conj(h[2])); } for (j = 0; j < max; j++) { r[2] = -rang + (double)j *dr; fprintf(fp1, "%g %g %15.14e\n", r[1], r[2], ie[j]); fprintf(fp2, "%g %g %15.14e\n", r[1], r[2], ih[j]); } fprintf(fp1, "\n"); fprintf(fp2, "\n"); } fclose(fp1); fclose(fp2); //y = 0 plane if ((fp1 = fopen("Ie_xz.txt", "wt")) == NULL) { printf("Can not open the file.\n"); exit(1); } fprintf(fp1, "%s\n", "# x z electric_field_intensity"); if ((fp2 = fopen("Ih_xz.txt", "wt")) == NULL) { printf("Can not open the file.\n"); exit(1); } fprintf(fp2, "%s\n", "# x z magnetic_field_intensity"); r[1] = 0.0; for (i = 0; i < ca * max; i++) { r[0] = -rang + (double)i *dr; for (j = 0; j < max; j++) { r[2] = -rang + (double)j *dr; total_EH_ms(e, h, r, &msp); ie[j] = creal(e[0] * conj(e[0])) + creal(e[1] * conj(e[1])) + creal(e[2] * conj(e[2])); ih[j] = creal(h[0] * conj(h[0])) + creal(h[1] * conj(h[1])) + creal(h[2] * conj(h[2])); } for (j = 0; j < max; j++) { r[2] = -rang + (double)j *dr; fprintf(fp1, "%g %g %15.14e\n", r[0], r[2], ie[j]); fprintf(fp2, "%g %g %15.14e\n", r[0], r[2], ih[j]); } fprintf(fp1, "\n"); fprintf(fp2, "\n"); } fclose(fp1); fclose(fp2); //z = 0 plane if ((fp1 = fopen("Ie_xy.txt", "wt")) == NULL) { printf("Can not open the file.\n"); exit(1); } fprintf(fp1, "%s\n", "# x y electric_field_intensity"); if ((fp2 = fopen("Ih_xy.txt", "wt")) == NULL) { printf("Can not open the file.\n"); exit(1); } fprintf(fp2, "%s\n", "# x y magnetic_field_intensity"); r[2] = 0.0; for (i = 0; i < ca * max; i++) { r[0] = -rang + (double)i *dr; for (j = 0; j < max; j++) { r[1] = -rang + (double)j *dr; total_EH_ms(e, h, r, &msp); ie[j] = creal(e[0] * conj(e[0])) + creal(e[1] * conj(e[1])) + creal(e[2] * conj(e[2])); ih[j] = creal(h[0] * conj(h[0])) + creal(h[1] * conj(h[1])) + creal(h[2] * conj(h[2])); } for (j = 0; j < max; j++) { r[1] = -rang + (double)j *dr; fprintf(fp1, "%g %g %15.14e\n", r[0], r[1], ie[j]); fprintf(fp2, "%g %g %15.14e\n", r[0], r[1], ih[j]); } fprintf(fp1, "\n"); fprintf(fp2, "\n"); } fclose(fp1); fclose(fp2); printf("Intensity plot is finished\n"); free(ie); free(ih); free_ms(&msp); return 0; }
// calculation example of electric field intensity distributions #include "emf_mie_ms.h" int main(int argc, char *argv[]) { MSPD msp; FILE *fp1, *fp2; double complex e[3], h[3]; double rang, dr, r[3], *ie, *ih; int max, i, j, ca; read_dat_ms(argv[1], &msp); //read data file print_data_ms(&msp); //print data max = 200; rang = 4.0 * msp.bm.lambda_0; dr = rang * 2 / (double)(max - 1); ca = 2; ie = (double *)m_alloc2(max, sizeof(double), "example2.c,ie"); ih = (double *)m_alloc2(max, sizeof(double), "example2.c,ih"); //x = 0 plane if ((fp1 = fopen("Ie_yz.txt", "wt")) == NULL) { printf("Can not open the file.\n"); exit(1); } fprintf(fp1, "%s\n", "# y z electric_field_intensity"); if ((fp2 = fopen("Ih_yz.txt", "wt")) == NULL) { printf("Can not open the file.\n"); exit(1); } fprintf(fp2, "%s\n", "# y z magnetic_field_intensity"); r[0] = 0.0; for (i = 0; i < max; i++) { r[1] = -rang + (double)i *dr; #pragma omp parallel for schedule(dynamic) firstprivate(r) private(e,h) // omp parallel for (j = 0; j < max; j++) { r[2] = -rang + (double)j *dr; total_EH_ms(e, h, r, &msp); //total field ie[j] = creal(e[0] * conj(e[0])) + creal(e[1] * conj(e[1])) + creal(e[2] * conj(e[2])); ih[j] = creal(h[0] * conj(h[0])) + creal(h[1] * conj(h[1])) + creal(h[2] * conj(h[2])); } for (j = 0; j < max; j++) { r[2] = -rang + (double)j *dr; fprintf(fp1, "%g %g %15.14e\n", r[1], r[2], ie[j]); fprintf(fp2, "%g %g %15.14e\n", r[1], r[2], ih[j]); } fprintf(fp1, "\n"); fprintf(fp2, "\n"); } fclose(fp1); fclose(fp2); //y = 0 plane if ((fp1 = fopen("Ie_xz.txt", "wt")) == NULL) { printf("Can not open the file.\n"); exit(1); } fprintf(fp1, "%s\n", "# x z electric_field_intensity"); if ((fp2 = fopen("Ih_xz.txt", "wt")) == NULL) { printf("Can not open the file.\n"); exit(1); } fprintf(fp2, "%s\n", "# x z magnetic_field_intensity"); r[1] = 0.0; for (i = 0; i < ca * max; i++) { r[0] = -rang + (double)i *dr; #pragma omp parallel for schedule(dynamic) firstprivate(r) private(e,h) // omp parallel for (j = 0; j < max; j++) { r[2] = -rang + (double)j *dr; total_EH_ms(e, h, r, &msp); ie[j] = creal(e[0] * conj(e[0])) + creal(e[1] * conj(e[1])) + creal(e[2] * conj(e[2])); ih[j] = creal(h[0] * conj(h[0])) + creal(h[1] * conj(h[1])) + creal(h[2] * conj(h[2])); } for (j = 0; j < max; j++) { r[2] = -rang + (double)j *dr; fprintf(fp1, "%g %g %15.14e\n", r[0], r[2], ie[j]); fprintf(fp2, "%g %g %15.14e\n", r[0], r[2], ih[j]); } fprintf(fp1, "\n"); fprintf(fp2, "\n"); } fclose(fp1); fclose(fp2); //z = 0 plane if ((fp1 = fopen("Ie_xy.txt", "wt")) == NULL) { printf("Can not open the file.\n"); exit(1); } fprintf(fp1, "%s\n", "# x y electric_field_intensity"); if ((fp2 = fopen("Ih_xy.txt", "wt")) == NULL) { printf("Can not open the file.\n"); exit(1); } fprintf(fp2, "%s\n", "# x y magnetic_field_intensity"); r[2] = 0.0; for (i = 0; i < ca * max; i++) { r[0] = -rang + (double)i *dr; #pragma omp parallel for schedule(dynamic) firstprivate(r) private(e,h) // omp parallel for (j = 0; j < max; j++) { r[1] = -rang + (double)j *dr; total_EH_ms(e, h, r, &msp); ie[j] = creal(e[0] * conj(e[0])) + creal(e[1] * conj(e[1])) + creal(e[2] * conj(e[2])); ih[j] = creal(h[0] * conj(h[0])) + creal(h[1] * conj(h[1])) + creal(h[2] * conj(h[2])); } for (j = 0; j < max; j++) { r[1] = -rang + (double)j *dr; fprintf(fp1, "%g %g %15.14e\n", r[0], r[1], ie[j]); fprintf(fp2, "%g %g %15.14e\n", r[0], r[1], ih[j]); } fprintf(fp1, "\n"); fprintf(fp2, "\n"); } fclose(fp1); fclose(fp2); printf("Intensity plot is finished\n"); free(ie); free(ih); free_ms(&msp); return 0; }
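The data-sharing clauses on the inner loops above do real work: firstprivate(r) copies the partially filled sample point into each thread (r[0] and r[1] are already set by the outer loop) so that every thread can write its own r[2], while e and h are pure outputs and a plain private copy suffices. A minimal sketch of the same pattern, with a stand-in computation in place of total_EH_ms and made-up values:

#include <stdio.h>
#include <omp.h>

int main(void) {
  double r[3] = { 0.0, 2.5, 0.0 }; /* r[0], r[1] fixed, as in the outer loop */
  double ie[8];
#pragma omp parallel for schedule(dynamic) firstprivate(r)
  for (int j = 0; j < 8; j++) {
    r[2] = -4.0 + (double)j;     /* each thread updates its own copy of r */
    ie[j] = r[0] + r[1] + r[2];  /* stand-in for total_EH_ms(e, h, r, &msp) */
  }
  for (int j = 0; j < 8; j++) printf("ie[%d] = %f\n", j, ie[j]);
  return 0;
}

Without firstprivate, all threads would share one r and race on r[2]; with private instead of firstprivate, the incoming r[0] and r[1] would be lost inside the loop.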
single_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp single argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } void foo(void); // expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}} #pragma omp single // expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}} #pragma omp single foo void test_no_clause(void) { int i; #pragma omp single foo(); #pragma omp single ++i; } void test_branch_protected_scope(void) { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp single { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause(void) { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single foo bar foo(); } void test_non_identifiers(void) { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single; foo(); #pragma omp parallel // expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp single'}} // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single linear(x); foo(); #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single private(x); foo(); #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single, private(x); foo(); } void test_private(void) { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp single private( foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp single private(, foo(); #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp single private(, ) foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single private() foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single private(int) foo(); #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp single private(0) foo(); int x, y, z; #pragma omp parallel #pragma omp single private(x) foo(); #pragma omp parallel #pragma omp single private(x, y) foo(); #pragma omp parallel #pragma omp single private(x, y, z) foo(); } void test_firstprivate(void) { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp single firstprivate( foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp single firstprivate(, foo(); #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp single firstprivate(, ) foo(); #pragma omp parallel // expected-error@+1 {{expected 
expression}} #pragma omp single firstprivate() foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single firstprivate(int) foo(); #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp single firstprivate(0) foo(); } void test_nowait(void) { #pragma omp single nowait nowait // expected-error {{directive '#pragma omp single' cannot contain more than one 'nowait' clause}} for (int i = 0; i < 16; ++i) ; }
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } void foo(void); // expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}} // expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}} void test_no_clause(void) { int i; foo(); ++i; } void test_branch_protected_scope(void) { int i = 0; L1: ++i; int x[24]; if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause(void) { int i; // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} foo(); } void test_non_identifiers(void) { int i, x; // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} foo(); // expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp single'}} // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} foo(); // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} foo(); // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} foo(); } void test_private(void) { int i; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} foo(); // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} foo(); // expected-error@+1 2 {{expected expression}} foo(); // expected-error@+1 {{expected expression}} foo(); // expected-error@+1 {{expected expression}} foo(); // expected-error@+1 {{expected variable name}} foo(); int x, y, z; foo(); foo(); foo(); } void test_firstprivate(void) { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} foo(); // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} foo(); // expected-error@+1 2 {{expected expression}} foo(); // expected-error@+1 {{expected expression}} foo(); // expected-error@+1 {{expected expression}} foo(); // expected-error@+1 {{expected variable name}} foo(); } void test_nowait(void) { for (int i = 0; i < 16; ++i) ; }
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp single argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } void foo(void); // expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}} #pragma omp single // expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}} #pragma omp single foo void test_no_clause(void) { int i; #pragma omp single foo(); #pragma omp single ++i; } void test_branch_protected_scope(void) { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp single { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause(void) { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single foo bar foo(); } void test_non_identifiers(void) { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single; foo(); #pragma omp parallel // expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp single'}} // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single linear(x); foo(); #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single private(x); foo(); #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single, private(x); foo(); } void test_private(void) { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp single private( foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp single private(, foo(); #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp single private(, ) foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single private() foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single private(int) foo(); #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp single private(0) foo(); int x, y, z; #pragma omp parallel #pragma omp single private(x) foo(); #pragma omp parallel #pragma omp single private(x, y) foo(); #pragma omp parallel #pragma omp single private(x, y, z) foo(); } void test_firstprivate(void) { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp single firstprivate( foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp single firstprivate(, foo(); #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp single firstprivate(, ) foo(); #pragma omp parallel // expected-error@+1 {{expected 
expression}} #pragma omp single firstprivate() foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single firstprivate(int) foo(); #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp single firstprivate(0) foo(); } void test_nowait(void) { #pragma omp single nowait nowait // expected-error {{directive '#pragma omp single' cannot contain more than one 'nowait' clause}} for (int i = 0; i < 16; ++i) ; }
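The three rows above exercise clang's diagnostics for malformed '#pragma omp single' directives; none of them show the construct used correctly. For contrast, a minimal well-formed sketch (not part of the test file; everything here is illustrative, compiled with e.g. -fopenmp): exactly one thread runs the single block while the rest wait at its implicit barrier.

#include <omp.h>
#include <stdio.h>

int main(void) {
  int shared_value = 0;
#pragma omp parallel
  {
#pragma omp single
    shared_value = 42; // executed by exactly one thread; the others wait
                       // at the implicit barrier that ends the construct
    // after the barrier, every thread sees the initialized value
    printf("thread %d sees %d\n", omp_get_thread_num(), shared_value);
  }
  return 0;
}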
nqueens-openmp.c
//# 601 west second street, 2nd floor, elevator B, two rights #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <math.h> #include <unistd.h> void print_board(int n, char *board){ for (int r = 0; r < n; r++) { for (int c = 0; c < n; c++) { printf("%c ", board[r*n+c]); } printf("\n"); } printf("\n"); } int calc_row(int n, int index){ return floor(index / n); } int calc_col(int n, int index){ return index % n; } int calc_index(int n, int row, int col) { return row * n + col; } void remove_rows(int n, int index, char *board){ int row = calc_row(n, index); int col_start = calc_index(n, row, 0); int col_end = calc_index(n, row, n); for(int i = col_start; i < col_end; i++) { if (board[i] != 'q') { board[i] = '*'; } } } void remove_cols(int n, int index, char *board){ int col = calc_col(n, index); int row_start = calc_index(n, 0, col); int row_end = calc_index(n, n, col); for(int i = row_start; i < row_end; i += n){ if (board[i] != 'q') { board[i] = '*'; } } } void remove_diagonals(int n, int index, char *board){ int row; int col_start; int diag; // Down and Right row = calc_row(n, index); col_start = calc_col(n, index) + 1; for (int col = col_start; col < n; col++){ if (row + 1 < n){ row += 1; diag = calc_index(n, row, col); if (board[diag] != 'q'){ board[diag] = '*'; } } } // Down and Left row = calc_row(n, index); col_start = calc_col(n, index) - 1; for (int col = col_start; col > -1; col--){ if (row + 1 < n){ row += 1; diag = calc_index(n, row, col); if (board[diag] != 'q'){ board[diag] = '*'; } } } // Up and Right row = calc_row(n, index); col_start = calc_col(n, index) + 1; for (int col = col_start; col < n; col++){ if (row - 1 >= 0){ row -= 1; diag = calc_index(n, row, col); if (board[diag] != 'q'){ board[diag] = '*'; } } } // Up and Left row = calc_row(n, index); col_start = calc_col(n, index) - 1; for (int col = col_start; col > -1; col--){ if (row - 1 >= 0){ row -= 1; diag = calc_index(n, row, col); if (board[diag] != 'q'){ board[diag] = '*'; } } } } void place_piece(int n, int index, char *board){ board[index] = 'q'; remove_rows(n, index, board); remove_cols(n, index, board); remove_diagonals(n, index, board); } int remaining_moves(int n, char *board){ int moves = 0; for (int i = 0; i < n*n; i++) { if (board[i] == '_'){ moves += 1; } } return moves; } void new_solution(int n, char *board, int s_top, char **solutions){ for (int i = 0; i < n*n; i++) { solutions[s_top][i] = board[i]; } } int validate_board(int n, char *board){ int queens = 0; for (int i = 0; i < n*n; i++) { if (board[i] == 'q') { queens += 1; } } if (queens == n) { return 1; } else { return 0; } } void new_board(int n, int top, char *board, char **stack){ for (int i = 0; i < n*n; i++) { board[i] = stack[top][i]; } } void validate_stack_size(int n, int top, int *max, char **stack){ // realloc stack if it needs to grow if (top >= *max){ *max = *max + floor(*max + n); stack = realloc(stack, (*max)*sizeof(char*)); for (int i = top; i < (*max); i++) { stack[i] = malloc((n * n)*sizeof(char)); } } } void backtrack(int n, int index, char *board, int *top, char **stack){ // if only one move is left, backtracking is not needed if (remaining_moves(n, board) > 1) { // set the place holder for the current move board[index] = '*'; // copy the backtracking array to stack for (int i = 0; i < n*n; i++) { stack[*top][i] = board[i]; } // move to the next item in the stack *top += 1; } } int main() { system("clear"); system("reset"); // The size of the board, make this an argument int n = 10; int s_max = n; // Max solutions int 
s_top = 0; // Current solution int sols = 0; // Total number of solutions found // Create a 2D array that will hold N*N board arrays char **istack = (char**) malloc(n*n*sizeof(char*)); for (int i = 0; i < n*n; i++) { istack[i] = (char*) malloc((n*n)*sizeof(char)); } // Create a 2D array that will hold n solutions. char **solutions = (char**) malloc(n*sizeof(char*)); for (int i = 0; i < n; i++) { solutions[i] = (char*) malloc((n*n)*sizeof(char)); } // initialize istack with every possible first move #pragma omp parallel for for (int o = 0; o < n*n; o++){ for (int i = 0; i < n*n; i++) { if (i < o){ istack[o][i] = '*'; } else { istack[o][i] = '_'; } } } #pragma omp parallel shared(n, s_top, s_max, sols, solutions) { #pragma omp for for (int init=0; init < n*n; init++) { #pragma omp task { // Thread specific variables int tid = omp_get_thread_num(); int top = 0; int max = n*n; int index = 0; int c = 0; int matches = 0; char match_found = 'f'; char successful_move = 'f'; char *board = (char*) malloc((n*n)*sizeof(char)); new_board(n, init, board, istack); // Create a stack to manage backtracking // This is a 2D array that holds boards // Top is the latst board to be added // Max is the maximum number of boards that can fit in the // memory allocation char **stack = (char**) malloc(n*n*sizeof(char*)); for (int i = 0; i < n*n; i++) { stack[i] = (char*) malloc((n*n)*sizeof(char)); } while (top >= 0) { if (remaining_moves(n, board) > 0){ index = 0; successful_move = 'f'; while (index < n*n && successful_move == 'f') { if (board[index] == '_') { validate_stack_size(n, top, &max, stack); backtrack(n, index, board, &top, stack); place_piece(n, index, board); successful_move = 't'; } else { index += 1; } } } else { #pragma omp critical { if (validate_board(n, board) == 1){ if (s_top == 0){ for (int i=0; i < n*n; i++){ solutions[s_top][i] = board[i]; } s_top += 1; sols += 1; } else { match_found = 'f'; for (int s=0; s < s_top; s++){ if (match_found == 'f'){ matches = 0; for (int c=0; c < n*n; c++){ if (solutions[s][c]==board[c]){ matches += 1; } } if (matches == n*n){ match_found = 't'; } } } if (match_found == 'f'){ if (s_top >= s_max){ s_max = s_max + floor(s_max / 2); solutions = realloc(solutions, (s_max)*sizeof(char*)); for (int i = s_top; i < (s_max); i++) { solutions[i] = malloc((n*n)*sizeof(char)); } } for (int i=0; i < n*n; i++){ solutions[s_top][i] = board[i]; } s_top += 1; sols += validate_board(n, board); } } } } top -= 1; if (top >= 0) { new_board(n, top, board, stack); } } } free(stack); free(board); } } } printf("Solutions: %d\n\n", sols); free(istack); free(solutions); }
// #601 west second street, 2nd floor, elevator B, two rights #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <math.h> #include <unistd.h> void print_board(int n, char *board) { for (int r = 0; r < n; r++) { for (int c = 0; c < n; c++) { printf("%c ", board[r * n + c]); } printf("\n"); } printf("\n"); } int calc_row(int n, int index) { return floor(index / n); } int calc_col(int n, int index) { return index % n; } int calc_index(int n, int row, int col) { return row * n + col; } void remove_rows(int n, int index, char *board) { int row = calc_row(n, index); int col_start = calc_index(n, row, 0); int col_end = calc_index(n, row, n); for (int i = col_start; i < col_end; i++) { if (board[i] != 'q') { board[i] = '*'; } } } void remove_cols(int n, int index, char *board) { int col = calc_col(n, index); int row_start = calc_index(n, 0, col); int row_end = calc_index(n, n, col); for (int i = row_start; i < row_end; i += n) { if (board[i] != 'q') { board[i] = '*'; } } } void remove_diagonals(int n, int index, char *board) { int row; int col_start; int diag; //Down and Right row = calc_row(n, index); col_start = calc_col(n, index) + 1; for (int col = col_start; col < n; col++) { if (row + 1 < n) { row += 1; diag = calc_index(n, row, col); if (board[diag] != 'q') { board[diag] = '*'; } } } //Down and Left row = calc_row(n, index); col_start = calc_col(n, index) - 1; for (int col = col_start; col > -1; col--) { if (row + 1 < n) { row += 1; diag = calc_index(n, row, col); if (board[diag] != 'q') { board[diag] = '*'; } } } //Up and Right row = calc_row(n, index); col_start = calc_col(n, index) + 1; for (int col = col_start; col < n; col++) { if (row - 1 >= 0) { row -= 1; diag = calc_index(n, row, col); if (board[diag] != 'q') { board[diag] = '*'; } } } //Up and Left row = calc_row(n, index); col_start = calc_col(n, index) - 1; for (int col = col_start; col > -1; col--) { if (row - 1 >= 0) { row -= 1; diag = calc_index(n, row, col); if (board[diag] != 'q') { board[diag] = '*'; } } } } void place_piece(int n, int index, char *board) { board[index] = 'q'; remove_rows(n, index, board); remove_cols(n, index, board); remove_diagonals(n, index, board); } int remaining_moves(int n, char *board) { int moves = 0; for (int i = 0; i < n * n; i++) { if (board[i] == '_') { moves += 1; } } return moves; } void new_solution(int n, char *board, int s_top, char **solutions) { for (int i = 0; i < n * n; i++) { solutions[s_top][i] = board[i]; } } int validate_board(int n, char *board) { int queens = 0; for (int i = 0; i < n * n; i++) { if (board[i] == 'q') { queens += 1; } } if (queens == n) { return 1; } else { return 0; } } void new_board(int n, int top, char *board, char **stack) { for (int i = 0; i < n * n; i++) { board[i] = stack[top][i]; } } void validate_stack_size(int n, int top, int *max, char **stack) { //realloc stack if it needs to grow if (top >= *max) { *max = *max + floor(*max + n); stack = realloc(stack, (*max) * sizeof(char *)); for (int i = top; i < (*max); i++) { stack[i] = malloc((n * n) * sizeof(char)); } } } void backtrack(int n, int index, char *board, int *top, char **stack) { //if only one move is left, backtracking is not needed if (remaining_moves(n, board) > 1) { //set the place holder for the current move board[index] = '*'; //copy the backtracking array to stack for (int i = 0; i < n * n; i++) { stack[*top][i] = board[i]; } //move to the next item in the stack * top += 1; } } int main() { system("clear"); system("reset"); //The size of the board, make this an argument int n = 
10; int s_max = n; //Max solutions int s_top = 0; //Current solution int sols = 0; //Total number of solutions found // Create a 2 D array that will hold N * N board arrays char **istack = (char **)malloc(n * n * sizeof(char *)); for (int i = 0; i < n * n; i++) { istack[i] = (char *)malloc((n * n) * sizeof(char)); } //Create a 2 D array that will hold n solutions. char **solutions = (char **)malloc(n * sizeof(char *)); for (int i = 0; i < n; i++) { solutions[i] = (char *)malloc((n * n) * sizeof(char)); } //initialize istack with every possible first move for (int o = 0; o < n * n; o++) { for (int i = 0; i < n * n; i++) { if (i < o) { istack[o][i] = '*'; } else { istack[o][i] = '_'; } } } for (int init = 0; init < n * n; init++) { #pragma omp task { //Thread specific variables int tid = omp_get_thread_num(); int top = 0; int max = n * n; int index = 0; int c = 0; int matches = 0; char match_found = 'f'; char successful_move = 'f'; char *board = (char *)malloc((n * n) * sizeof(char)); new_board(n, init, board, istack); //Create a stack to manage backtracking // This is a 2 D array that holds boards // Top is the latst board to be added // Max is the maximum number of boards that can fit in the // memory allocation char **stack = (char **)malloc(n * n * sizeof(char *)); for (int i = 0; i < n * n; i++) { stack[i] = (char *)malloc((n * n) * sizeof(char)); } while (top >= 0) { if (remaining_moves(n, board) > 0) { index = 0; successful_move = 'f'; while (index < n * n && successful_move == 'f') { if (board[index] == '_') { validate_stack_size(n, top, &max, stack); backtrack(n, index, board, &top, stack); place_piece(n, index, board); successful_move = 't'; } else { index += 1; } } } else { if (validate_board(n, board) == 1) { if (s_top == 0) { for (int i = 0; i < n * n; i++) { solutions[s_top][i] = board[i]; } s_top += 1; sols += 1; } else { match_found = 'f'; for (int s = 0; s < s_top; s++) { if (match_found == 'f') { matches = 0; for (int c = 0; c < n * n; c++) { if (solutions[s][c] == board[c]) { matches += 1; } } if (matches == n * n) { match_found = 't'; } } } if (match_found == 'f') { if (s_top >= s_max) { s_max = s_max + floor(s_max / 2); solutions = realloc(solutions, (s_max) * sizeof(char *)); for (int i = s_top; i < (s_max); i++) { solutions[i] = malloc((n * n) * sizeof(char)); } } for (int i = 0; i < n * n; i++) { solutions[s_top][i] = board[i]; } s_top += 1; sols += validate_board(n, board); } } } top -= 1; if (top >= 0) { new_board(n, top, board, stack); } } } free(stack); free(board); } } printf("Solutions: %d\n\n", sols); free(istack); free(solutions); }
// #601 west second street, 2nd floor, elevator B, two rights #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <math.h> #include <unistd.h> void print_board(int n, char *board) { for (int r = 0; r < n; r++) { for (int c = 0; c < n; c++) { printf("%c ", board[r * n + c]); } printf("\n"); } printf("\n"); } int calc_row(int n, int index) { return floor(index / n); } int calc_col(int n, int index) { return index % n; } int calc_index(int n, int row, int col) { return row * n + col; } void remove_rows(int n, int index, char *board) { int row = calc_row(n, index); int col_start = calc_index(n, row, 0); int col_end = calc_index(n, row, n); for (int i = col_start; i < col_end; i++) { if (board[i] != 'q') { board[i] = '*'; } } } void remove_cols(int n, int index, char *board) { int col = calc_col(n, index); int row_start = calc_index(n, 0, col); int row_end = calc_index(n, n, col); for (int i = row_start; i < row_end; i += n) { if (board[i] != 'q') { board[i] = '*'; } } } void remove_diagonals(int n, int index, char *board) { int row; int col_start; int diag; //Down and Right row = calc_row(n, index); col_start = calc_col(n, index) + 1; for (int col = col_start; col < n; col++) { if (row + 1 < n) { row += 1; diag = calc_index(n, row, col); if (board[diag] != 'q') { board[diag] = '*'; } } } //Down and Left row = calc_row(n, index); col_start = calc_col(n, index) - 1; for (int col = col_start; col > -1; col--) { if (row + 1 < n) { row += 1; diag = calc_index(n, row, col); if (board[diag] != 'q') { board[diag] = '*'; } } } //Up and Right row = calc_row(n, index); col_start = calc_col(n, index) + 1; for (int col = col_start; col < n; col++) { if (row - 1 >= 0) { row -= 1; diag = calc_index(n, row, col); if (board[diag] != 'q') { board[diag] = '*'; } } } //Up and Left row = calc_row(n, index); col_start = calc_col(n, index) - 1; for (int col = col_start; col > -1; col--) { if (row - 1 >= 0) { row -= 1; diag = calc_index(n, row, col); if (board[diag] != 'q') { board[diag] = '*'; } } } } void place_piece(int n, int index, char *board) { board[index] = 'q'; remove_rows(n, index, board); remove_cols(n, index, board); remove_diagonals(n, index, board); } int remaining_moves(int n, char *board) { int moves = 0; for (int i = 0; i < n * n; i++) { if (board[i] == '_') { moves += 1; } } return moves; } void new_solution(int n, char *board, int s_top, char **solutions) { for (int i = 0; i < n * n; i++) { solutions[s_top][i] = board[i]; } } int validate_board(int n, char *board) { int queens = 0; for (int i = 0; i < n * n; i++) { if (board[i] == 'q') { queens += 1; } } if (queens == n) { return 1; } else { return 0; } } void new_board(int n, int top, char *board, char **stack) { for (int i = 0; i < n * n; i++) { board[i] = stack[top][i]; } } void validate_stack_size(int n, int top, int *max, char **stack) { //realloc stack if it needs to grow if (top >= *max) { *max = *max + floor(*max + n); stack = realloc(stack, (*max) * sizeof(char *)); for (int i = top; i < (*max); i++) { stack[i] = malloc((n * n) * sizeof(char)); } } } void backtrack(int n, int index, char *board, int *top, char **stack) { //if only one move is left, backtracking is not needed if (remaining_moves(n, board) > 1) { //set the place holder for the current move board[index] = '*'; //copy the backtracking array to stack for (int i = 0; i < n * n; i++) { stack[*top][i] = board[i]; } //move to the next item in the stack * top += 1; } } int main() { system("clear"); system("reset"); //The size of the board, make this an argument int n = 
10; int s_max = n; //Max solutions int s_top = 0; //Current solution int sols = 0; //Total number of solutions found // Create a 2 D array that will hold N * N board arrays char **istack = (char **)malloc(n * n * sizeof(char *)); for (int i = 0; i < n * n; i++) { istack[i] = (char *)malloc((n * n) * sizeof(char)); } //Create a 2 D array that will hold n solutions. char **solutions = (char **)malloc(n * sizeof(char *)); for (int i = 0; i < n; i++) { solutions[i] = (char *)malloc((n * n) * sizeof(char)); } //initialize istack with every possible first move #pragma omp parallel for for (int o = 0; o < n * n; o++) { for (int i = 0; i < n * n; i++) { if (i < o) { istack[o][i] = '*'; } else { istack[o][i] = '_'; } } } #pragma omp parallel shared(n, s_top, s_max, sols, solutions) { #pragma omp for for (int init = 0; init < n * n; init++) { #pragma omp task { //Thread specific variables int tid = omp_get_thread_num(); int top = 0; int max = n * n; int index = 0; int c = 0; int matches = 0; char match_found = 'f'; char successful_move = 'f'; char *board = (char *)malloc((n * n) * sizeof(char)); new_board(n, init, board, istack); //Create a stack to manage backtracking // This is a 2 D array that holds boards // Top is the latst board to be added // Max is the maximum number of boards that can fit in the // memory allocation char **stack = (char **)malloc(n * n * sizeof(char *)); for (int i = 0; i < n * n; i++) { stack[i] = (char *)malloc((n * n) * sizeof(char)); } while (top >= 0) { if (remaining_moves(n, board) > 0) { index = 0; successful_move = 'f'; while (index < n * n && successful_move == 'f') { if (board[index] == '_') { validate_stack_size(n, top, &max, stack); backtrack(n, index, board, &top, stack); place_piece(n, index, board); successful_move = 't'; } else { index += 1; } } } else { #pragma omp critical { if (validate_board(n, board) == 1) { if (s_top == 0) { for (int i = 0; i < n * n; i++) { solutions[s_top][i] = board[i]; } s_top += 1; sols += 1; } else { match_found = 'f'; for (int s = 0; s < s_top; s++) { if (match_found == 'f') { matches = 0; for (int c = 0; c < n * n; c++) { if (solutions[s][c] == board[c]) { matches += 1; } } if (matches == n * n) { match_found = 't'; } } } if (match_found == 'f') { if (s_top >= s_max) { s_max = s_max + floor(s_max / 2); solutions = realloc(solutions, (s_max) * sizeof(char *)); for (int i = s_top; i < (s_max); i++) { solutions[i] = malloc((n * n) * sizeof(char)); } } for (int i = 0; i < n * n; i++) { solutions[s_top][i] = board[i]; } s_top += 1; sols += validate_board(n, board); } } } } top -= 1; if (top >= 0) { new_board(n, top, board, stack); } } } free(stack); free(board); } } } printf("Solutions: %d\n\n", sols); free(istack); free(solutions); }
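One real bug is worth flagging in all three nqueens variants: validate_stack_size receives stack by value, so the pointer returned by realloc is stored only in the function's local copy; once realloc moves the block, the caller keeps using a freed pointer. Below is a hedged sketch of a fix that passes the stack as char*** so the caller's pointer is updated. The names match the program above, but the growth step of n slots is an assumption about the intended policy, not taken from the original (which computed *max + floor(*max + n)).

#include <stdlib.h>

void validate_stack_size(int n, int top, int *max, char ***stack) {
  // Grow the backtracking stack when the next push would overflow it.
  if (top >= *max) {
    int old_max = *max;
    *max += n; // assumed growth step; see note above
    char **grown = realloc(*stack, (size_t)(*max) * sizeof(char *));
    if (grown == NULL)
      exit(1); // out of memory; the original did not check realloc either
    *stack = grown; // caller's pointer is updated through the extra level
    for (int i = old_max; i < *max; i++)
      (*stack)[i] = malloc((size_t)(n * n) * sizeof(char));
  }
}

Call sites would change accordingly, e.g. validate_stack_size(n, top, &max, &stack);.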
pi03.c
#include <omp.h>
#include <stdio.h>
static long num_steps = 100000;
double step;
#define NUM_THREADS 8
void main ()
{
   int i,id;
   double x, sum, pi=0.0;
   step = 1.0/(double) num_steps;
   omp_set_num_threads(NUM_THREADS);
#pragma omp parallel private (x,i,sum)
   {
      id = omp_get_thread_num();
      for (i=id,sum=0.0;i< num_steps;i=i+NUM_THREADS){
         x = (i+0.5)*step;
         sum += 4.0/(1.0+x*x);
      }
#pragma omp critical
      pi += sum*step;
#pragma omp barrier
#pragma omp master
      printf("Pi = %lf\n",pi);
   }
   printf("Pi = %lf\n",pi);
}
#include <omp.h>
#include <stdio.h>
static long num_steps = 100000;
double step;
#define NUM_THREADS 8
void main() {
  int i, id;
  double x, sum, pi = 0.0;
  step = 1.0 / (double)num_steps;
  omp_set_num_threads(NUM_THREADS);
  id = omp_get_thread_num();
  for (i = id, sum = 0.0; i < num_steps; i = i + NUM_THREADS) {
    x = (i + 0.5) * step;
    sum += 4.0 / (1.0 + x * x);
  }
  pi += sum * step;
  printf("Pi = %lf\n", pi);
  printf("Pi = %lf\n", pi);
}
#include <omp.h>
#include <stdio.h>
static long num_steps = 100000;
double step;
#define NUM_THREADS 8
void main() {
  int i, id;
  double x, sum, pi = 0.0;
  step = 1.0 / (double)num_steps;
  omp_set_num_threads(NUM_THREADS);
#pragma omp parallel private (x,i,sum)
  {
    id = omp_get_thread_num();
    for (i = id, sum = 0.0; i < num_steps; i = i + NUM_THREADS) {
      x = (i + 0.5) * step;
      sum += 4.0 / (1.0 + x * x);
    }
#pragma omp critical
    pi += sum * step;
#pragma omp barrier
#pragma omp master
    printf("Pi = %lf\n", pi);
  }
  printf("Pi = %lf\n", pi);
}
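The pi03.c sample above carries a data race: id is assigned by every thread but is missing from the private clause, so one thread's omp_get_thread_num() result can overwrite another's before the strided loop reads it. The manual strided loop plus critical section is also exactly what a reduction clause expresses directly. A race-free sketch of the same integration (illustrative only; standard int main instead of the sample's void main):

#include <omp.h>
#include <stdio.h>

static long num_steps = 100000;

int main(void) {
  double step = 1.0 / (double)num_steps;
  double sum = 0.0;
// each thread accumulates a private copy of sum; OpenMP combines them
#pragma omp parallel for reduction(+ : sum)
  for (long i = 0; i < num_steps; i++) {
    double x = (i + 0.5) * step;
    sum += 4.0 / (1.0 + x * x);
  }
  printf("Pi = %lf\n", sum * step);
  return 0;
}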
debug_test_system.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2013, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // // ========================================================================== // Author: Manuel Holtgrewe <[email protected]> // ========================================================================== // The SeqAn testing infrastructure. Based on ideas from the OpenMS // "ClassTest.h". // ========================================================================== // TODO(holtgrew): This could use some cleanup. // SEQAN_NO_GENERATED_FORWARDS #ifndef SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_ #define SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_ #include <iostream> // stdout, stderr #include <iomanip> #include <cstring> // strrpos #include <cstdlib> // exit() #include <cstdio> #include <cstdarg> // va_start, va_list, va_end #include <set> #include <vector> #include <string> #ifdef PLATFORM_WINDOWS #include <Windows.h> // DeleteFile() #else // #ifdef PLATFORM_WINDOWS #include <unistd.h> // unlink() #include <sys/stat.h> // mkdir() #include <dirent.h> // DIR #if SEQAN_HAS_EXECINFO #include <execinfo.h> // backtrace(), backtrace_symbols() #endif // #if SEQAN_HAS_EXECINFO #include <cxxabi.h> // __cxa_demangle() #include <signal.h> #endif // #ifdef PLATFORM_WINDOWS /** .Macro.SEQAN_FAIL ..cat:Assertions ..summary:Force abortion of program, regardless of debugging settings. ..signature:SEQAN_FAIL(msg[, args]) ..param.msg:A format string. ..param.args:An optional list of arguments. ..remarks:Use this if something really unexpected happens inside your functions and there is no way to report this through the API. A good example would be logic errors, e.g. invalid values. ..example.text:In the following example, the $SEQAN_FAIL$ is there if a possible value is added to $MyEnum$ but the function $foo$ is not updated accordingly. 
..example.code: enum MyEnum { VALUE_ONE, VALUE_TWO }; bool foo(MyEnum x) { switch (x) { case VALUE_ONE: // do something return true; case VALUE_TWO: // do something return true; } SEQAN_FAIL("Logic error. Should never reach here. x == %d.", x); return false; } ..include:seqan/basic.h ..see:Macro.SEQAN_CHECK */ #define SEQAN_FAIL(...) \ do { \ ::seqan::ClassTest::forceFail(__FILE__, __LINE__, \ __VA_ARGS__); \ ::seqan::ClassTest::fail(); \ } while (false) /** .Macro.SEQAN_CHECK ..cat:Assertions ..summary:Force abortion of program if a condition is not met, regardless of debugging settings. ..signature:SEQAN_CHECK(condition, msg[, args]) ..param.msg:A format string. ..param.args:An optional list of arguments. ..remarks:Use this if something really unexpected happens inside your functions and there is no way to report this through the API. A good example would be logic errors, e.g. invalid values. ..example.text:In the following example, the $SEQAN_CHECK$ stops program execution if a value is added to $MyEnum$ but the function $foo$ is not updated accordingly. ..example.code: enum MyEnum { VALUE_ONE, VALUE_TWO }; bool foo(MyEnum x) { SEQAN_CHECK((x == VALUE_ONE || x == VALUE_TWO), "Invalid value for x == %d.", x); switch (x) { case VALUE_ONE: // do something return true; case VALUE_TWO: // do something return true; } return false; // Should never reach here, checked above with SEQAN_CHECK. } ..include:seqan/basic.h ..see:Macro.SEQAN_FAIL */ #define SEQAN_CHECK(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), # _arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // SeqAn's has three global debug/testing levels: testing, debug and // release. Depending on the level, the SEQAN_ASSERT_* and // SEQAN_CHECKPOINT macros will be enabled. // // Note that this is independent of the <cassert> assertions and // NDEBUG being defined. // // The levels are enabled by the values of the macros // SEQAN_ENABLE_TESTING and SEQAN_ENABLE_DEBUG. By setting a macro to // 0, one disables the level and by setting the macro to 1, one // enables a level. Enabling testing also enables debug, overriding a // value of 0 for SEQAN_ENABLE_DEBUG. // // If the level is release (both the macros for debug and testing are // 0), the assertions will be disabled. If the level is debug then // the assertions will be enabled. If the level is testing then the // checkpoint macros will also be enabled. // // The default is to enable debugging but disable testing. // // You can print the current level using the function seqan::printDebugLevel(). /** .Macro.SEQAN_ENABLE_TESTING ..cat:Testing & Debugging ..summary:Indicates whether testing is enabled. ..signature:SEQAN_ENABLE_DEBUG ..remarks:When enabled (set to 1), testing is enabled. This means the macros for the tests (@Macro.SEQAN_BEGIN_TESTSUITE@, @Macro.SEQAN_DEFINE_TEST@, @Macro.SEQAN_CALL_TEST@, and @Macro.SEQAN_END_TESTSUITE@) will be enabled. This makes failing assertions raise exceptions instead of call $abort()$ and enables checkpoints. ..remarks:By default, this is set to 0. ..remarks:If @Macro.SEQAN_ENABLE_CHECKPOINTS@ is not defined before including $<seqan/basic.h>$, then @Macro.SEQAN_ENABLE_CHECKPOINTS@ will be set to the value of @Macro.SEQAN_ENABLE_TESTING@ (after the default initialization to 0). ..remarks:If you want to change this value, you have to define this value before including any SeqAn header. ..remarks:If set to 1 then @Macro.SEQAN_ENABLE_TESTING@ is force-set to 0 as well. 
..see:Macro.SEQAN_ENABLE_DEBUG ..see:Macro.SEQAN_ENABLE_CHECKPOINTS */ // Set default for SEQAN_ENABLE_TESTING. #ifndef SEQAN_ENABLE_TESTING #define SEQAN_ENABLE_TESTING 0 #endif // #ifndef SEQAN_ENABLE_TESTING /** .Macro.SEQAN_ENABLE_DEBUG ..cat:Testing & Debugging ..summary:Indicates whether debugging is enabled. ..signature:SEQAN_ENABLE_DEBUG ..remarks:When enabled (set to 1), debugging is enabled. This means the assertion macros are expanded to actual code and not to nothing. ..remarks:By default, this is set to 0 if $NDEBUG$ is defined and to 1 if $NDEBUG$ is not defined. ..remarks:If you want to change this value, you have to define this value before including any SeqAn header. ..remarks:Force-enabled if @Macro.SEQAN_ENABLE_TESTING@ is set to 1. ..see:Macro.SEQAN_ENABLE_TESTING ..see:Macro.SEQAN_ENABLE_CHECKPOINTS */ // Set default for SEQAN_ENABLE_DEBUG. #ifndef SEQAN_ENABLE_DEBUG #ifdef NDEBUG #define SEQAN_ENABLE_DEBUG 0 #else // #ifdef NDEBUG #define SEQAN_ENABLE_DEBUG 1 #endif // #ifdef NDEBUG #endif // #ifndef SEQAN_ENABLE_DEBUG // Force-enable debugging if testing is enabled. #if SEQAN_ENABLE_TESTING #undef SEQAN_ENABLE_DEBUG #define SEQAN_ENABLE_DEBUG 1 #endif // #if SEQAN_ENABLE_TESTING /** .Macro.SEQAN_ENABLE_CHECKPOINTS ..cat:Testing & Debugging ..summary:Indicates whether checkpoints are enabled. ..signature:SEQAN_ENABLE_CHECKPOINTS ..remarks:When enabled (set to 1), checkpoints are enabled. This means the $SEQAN_CHECKPOINT$ macros are expanded to actual code and not to nothing. ..remarks:By default, this is set to $SEQAN_ENABLE_TESTING$. ..remarks:Checkpoints can come at large increases of running time in your tests. Disable them when your test run too slow. ..remarks:If you want to change this value, you have to define this value before including any SeqAn header. ..example.text:Disable checkpoints in a program. ..example.code: // Disable SeqAn checkpoints in this program. #define SEQAN_ENABLE_CHECKPOINTS 0 // Any SeqAn headers or headers including SeqAn headers have to come AFTER the // definition of SEQAN_ENABLE_CHECKPOINT above. #include <seqan/base.h> int main(int argc, char const ** argv) { // Any call to SeqAn functions will NOT log any checkpoints. return 0; } ..see:Macro.SEQAN_ENABLE_DEBUG ..see:Macro.SEQAN_ENABLE_TESTING */ // Allow disabling checkpoints independent of testing. #ifndef SEQAN_ENABLE_CHECKPOINTS #define SEQAN_ENABLE_CHECKPOINTS 0 // SEQAN_ENABLE_TESTING #endif // #ifndef SEQAN_ENABLE_CHECKPOINTS /** .Macro.SEQAN_TYPEDEF_FOR_DEBUG ..cat:Testing & Debugging ..summary: When using typedefs that are only used in debug mode then they have to be marked with macro. ..signature:SEQAN_TYPEDEF_FOR_DEBUG ..example.code: typedef int TInt SEQAN_TYPEDEF_FOR_DEBUG; */ #if !SEQAN_ENABLE_DEBUG # if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7))) # define SEQAN_TYPEDEF_FOR_DEBUG __attribute__((unused)) # else # define SEQAN_TYPEDEF_FOR_DEBUG # endif #else # define SEQAN_TYPEDEF_FOR_DEBUG #endif // TODO(holtgrew): This one is for profiling and in tests. #if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7))) # define SEQAN_UNUSED_TYPEDEF __attribute__((unused)) #else # define SEQAN_UNUSED_TYPEDEF #endif namespace seqan { // SEQAN_CXX_FLAGS_ contains the compiler flags, SEQAN_CXX_FLAGS is a string // literal with this value. 
#if !defined(SEQAN_CXX_FLAGS_) #define SEQAN_CXX_FLAGS_ SEQAN_CXX_FLAGS_NOT_SET #endif // !defined(SEQAN_CXX_FLAGS__) #define SEQAN_MKSTRING_(str) # str #define SEQAN_MKSTRING(str) SEQAN_MKSTRING_(str) #define SEQAN_CXX_FLAGS SEQAN_MKSTRING(SEQAN_CXX_FLAGS_) //#undef SEQAN_MKSTRING //#undef SEQAN_MKSTRING_ /** .Function.printDebugLevel ..cat:Testing & Debugging ..summary:Print the current SeqAn debug level and the compiler flags to the given stream. ..signature:printDebugLevel(stream) ..param.stream:The stream to print to, e.g. $std::cout$. ..include:seqan/basic.h */ template <typename TStream> void printDebugLevel(TStream & stream) { stream << "SEQAN_ENABLE_DEBUG == " << SEQAN_ENABLE_DEBUG << std::endl; stream << "SEQAN_ENABLE_TESTING == " << SEQAN_ENABLE_TESTING << std::endl; stream << "SEQAN_ENABLE_CHECKPOINTS == " << SEQAN_ENABLE_CHECKPOINTS << std::endl; stream << "SEQAN_CXX_FLAGS == \"" << SEQAN_CXX_FLAGS << "\"" << std::endl; } #if defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO template <typename TSize> void printStackTrace(TSize /*maxFrames*/) {} #else // print a demangled stack backtrace of the caller function template <typename TSize> void printStackTrace(TSize maxFrames) { void * addrlist[256]; char temp[4096]; char addr[20]; char offset[20]; size_t size; int status; char * symname; char * demangled; std::cerr << std::endl << "stack trace:" << std::endl; int addrlist_len = backtrace(addrlist, maxFrames); char ** symbollist = backtrace_symbols(addrlist, addrlist_len); for (int i = 1; i < addrlist_len; ++i) { offset[0] = 0; addr[0] = 0; demangled = NULL; // LINUX FORMAT: // ./sam2svg [0x473b8c] // /lib/libc.so.6 [0x7f40d2526f60] // ./sam2svg(_Z2f3v+0x10) [0x47200c] // ./sam2svg(_Z2f2v+0xd) [0x472021] // ./sam2svg(main+0x1367) [0x4735fc] // /lib/libc.so.6(__libc_start_main+0xe6) [0x7f40d25131a6] // if (3 == sscanf(symbollist[i], "%*[^(](%4095[^+]+%[^)]) %s", temp, offset, addr)) { symname = temp; if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status))) { symname = demangled; } } // MAC OS X FORMAT: // 1 sam2svg 0x0000000100003a39 _ZN5seqanL28signalHandlerPrintStackTraceEi + 21 // 2 libSystem.B.dylib 0x00007fff87a6d67a _sigtramp + 26 // 3 libSystem.B.dylib 0x00007fff87a76df7 tiny_free_do_recirc_to_depot + 980 // 4 sam2svg 0x00000001000021b9 _Z2f2v + 9 // 5 sam2svg 0x00000001000034b1 main + 4546 // 6 sam2svg 0x0000000100002190 start + 52 else if (3 == sscanf(symbollist[i], "%*d %*s %s %s %*s %s", addr, temp, offset)) { symname = temp; if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status))) { symname = demangled; } } // LINUX FORMAT: // ./sam2svg [0x473b8c] // /lib/libc.so.6 [0x7f40d2526f60] else if (2 == sscanf(symbollist[i], "%s %s", temp, addr)) { symname = temp; } // DEFAULT: else { symname = symbollist[i]; } std::cerr << std::setw(3) << i - 1; std::cerr << std::setw(20) << addr; std::cerr << " " << symname; if (offset[0] != 0) std::cerr << " + " << offset; std::cerr << std::endl; free(demangled); } std::cerr << std::endl; // Only the array must be freed according to man page, not the contents. free(symbollist); } static void signalHandlerPrintStackTrace(int signum) { std::cerr << std::endl; printStackTrace(20); signal(signum, SIG_DFL); kill(getpid(), signum); } inline int _deploySignalHandlers() { signal(SIGSEGV, signalHandlerPrintStackTrace); // segfault signal(SIGFPE, signalHandlerPrintStackTrace); // divide by zero // ... 
return 0; } #if SEQAN_ENABLE_DEBUG // automatically deploy signal handlers that output the stack trace on a trap (in debug mode) template <typename T> struct SignalHandlersDummy_ { static const int i; }; template <typename T> const int SignalHandlersDummy_<T>::i = _deploySignalHandlers(); namespace { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-variable" #endif // ifdef __clang__ volatile int signalHandlersDummy_ = SignalHandlersDummy_<void>::i; #ifdef __clang__ #pragma clang diagnostic pop #endif // ifdef __clang__ } #endif // #if SEQAN_ENABLE_DEBUG #endif // defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO // Namespace for the testing infrastructure. // // This namespace contains the variables and functions that are used // in the macros below to perform the tests. namespace ClassTest { // Raised when an assertion fails in test mode. struct AssertionFailedException {}; // Container for static global data for the tests. struct StaticData { // Number of tests that were run. static int & testCount() { static int result = 0; return result; } // Number of errors that occurred. static int & errorCount() { static int result = 0; return result; } // Number of skipped tests. static int & skippedCount() { static int result = 0; return result; } // Flag whether there was an error in this test. static bool & thisTestOk() { static bool result = 0; return result; } // Flag whether this test was skipped. static bool & thisTestSkipped() { static bool result = 0; return result; } // Name of the current test. static const char * & currentTestName() { const char * defaultValue = ""; static const char * result = const_cast<char *>(defaultValue); return result; } // Base path to the binary. Extrapolated from __FILE__. static char * & basePath() { const char * defaultValue = "."; static char * result = const_cast<char *>(defaultValue); return result; } // Base path to the directory containing "core" and "extras." // Extrapolated from __FILE__. static char * & pathToRoot() { const char * defaultValue = "."; static char * result = const_cast<char *>(defaultValue); return result; } // Total number of checkpoints in header file. static int & totalCheckPointCount() { static int result = 0; return result; } // Total number of checkpoints found in binary files. static int & foundCheckPointCount() { static int result = 0; return result; } // Names of temporary files as returned by tempFileName. This // global state is used to remove any existing such files // after completing the testsuite. static::std::vector<std::string> & tempFileNames() { static::std::vector<std::string> filenames; return filenames; } }; // Open a temporary file, unlink it, return posix handle. Note: This has not been tested yet. // TODO(holtgrew): Not used yet and Windows code does not work. /* inline int openTempFile() { #ifdef PLATFORM_WINDOWS char * fileName = _tempnam(NULL, "SQN"); if (!fileName) { ::std::cerr << "Cannot create a unique temporary filename" << ::std::endl; exit(1); } int result = open(fileName, _O_RDWR | OPEN_TEMPORARY); free(fileName); return result; #else // A Unix... char filenameBuffer[100]; strcpy(filenameBuffer, "/tmp/SEQANXXXXXXXXXX"); int result = mkstemp(filenameBuffer); unlink(filenameBuffer); return result; #endif // ifdef PLATFORM_WINDOWS } */ // Return the path to a temporary file, in a static buffer in this // function. This is not thread safe! 
inline const char * tempFileName() { //IOREV _duplicate_ overlaps with some stuff in system/file_sync.h, should be moved to io-module static char fileNameBuffer[1000]; #ifdef PLATFORM_WINDOWS_VS static char filePathBuffer[1000]; // Gets the temp path env string (no guarantee it's a valid path). DWORD dwRetVal = 0; dwRetVal = GetTempPath(1000, // length of the buffer filePathBuffer); // buffer for path if (dwRetVal > 1000 || (dwRetVal == 0)) { std::cerr << "GetTempPath failed" << std::endl; exit(1); } UINT uRetVal = 0; uRetVal = GetTempFileName(filePathBuffer, // directory for tmp files TEXT("SEQAN."), // temp file name prefix 0, // create unique name fileNameBuffer); // buffer for name if (uRetVal == 0) { std::cerr << "GetTempFileName failed" << std::endl; exit(1); } DeleteFile(fileNameBuffer); CreateDirectoryA(fileNameBuffer, NULL); StaticData::tempFileNames().push_back(fileNameBuffer); strcat(fileNameBuffer, "\\test_file"); return fileNameBuffer; #else // ifdef PLATFORM_WINDOWS_VS strcpy(fileNameBuffer, "/tmp/SEQAN.XXXXXXXXXXXXXXXXXXXX"); #ifdef PLATFORM_WINDOWS_MINGW // There is no mkstemp in MinGW but it does not complain about tmpnam. tmpnam(fileNameBuffer); #else // ifdef PLATFORM_WINDOWS_MINGW int _tmp = mkstemp(fileNameBuffer); (void) _tmp; unlink(fileNameBuffer); mkdir(fileNameBuffer, 0777); StaticData::tempFileNames().push_back(fileNameBuffer); strcat(fileNameBuffer, "/test_file"); #endif // #ifdef PLATFORM_WINDOWS_MINGW return fileNameBuffer; #endif // ifdef PLATFORM_WINDOWS_VS } // Initialize the testing infrastructure. // // Used through SEQAN_BEGIN_TESTSUITE(test_name) inline void beginTestSuite(const char * testSuiteName, const char * argv0) { // First things first: Print test suite name and current debug level. std::cout << "TEST SUITE " << testSuiteName << std::endl; printDebugLevel(std::cout); (void)testSuiteName; StaticData::testCount() = 0; StaticData::skippedCount() = 0; StaticData::errorCount() = 0; StaticData::totalCheckPointCount() = 0; StaticData::foundCheckPointCount() = 0; // Get path to argv0. const char * end = argv0; const char * ptr = std::min(strchr(argv0, '\\'), strchr(argv0, '/')); // On Windows, we can have both \ and /. for (; ptr != 0; ptr = std::min(strchr(ptr + 1, '\\'), strchr(ptr + 1, '/'))) end = ptr; int rpos = end - argv0; if (rpos <= 0) { StaticData::basePath() = new char[2]; strcpy(StaticData::basePath(), "."); } else { int len = rpos; StaticData::basePath() = new char[len]; strncpy(StaticData::basePath(), argv0, len); } // Get path to projects. const char * file = __FILE__; int pos = -1; for (size_t i = 0; i < strlen(file) - strlen("core"); ++i) { if (strncmp(file + i, "core", strlen("core")) == 0) { pos = i; } } for (; pos > 0 && *(file + pos - 1) != '/' && *(file + pos - 1) != '\\'; --pos) continue; if (pos == -1) { std::cerr << "Could not extrapolate path to repository from __FILE__ == \"" << __FILE__ << "\"" << std::endl; exit(1); } StaticData::pathToRoot() = new char[pos]; strncpy(StaticData::pathToRoot(), file, pos); StaticData::pathToRoot()[pos - 1] = '\0'; #ifdef PLATFORM_WINDOWS_VS // Set CRT reporting such that everything goes to stderr and there are // no popups causing timeouts. 
_set_error_mode(_OUT_TO_STDERR); _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR); _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR); _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR); #endif // PLATFORM_WINDOWS_VS } // Run test suite finalization. // // Used through SEQAN_END_TESTSUITE // // Prints a bottom banner with the error count and returns the // program's return code. inline int endTestSuite() { delete[] StaticData::basePath(); delete[] StaticData::pathToRoot(); std::cout << "**************************************" << std::endl; std::cout << " Total Check Points : " << StaticData::totalCheckPointCount() << std::endl; std::cout << " Found Check Points : " << StaticData::foundCheckPointCount() << std::endl; std::cout << " Lost Check Points : " << StaticData::totalCheckPointCount() - StaticData::foundCheckPointCount() << std::endl; std::cout << "--------------------------------------" << std::endl; std::cout << " Total Tests: " << StaticData::testCount() << std::endl; std::cout << " Skipped: " << StaticData::skippedCount() << std::endl; std::cout << " Errors: " << StaticData::errorCount() << std::endl; std::cout << "**************************************" << std::endl; // TODO(holtgrew): Re-enable that all check points have to be found for the test to return 1; /* if (StaticData::totalCheckPointCount() != StaticData::foundCheckPointCount()) return 1; */ // Delete all temporary files that still exist. for (unsigned i = 0; i < StaticData::tempFileNames().size(); ++i) { #ifdef PLATFORM_WINDOWS HANDLE hFind; WIN32_FIND_DATA data; std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("\\*"); hFind = FindFirstFile(temp.c_str(), &data); if (hFind != INVALID_HANDLE_VALUE) { do { std::string tempp = StaticData::tempFileNames()[i].c_str() + std::string("\\") + data.cFileName; DeleteFile(tempp.c_str()); } while (FindNextFile(hFind, &data)); FindClose(hFind); } RemoveDirectory(StaticData::tempFileNames()[i].c_str()); #else // #ifdef PLATFORM_WINDOWS DIR * dpdf; struct dirent * epdf; dpdf = opendir(StaticData::tempFileNames()[i].c_str()); if (dpdf != NULL) { while ((epdf = readdir(dpdf)) != NULL) { std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("/") + std::string(epdf->d_name); unlink(temp.c_str()); } } rmdir(StaticData::tempFileNames()[i].c_str()); #endif // #ifdef PLATFORM_WINDOWS } if (StaticData::errorCount() != 0) return 1; return 0; } // Run test initialization. inline void beginTest(const char * testName) { StaticData::currentTestName() = testName; StaticData::thisTestOk() = true; StaticData::thisTestSkipped() = false; StaticData::testCount() += 1; } // Run test finalization. inline void endTest() { if (StaticData::thisTestSkipped()) { std::cout << StaticData::currentTestName() << " SKIPPED" << std::endl; } else if (StaticData::thisTestOk()) { std::cout << StaticData::currentTestName() << " OK" << std::endl; } else { std::cerr << StaticData::currentTestName() << " FAILED" << std::endl; } } // Marks the current test as "skipped". inline void skipCurrentTest() { StaticData::thisTestSkipped() = true; StaticData::skippedCount() += 1; } // Called by the macro SEQAN_ASSERT_FAIL. inline void forceFail(const char * file, int line, const char * comment, ...) { StaticData::errorCount() += 1; std::cerr << file << ":" << line << " FAILED! 
"; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; } // Similar to forceFail above, but accepting a va_list parameter. inline void vforceFail(const char * file, int line, const char * comment, va_list argp) { StaticData::errorCount() += 1; std::cerr << file << ":" << line << " FAILED! "; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; } // Same as forceFail above, but with comment set to 0. inline void forceFail(const char * file, int line) { forceFail(file, line, 0); } // Called by the macro SEQAN_ASSERT_EQ. // // Tests that the given two value are equal. Returns true iff the // two values are equal. template <typename T1, typename T2> bool testEqual(char const * file, int line, T1 const & value1, char const * expression1, T2 const & value2, char const * expression2, char const * comment, ...) { if (!(value1 == value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " == " << expression2 << " was: " << value1 << " != " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testEqual above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 == value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " == " << expression2 << " was: " << value1 << " != " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testEqual above, but with comment set to 0. template <typename T1, typename T2> bool testEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testEqual(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_IN_DELTA. // // Tests that the given two value are equal. Returns true iff the // two values are equal. template <typename T1, typename T2, typename T3> bool testInDelta(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const T3 & value3, const char * expression3, const char * comment, ...) { if (!(value1 >= value2 - value3 && value1 <= value2 + value3)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. 
std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " in [" << expression2 << " - " << expression3 << ", " << expression2 << " + " << expression3 << "] was: " << value1 << " not in [" << value2 - value3 << ", " << value2 + value3 << "]"; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testInDelta above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2, typename T3> bool vtestInDelta(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const T3 & value3, const char * expression3, const char * comment, va_list argp) { if (!(value1 >= value2 - value3 && value1 <= value2 + value3)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " in [" << expression2 << " - " << expression3 << ", " << expression2 << " + " << expression3 << "] was: " << value1 << " not in [" << value2 - value3 << ", " << value2 + value3 << "]"; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testInDelta above, but with comment set to 0. template <typename T1, typename T2, typename T3> bool testInDelta(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const T3 & value3, const char * expression3) { return testInDelta(file, line, value1, expression1, value2, expression2, value3, expression3, 0); } // Called by the macro SEQAN_ASSERT_NEQ. // // Tests that the given two value are not equal. Returns true iff // the two values are equal. template <typename T1, typename T2> bool testNotEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 != value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " != " << expression2 << " was: " << value1 << " == " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testNotEqual above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestNotEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 != value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " != " << expression2 << " was: " << value1 << " == " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testNotEqual above, but with comment set to 0. 
template <typename T1, typename T2> bool testNotEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testNotEqual(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_GEQ. // // Tests that the first value is greater than or equal to the // second one. Returns true iff the test yields true. template <typename T1, typename T2> bool testGeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 >= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " >= " << expression2 << " was: " << value1 << " < " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testGeq above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestGeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 >= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " >= " << expression2 << " was: " << value1 << " < " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testGeq above, but with comment set to 0. template <typename T1, typename T2> bool testGeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testGeq(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_GT. // // Tests that the first value is greater than the second one. // Returns true iff the test yields true. template <typename T1, typename T2> bool testGt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 > value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " > " << expression2 << " was: " << value1 << " <= " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testGt above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestGt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 > value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. 
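// The printed text mirrors the failed relation, e.g. "a > b was: 1 <= 2".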
std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " > " << expression2 << " was: " << value1 << " <= " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testGt above, but with comment set to 0. template <typename T1, typename T2> bool testGt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testGt(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_LEQ. // // Tests that the first value is less than or equal to the second // one. Returns true iff the test yields true. template <typename T1, typename T2> bool testLeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 <= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " <= " << expression2 << " was: " << value1 << " > " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testLeq above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestLeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 <= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " <= " << expression2 << " was: " << value1 << " > " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testLeq above, but with comment set to 0. template <typename T1, typename T2> bool testLeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testLeq(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_LT. // // Tests that the first value is less than the second one. // Returns true iff the test yields true. template <typename T1, typename T2> bool testLt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 < value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " < " << expression2 << " was: " << value1 << " >= " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testLt above, but accepts a va_list instead of variadic // parameters.
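// Compilers without variadic-macro support (see the #if near the end of this header) call these v* overloads directly from function-style assertion fallbacks.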
template <typename T1, typename T2> bool vtestLt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 < value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " < " << expression2 << " was: " << value1 << " >= " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testLt above, but comment is 0. template <typename T1, typename T2> bool testLt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testLt(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT. // // Test that the given argument evaluates to true. template <typename T> bool testTrue(const char * file, int line, const T & value_, const char * expression_, const char * comment, ...) { if (!(value_)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression_ << " should be true but was " << (value_); if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testTrue above, but accepts a va_list instead of variadic // parameters. template <typename T> bool vtestTrue(const char * file, int line, const T & value_, const char * expression_, const char * comment, va_list argp) { if (!(value_)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression_ << " should be true but was " << (value_); if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testTrue above, but comment will automatically be set to 0. template <typename T> bool testTrue(const char * file, int line, const T & value_, const char * expression_) { return testTrue(file, line, value_, expression_, 0); } // Called by the macro SEQAN_ASSERT_NOT. // // Test that the given argument evaluates to false. template <typename T> bool testFalse(const char * file, int line, const T & value_, const char * expression_, const char * comment, ...) { if (value_) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression_ << " should be false but was " << (value_); if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testFalse above, but accepts a va_list instead of variadic // parameters.
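// vtestFalse below backs SEQAN_ASSERT_NOT_MSG in the non-variadic-macro code path further down in this header.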
template <typename T> bool vtestFalse(const char * file, int line, const T & value_, const char * expression_, const char * comment, va_list argp) { if (value_) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression_ << " should be false but was " << (value_); if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testFalse above, but comment will automatically be set to 0. template <typename T> bool testFalse(const char * file, int line, const T & value_, const char * expression_) { return testFalse(file, line, value_, expression_, 0); } // Represents a check point in a file. struct CheckPoint { // Path to the file. const char * file; // Line in the file. unsigned int line; // Less-than comparator for check points. bool operator<(const CheckPoint & other) const { int c = strcmp(file, other.file); if (c < 0) return true; if (c == 0 && line < other.line) return true; return false; } }; // Wrapper for a set of check points. // TODO(holtgrew): Simply store the set? struct CheckPointStore { static ::std::set<CheckPoint> & data() { static ::std::set<CheckPoint> result; return result; } }; // Puts the given check point into the CheckPointStore's data. inline bool registerCheckPoint(unsigned int line, const char * file) { const char * file_name = strrchr(file, '/'); const char * file_name_2 = strrchr(file, '\\'); if (file_name_2 > file_name) file_name = file_name_2; if (!file_name) file_name = file; else ++file_name; CheckPoint cp = {file_name, line}; #ifdef _OPENMP #pragma omp critical #endif // #ifdef _OPENMP CheckPointStore::data().insert(cp); return true; } // Test whether the given check point exists in the check point // store. inline void testCheckPoint(const char * file, unsigned int line) { StaticData::totalCheckPointCount() += 1; CheckPoint cp = {file, line}; if (CheckPointStore::data().find(cp) == CheckPointStore::data().end()) { std::cerr << file << ":" << line << " -- Check point lost." << std::endl; return; } StaticData::foundCheckPointCount() += 1; } // Verify the check points for the given file. inline void verifyCheckPoints(const char * file) { char const * file_name = strrchr(file, '/'); char const * file_name_2 = strrchr(file, '\\'); if (file_name_2 > file_name) file_name = file_name_2; if (!file_name) file_name = file; else ++file_name; int len = strlen(StaticData::pathToRoot()) + strlen("/") + strlen(file) + 1; char * absolutePath = new char[len]; absolutePath[0] = '\0'; strcat(absolutePath, StaticData::pathToRoot()); strcat(absolutePath, "/"); strcat(absolutePath, file); FILE * fl = ::std::fopen(absolutePath, "r"); delete[] absolutePath; if (!fl) { std::cerr << file << " -- verifyCheckPoints could not find this file." << std::endl; return; } unsigned int line_number = 1; char buf[1 << 16]; while (::std::fgets(buf, sizeof(buf), fl)) { if (::std::strstr(buf, "SEQAN_CHECKPOINT")) { testCheckPoint(file_name, line_number); } ++line_number; } ::std::fclose(fl); } #if SEQAN_ENABLE_TESTING // If in testing mode then raise an AssertionFailedException. inline void fail() { StaticData::thisTestOk() = false; printStackTrace(20); throw AssertionFailedException(); } #else // If not in testing mode then quit with an abort.
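// When testing is enabled, the exception thrown by fail() above is caught in SEQAN_CALL_TEST so the remaining tests of the suite still run; without testing we simply abort below.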
inline void fail() { printStackTrace(20); abort(); } #endif // #if SEQAN_ENABLE_TESTING } // namespace ClassTest /** .Macro.SEQAN_DEFINE_TEST ..summary:Expand to test definition. ..cat:Testing & Debugging ..signature:SEQAN_DEFINE_TEST(test_name) ..param.test_name:The name of the test. ..remarks:This macro expands to the definition of a $void$ function with $SEQAN_TEST_ + test_name$ as its name. ..example.code: SEQAN_DEFINE_TEST(test_name) { SEQAN_ASSERT_LT(0, 3); } ..see:Macro.SEQAN_SKIP_TEST ..see:Macro.SEQAN_CALL_TEST ..see:Macro.SEQAN_BEGIN_TESTSUITE ..see:Macro.SEQAN_END_TESTSUITE */ // This macro expands to function header for one test. #define SEQAN_DEFINE_TEST(test_name) \ template <bool speed_up_dummy_to_prevent_compilation_of_unused_tests_> \ void SEQAN_TEST_ ## test_name() /** .Macro.SEQAN_BEGIN_TESTSUITE ..summary:Expand to a test suite beginning. ..cat:Testing & Debugging ..signature:SEQAN_BEGIN_TESTSUITE(name) ..param.name:The name of the test suite. ..remarks:This macro expands to a $main()$ function and some initialization code that sets up the test system. ..example.code: #include <seqan/basic.h> SEQAN_BEGIN_TESTSUITE(test_foo) { SEQAN_CALL_TEST(test_foo_my_test); } SEQAN_END_TESTSUITE ..see:Macro.SEQAN_SKIP_TEST ..see:Macro.SEQAN_DEFINE_TEST ..see:Macro.SEQAN_CALL_TEST ..see:Macro.SEQAN_END_TESTSUITE */ #if SEQAN_ENABLE_TESTING // This macro expands to startup code for a test file. #define SEQAN_BEGIN_TESTSUITE(suite_name) \ int main(int argc, char ** argv) { \ (void) argc; \ ::seqan::ClassTest::beginTestSuite(# suite_name, argv[0]); /** .Macro.SEQAN_END_TESTSUITE ..summary:Expand to a test suite ending. ..cat:Testing & Debugging ..signature:SEQAN_END_TESTSUITE ..remarks:This macro expands to finalization code for a test suite. ..example.code: #include <seqan/basic.h> SEQAN_BEGIN_TESTSUITE(test_foo) { SEQAN_CALL_TEST(test_foo_my_test); } SEQAN_END_TESTSUITE ..see:Macro.SEQAN_SKIP_TEST ..see:Macro.SEQAN_DEFINE_TEST ..see:Macro.SEQAN_CALL_TEST ..see:Macro.SEQAN_BEGIN_TESTSUITE */ // This macro expands to shutdown code for a test file. #define SEQAN_END_TESTSUITE \ return ::seqan::ClassTest::endTestSuite(); \ } /** .Macro.SEQAN_CALL_TEST ..summary:Expand to calling a test. ..cat:Testing & Debugging ..signature:SEQAN_CALL_TEST(test_name) ..param.test_name:The name of the test. ..remarks:This expects the test to be defined with @Macro.SEQAN_DEFINE_TEST@. This macro will expand to code that calls the code inside a try/catch block. Use this macro within a test suite, only. ..example.code: // Within a test suite. SEQAN_CALL_TEST(test_name); ..see:Macro.SEQAN_SKIP_TEST ..see:Macro.SEQAN_DEFINE_TEST ..see:Macro.SEQAN_BEGIN_TESTSUITE ..see:Macro.SEQAN_END_TESTSUITE */ // This macro expands to code to call a given test. #define SEQAN_CALL_TEST(test_name) \ do { \ ::seqan::ClassTest::beginTest(# test_name); \ try { \ SEQAN_TEST_ ## test_name<true>(); \ } catch (::seqan::ClassTest::AssertionFailedException e) { \ /* Swallow exception, go on with next test. */ \ (void) e; /* Get rid of unused variable warning. */ \ } \ ::seqan::ClassTest::endTest(); \ } while (false) /** .Macro.SEQAN_SKIP_TEST ..cat:Testing & Debugging ..summary:Force the test to return without failing and mark it as skipped. 
..signature:SEQAN_SKIP_TEST ..example.code: SEQAN_DEFINE_TEST(test_skipped) { SEQAN_SKIP_TEST; } ..see:Macro.SEQAN_DEFINE_TEST ..see:Macro.SEQAN_CALL_TEST ..see:Macro.SEQAN_BEGIN_TESTSUITE ..see:Macro.SEQAN_END_TESTSUITE */ // This macro returns from the current function and logs a "skipped" // event for the current test. #define SEQAN_SKIP_TEST \ do { \ ::seqan::ClassTest::skipCurrentTest(); \ return; \ } while (false) #endif // #if SEQAN_ENABLE_TESTING // variadic macros are not supported by VS 2003 and before #if !defined(_MSC_VER) || (_MSC_VER >= 1400) #if SEQAN_ENABLE_DEBUG /** .Macro.SEQAN_ASSERT ..cat:Assertions ..summary:Test that the given expression can be coerced to $true$. ..signature:SEQAN_ASSERT(expression) ..signature:SEQAN_ASSERT_MSG(expression, message[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT(0); // will fail SEQAN_ASSERT(1); // will run through SEQAN_ASSERT_MSG(0, "message %d", 2); // Will fail with message. ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_NOT ..cat:Assertions ..summary:Test that the given expression can be coerced to $false$. ..signature:SEQAN_ASSERT_NOT(expression) ..signature:SEQAN_ASSERT_NOT_MSG(expression, message[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_NOT(0); // will run through SEQAN_ASSERT_NOT(1); // will fail SEQAN_ASSERT_NOT_MSG(0, "msg %s", "test"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_EQ ..cat:Assertions ..summary:Test that two given expressions are equal, as defined by the matching call to the $operator==(,)$. ..signature:SEQAN_ASSERT_EQ(expression1, expression2) ..signature:SEQAN_ASSERT_EQ_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code: SEQAN_ASSERT_EQ(0, false); // will run through SEQAN_ASSERT_EQ(1, false); // will fail SEQAN_ASSERT_EQ(1, "foo"); // will not compile SEQAN_ASSERT_EQ_MSG(1, false, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_NEQ ..cat:Assertions ..summary:Test that two given expressions are not equal, as defined by the matching call to the $operator!=(,)$. ..signature:SEQAN_ASSERT_NEQ(expression1, expression2) ..signature:SEQAN_ASSERT_NEQ_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_NEQ(0, false); // will fail SEQAN_ASSERT_NEQ(1, false); // will run through SEQAN_ASSERT_NEQ(1, "foo"); // will not compile SEQAN_ASSERT_NEQ_MSG(1, false, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_LT ..cat:Assertions ..summary:Test that the two given expressions are in the less-than relation as defined by the matching call to operator<(,). ..signature:SEQAN_ASSERT_LT(expression1, expression2) ..signature:SEQAN_ASSERT_LT_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_LT(0, 1); // will run through SEQAN_ASSERT_LT(1, 1); // will not run through SEQAN_ASSERT_LT_MSG(1, 1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_LEQ ..cat:Assertions ..summary:Test that the two given expressions are in the less-than-or-equal relation as defined by the matching call to operator<=(,). ..signature:SEQAN_ASSERT_LEQ(expression1, expression2) ..signature:SEQAN_ASSERT_LEQ_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code: SEQAN_ASSERT_LEQ(1, 1); // will run through SEQAN_ASSERT_LEQ(2, 1); // will not run through SEQAN_ASSERT_LEQ_MSG(2, 1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_GT ..cat:Assertions ..summary:Test that the two given expressions are in the greater-than relation as defined by the matching call to operator>(,). ..signature:SEQAN_ASSERT_GT(expression1, expression2) ..signature:SEQAN_ASSERT_GT_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_GT(2, 1); // will run through SEQAN_ASSERT_GT(1, 1); // will not run through SEQAN_ASSERT_GT_MSG(1, 1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_GEQ ..cat:Assertions ..summary:Test that the two given expressions are in the greater-than-or-equal relation as defined by the matching call to operator>=(,). ..signature:SEQAN_ASSERT_GEQ(expression1, expression2) ..signature:SEQAN_ASSERT_GEQ_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_GEQ(1, 1); // will run through SEQAN_ASSERT_GEQ(0, 1); // will not run through SEQAN_ASSERT_GEQ_MSG(0, 1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_IN_DELTA ..cat:Assertions ..summary:Test that the value $x$ lies within the interval $[y - delta, y + delta]$. ..signature:SEQAN_ASSERT_IN_DELTA(x, y, delta) ..signature:SEQAN_ASSERT_IN_DELTA_MSG(x, y, delta, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code: SEQAN_ASSERT_IN_DELTA(0, 0, 0.1); // will run through SEQAN_ASSERT_IN_DELTA(1, -2, 1); // will fail SEQAN_ASSERT_IN_DELTA(1, "foo"); // will not compile SEQAN_ASSERT_IN_DELTA_MSG(1, 0, 0.1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL */ // Force a test failure. // // Usage: SEQAN_ASSERT_FAIL("Failed at position %d", pos); #define SEQAN_ASSERT_FAIL(...) \ do { \ ::seqan::ClassTest::forceFail(__FILE__, __LINE__, \ __VA_ARGS__); \ ::seqan::ClassTest::fail(); \ } while (false) // Equality assertion without a comment. // // Usage: SEQAN_ASSERT_EQ(4, 4); #define SEQAN_ASSERT_EQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Equality assertion with a comment. // // Usage: SEQAN_ASSERT_EQ_MSG(4, 4, "msg"); #define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // In-delta-environment assertion without a comment. // // Usage: SEQAN_ASSERT_IN_DELTA(4.1, 4, 0.1); #define SEQAN_ASSERT_IN_DELTA(_arg1, _arg2, _arg3) \ do { \ if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ (_arg3), # _arg3)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // In-delta-environment assertion with a comment. // // Usage: SEQAN_ASSERT_IN_DELTA_MSG(4.1, 4, 0.1, "3.9 <= 4.1 <= 4.1"); #define SEQAN_ASSERT_IN_DELTA_MSG(_arg1, _arg2, _arg3, ...) \ do { \ if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ (_arg3), # _arg3, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Inequality assertion without a comment. // // Usage: SEQAN_ASSERT_NEQ(4, 5); #define SEQAN_ASSERT_NEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Inequality assertion with a comment. // // Usage: SEQAN_ASSERT_NEQ_MSG(4, 5, "msg"); #define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than-or-equal assertion without a comment. #define SEQAN_ASSERT_LEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than-or-equal assertion with a comment. #define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than assertion without a comment. #define SEQAN_ASSERT_LT(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than assertion with a comment. #define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...)
\ do { \ if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than-or-equal assertion without a comment. #define SEQAN_ASSERT_GEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than-or-equal assertion with a comment. #define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than assertion without a comment. #define SEQAN_ASSERT_GT(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than assertion with a comment. #define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.; // Trueness assertion with a comment. // // Usage: SEQAN_ASSERT(false); #define SEQAN_ASSERT(_arg1) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), # _arg1)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.; // Trueness assertion with a comment. #define SEQAN_ASSERT_MSG(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), # _arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Falseness assertion without a comment. // // Usage: SEQAN_ASSERT_NOT(false); #define SEQAN_ASSERT_NOT(_arg1) \ do { \ if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \ (_arg1), # _arg1)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Falseness assertion with a comment. #define SEQAN_ASSERT_NOT_MSG(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \ (_arg1), # _arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) #else // #if SEQAN_ENABLE_DEBUG #define SEQAN_ASSERT_EQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_NEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_LEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_LT(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_GEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_GT(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT(_arg1) do {} while (false) #define SEQAN_ASSERT_MSG(_arg1, ...) do {} while (false) #define SEQAN_ASSERT_NOT(_arg1) do {} while (false) #define SEQAN_ASSERT_NOT_MSG(_arg1, ...) do {} while (false) #define SEQAN_ASSERT_FAIL(...) do {} while (false) #endif // #if SEQAN_ENABLE_DEBUG #else // no variadic macros #if SEQAN_ENABLE_DEBUG inline void SEQAN_ASSERT_FAIL(const char * comment, ...) 
{ va_list args; va_start(args, comment); ::seqan::ClassTest::vforceFail("", 0, comment, args); ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3) { if (!::seqan::ClassTest::testInDelta("", 0, _arg1, "", _arg2, "", _arg3, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestInDelta("", 0, _arg1, "", _arg2, "", _arg3, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testEqual("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestEqual("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testNotEqual("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestNotEqual("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testLeq("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestLeq("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testLt("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestLt("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testGeq("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestGeq("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testGt("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{ va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestGt("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1> void SEQAN_ASSERT(T1 const & _arg1) { if (!::seqan::ClassTest::testTrue("", 0, _arg1, "")) ::seqan::ClassTest::fail(); } template <typename T1> void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestTrue("", 0, _arg1, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1> void SEQAN_ASSERT_NOT(T1 const & _arg1) { if (!::seqan::ClassTest::testFalse("", 0, _arg1, "")) ::seqan::ClassTest::fail(); } template <typename T1> void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestFalse("", 0, _arg1, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } #else // #if SEQAN_ENABLE_DEBUG inline void SEQAN_ASSERT_FAIL(const char * comment, ...) {} template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3) {} template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1> void SEQAN_ASSERT(T1 const & _arg1) {} template <typename T1> void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...) {} template <typename T1> void SEQAN_ASSERT_NOT(T1 const & _arg1) {} template <typename T1> void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...) {} #endif // #if SEQAN_ENABLE_DEBUG #endif // no variadic macros // Returns a string (of type char*) with the path to the called binary. // // Use this to locate files relative to the test binary. #define SEQAN_PROGRAM_PATH \ ::seqan::ClassTest::StaticData::basePath() // TODO(holtgrew): Subject to change wiht restructuring. /** .Macro.SEQAN_PATH_TO_ROOT ..cat:Testing & Debugging ..summary:Return path to the checkout root directory (i.e. containing core/extras). 
..returns:$char const *$, string with the path to the parent directory of the tests directory. ..signature:SEQAN_PATH_TO_ROOT() ..remarks:The pointed to string is initialized on program startup by the code generated by @Macro.SEQAN_BEGIN_TESTSUITE@. ..example.code: const char *p = SEQAN_PATH_TO_ROOT(); char buffer[1000]; strcpy(buffer, p); strcat(buffer, "/tests/files/example.txt"); FILE *f = fopen(buffer, "w"); fprintf(f, "Test Data"); fclose(f); ..see:Macro.SEQAN_TEMP_FILENAME */ // Returns a const char * string with the path to the projects directory. #define SEQAN_PATH_TO_ROOT() \ ::seqan::ClassTest::StaticData::pathToRoot() // Returns the POSIX int file handle to an open file. // TODO(holtgrewe): Uncomment if openTempFile has been implemented. // #define SEQAN_OPEN_TEMP_FILE() (::seqan::ClassTest::openTempFile()) /** .Macro.SEQAN_TEMP_FILENAME ..cat:Testing & Debugging ..summary:Generates the name to a temporary file. ..returns:$char const *$, string with the path to a temporary file. ..signature:SEQAN_TEMP_FILENAME() ..remarks:The pointed to string is stored in a buffer and is overwritten by the next call to this macro. Copy it out if you need it. ..example.code: const char *p = SEQAN_TEMP_FILENAME(); char tempFilename[1000]; strcpy(tempFilename, p); FILE *f = fopen(tempFilename, "w"); fprintf(f, "Test Data"); fclose(f); ..see:Macro.SEQAN_PATH_TO_ROOT */ // Returns a temporary filename. #define SEQAN_TEMP_FILENAME() (::seqan::ClassTest::tempFileName()) /** .Macro.SEQAN_VERIFY_CHECKPOINTS ..cat:Testing & Debugging ..summary:Verify check points for the given file name. ..signature:SEQAN_VERIFY_CHECKPOINTS(path) ..param.path:Path to the file to verify check points for. Relative to parent directory of tests. ..example.code: SEQAN_VERIFY_CHECKPOINTS("core/include/seqan/basic_alphabet.h"); ..see:Macro.SEQAN_CHECKPOINT .Macro.SEQAN_CHECKPOINT ..cat:Testing & Debugging ..summary:Generate a check point. ..signature:SEQAN_CHECKPOINT ..remarks:Whenever the code executes the instructions generated by this macro, the check point for this line will be set in global testing state. Use @Macro.SEQAN_VERIFY_CHECKPOINTS@ to verify whether all checkpoints have been reached in a file up to this point. ..example.code: SEQAN_CHECKPOINT; ..see:Macro.SEQAN_VERIFY_CHECKPOINTS */ #if SEQAN_ENABLE_CHECKPOINTS // Create a check point at the point where the macro is placed. // TODO(holtgrew): Should be called SEQAN_CHECK_POINT to be consistent. #define SEQAN_CHECKPOINT \ ::seqan::ClassTest::registerCheckPoint(__LINE__, __FILE__); // Call the check point verification code for the given file. #define SEQAN_VERIFY_CHECKPOINTS(filename) \ ::seqan::ClassTest::verifyCheckPoints(filename) #else // #if SEQAN_ENABLE_CHECKPOINTS #define SEQAN_CHECKPOINT // If checkpoints are to be verified while they are disabled then print // a warning. #define SEQAN_VERIFY_CHECKPOINTS(filename) \ do { \ fprintf(stderr, ("WARNING: Check point verification is " \ "disabled.
Trying to verify %s from %s:%d.\n"), \ filename, __FILE__, __LINE__); \ } while (false) #endif // #if SEQAN_ENABLE_CHECKPOINTS #if !SEQAN_ENABLE_TESTING #define SEQAN_BEGIN_TESTSUITE(suite_name) \ int main(int argc, char ** argv) { \ (void) argc; \ (void) argv; \ fprintf(stderr, "Warning: SEQAN_ENABLE_TESTING is wrong and you used the macro SEQAN_BEGIN_TESTSUITE!\n"); #define SEQAN_END_TESTSUITE \ return 0; \ } #define SEQAN_CALL_TEST(test_name) do { SEQAN_TEST_ ## test_name(); } while (false) #define SEQAN_SKIP_TEST do {} while (false) #endif // #if !SEQAN_ENABLE_TESTING } // namespace seqan #endif // SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
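// ---------------------------------------------------------------------------
// Usage sketch (not part of the header): a minimal test program built on the
// macros above. The suite and test names here are hypothetical.
// ---------------------------------------------------------------------------
// #define SEQAN_ENABLE_TESTING 1       // must precede any SeqAn include
// #include <seqan/basic.h>
//
// SEQAN_DEFINE_TEST(test_example_basic)
// {
//     SEQAN_ASSERT_LT(0, 3);                            // passes
//     SEQAN_ASSERT_EQ_MSG(2 + 2, 4, "arithmetic %d", 4); // passes, with message on failure
// }
//
// SEQAN_BEGIN_TESTSUITE(test_example)
// {
//     SEQAN_CALL_TEST(test_example_basic);
// }
// SEQAN_END_TESTSUITE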
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2013, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // // ========================================================================== // Author: Manuel Holtgrewe <[email protected]> // ========================================================================== // The SeqAn testing infrastructure. Based on ideas from the OpenMS // "ClassTest.h". // ========================================================================== // TODO(holtgrew): This could use some cleanup. // SEQAN_NO_GENERATED_FORWARDS #ifndef SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_ #define SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_ #include <iostream> // stdout, stderr #include <iomanip> #include <cstring> // strrpos #include <cstdlib> // exit() #include <cstdio> #include <cstdarg> // va_start, va_list, va_end #include <set> #include <vector> #include <string> #ifdef PLATFORM_WINDOWS #include <Windows.h> // DeleteFile() #else // #ifdef PLATFORM_WINDOWS #include <unistd.h> // unlink() #include <sys/stat.h> // mkdir() #include <dirent.h> // DIR #if SEQAN_HAS_EXECINFO #include <execinfo.h> // backtrace(), backtrace_symbols() #endif // #if SEQAN_HAS_EXECINFO #include <cxxabi.h> // __cxa_demangle() #include <signal.h> #endif // #ifdef PLATFORM_WINDOWS /** .Macro.SEQAN_FAIL ..cat:Assertions ..summary:Force abortion of program, regardless of debugging settings. ..signature:SEQAN_FAIL(msg[, args]) ..param.msg:A format string. ..param.args:An optional list of arguments. ..remarks:Use this if something really unexpected happens inside your functions and there is no way to report this through the API. A good example would be logic errors, e.g. invalid values. ..example.text:In the following example, the $SEQAN_FAIL$ is there if a possible value is added to $MyEnum$ but the function $foo$ is not updated accordingly. 
..example.code: enum MyEnum { VALUE_ONE, VALUE_TWO }; bool foo(MyEnum x) { switch (x) { case VALUE_ONE: // do something return true; case VALUE_TWO: // do something return true; } SEQAN_FAIL("Logic error. Should never reach here. x == %d.", x); return false; } ..include:seqan/basic.h ..see:Macro.SEQAN_CHECK */ #define SEQAN_FAIL(...) \ do { \ ::seqan::ClassTest::forceFail(__FILE__, __LINE__, \ __VA_ARGS__); \ ::seqan::ClassTest::fail(); \ } while (false) /** .Macro.SEQAN_CHECK ..cat:Assertions ..summary:Force abortion of program if a condition is not met, regardless of debugging settings. ..signature:SEQAN_CHECK(condition, msg[, args]) ..param.msg:A format string. ..param.args:An optional list of arguments. ..remarks:Use this if something really unexpected happens inside your functions and there is no way to report this through the API. A good example would be logic errors, e.g. invalid values. ..example.text:In the following example, the $SEQAN_CHECK$ stops program execution if a value is added to $MyEnum$ but the function $foo$ is not updated accordingly. ..example.code: enum MyEnum { VALUE_ONE, VALUE_TWO }; bool foo(MyEnum x) { SEQAN_CHECK((x == VALUE_ONE || x == VALUE_TWO), "Invalid value for x == %d.", x); switch (x) { case VALUE_ONE: // do something return true; case VALUE_TWO: // do something return true; } return false; // Should never reach here, checked above with SEQAN_CHECK. } ..include:seqan/basic.h ..see:Macro.SEQAN_FAIL */ #define SEQAN_CHECK(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), # _arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // SeqAn has three global debug/testing levels: testing, debug and // release. Depending on the level, the SEQAN_ASSERT_* and // SEQAN_CHECKPOINT macros will be enabled. // // Note that this is independent of the <cassert> assertions and // NDEBUG being defined. // // The levels are enabled by the values of the macros // SEQAN_ENABLE_TESTING and SEQAN_ENABLE_DEBUG. By setting a macro to // 0, one disables the level and by setting the macro to 1, one // enables a level. Enabling testing also enables debug, overriding a // value of 0 for SEQAN_ENABLE_DEBUG. // // If the level is release (both the macros for debug and testing are // 0), the assertions will be disabled. If the level is debug then // the assertions will be enabled. If the level is testing then the // checkpoint macros will also be enabled. // // The default is to enable debugging but disable testing. // // You can print the current level using the function seqan::printDebugLevel(). /** .Macro.SEQAN_ENABLE_TESTING ..cat:Testing & Debugging ..summary:Indicates whether testing is enabled. ..signature:SEQAN_ENABLE_TESTING ..remarks:When enabled (set to 1), testing is enabled. This means the macros for the tests (@Macro.SEQAN_BEGIN_TESTSUITE@, @Macro.SEQAN_DEFINE_TEST@, @Macro.SEQAN_CALL_TEST@, and @Macro.SEQAN_END_TESTSUITE@) will be enabled. This makes failing assertions raise exceptions instead of calling $abort()$ and enables checkpoints. ..remarks:By default, this is set to 0. ..remarks:If @Macro.SEQAN_ENABLE_CHECKPOINTS@ is not defined before including $<seqan/basic.h>$, then @Macro.SEQAN_ENABLE_CHECKPOINTS@ will be set to the value of @Macro.SEQAN_ENABLE_TESTING@ (after the default initialization to 0). ..remarks:If you want to change this value, you have to define this value before including any SeqAn header. ..remarks:If set to 1 then @Macro.SEQAN_ENABLE_DEBUG@ is force-set to 1 as well.
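..example.text:A minimal usage sketch: enable testing (which force-enables debugging) before the first SeqAn include.
..example.code:
// Must come before including any SeqAn header.
#define SEQAN_ENABLE_TESTING 1
#include <seqan/basic.h>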
..see:Macro.SEQAN_ENABLE_DEBUG ..see:Macro.SEQAN_ENABLE_CHECKPOINTS */ // Set default for SEQAN_ENABLE_TESTING. #ifndef SEQAN_ENABLE_TESTING #define SEQAN_ENABLE_TESTING 0 #endif // #ifndef SEQAN_ENABLE_TESTING /** .Macro.SEQAN_ENABLE_DEBUG ..cat:Testing & Debugging ..summary:Indicates whether debugging is enabled. ..signature:SEQAN_ENABLE_DEBUG ..remarks:When enabled (set to 1), debugging is enabled. This means the assertion macros are expanded to actual code and not to nothing. ..remarks:By default, this is set to 0 if $NDEBUG$ is defined and to 1 if $NDEBUG$ is not defined. ..remarks:If you want to change this value, you have to define this value before including any SeqAn header. ..remarks:Force-enabled if @Macro.SEQAN_ENABLE_TESTING@ is set to 1. ..see:Macro.SEQAN_ENABLE_TESTING ..see:Macro.SEQAN_ENABLE_CHECKPOINTS */ // Set default for SEQAN_ENABLE_DEBUG. #ifndef SEQAN_ENABLE_DEBUG #ifdef NDEBUG #define SEQAN_ENABLE_DEBUG 0 #else // #ifdef NDEBUG #define SEQAN_ENABLE_DEBUG 1 #endif // #ifdef NDEBUG #endif // #ifndef SEQAN_ENABLE_DEBUG // Force-enable debugging if testing is enabled. #if SEQAN_ENABLE_TESTING #undef SEQAN_ENABLE_DEBUG #define SEQAN_ENABLE_DEBUG 1 #endif // #if SEQAN_ENABLE_TESTING /** .Macro.SEQAN_ENABLE_CHECKPOINTS ..cat:Testing & Debugging ..summary:Indicates whether checkpoints are enabled. ..signature:SEQAN_ENABLE_CHECKPOINTS ..remarks:When enabled (set to 1), checkpoints are enabled. This means the $SEQAN_CHECKPOINT$ macros are expanded to actual code and not to nothing. ..remarks:By default, this is set to $SEQAN_ENABLE_TESTING$. ..remarks:Checkpoints can cause large increases in the running time of your tests. Disable them when your tests run too slowly. ..remarks:If you want to change this value, you have to define this value before including any SeqAn header. ..example.text:Disable checkpoints in a program. ..example.code: // Disable SeqAn checkpoints in this program. #define SEQAN_ENABLE_CHECKPOINTS 0 // Any SeqAn headers or headers including SeqAn headers have to come AFTER the // definition of SEQAN_ENABLE_CHECKPOINTS above. #include <seqan/basic.h> int main(int argc, char const ** argv) { // Any call to SeqAn functions will NOT log any checkpoints. return 0; } ..see:Macro.SEQAN_ENABLE_DEBUG ..see:Macro.SEQAN_ENABLE_TESTING */ // Allow disabling checkpoints independent of testing. #ifndef SEQAN_ENABLE_CHECKPOINTS #define SEQAN_ENABLE_CHECKPOINTS 0 // SEQAN_ENABLE_TESTING #endif // #ifndef SEQAN_ENABLE_CHECKPOINTS /** .Macro.SEQAN_TYPEDEF_FOR_DEBUG ..cat:Testing & Debugging ..summary:Typedefs that are only used in debug mode have to be marked with this macro. ..signature:SEQAN_TYPEDEF_FOR_DEBUG ..example.code: typedef int TInt SEQAN_TYPEDEF_FOR_DEBUG; */ #if !SEQAN_ENABLE_DEBUG # if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7))) # define SEQAN_TYPEDEF_FOR_DEBUG __attribute__((unused)) # else # define SEQAN_TYPEDEF_FOR_DEBUG # endif #else # define SEQAN_TYPEDEF_FOR_DEBUG #endif // TODO(holtgrew): This one is for profiling and in tests. #if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7))) # define SEQAN_UNUSED_TYPEDEF __attribute__((unused)) #else # define SEQAN_UNUSED_TYPEDEF #endif namespace seqan { // SEQAN_CXX_FLAGS_ contains the compiler flags, SEQAN_CXX_FLAGS is a string // literal with this value.
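// The two-level SEQAN_MKSTRING/SEQAN_MKSTRING_ indirection below stringizes the macro's expanded value (the actual flags, typically passed via -DSEQAN_CXX_FLAGS_=...) rather than the macro's name.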
#if !defined(SEQAN_CXX_FLAGS_) #define SEQAN_CXX_FLAGS_ SEQAN_CXX_FLAGS_NOT_SET #endif // !defined(SEQAN_CXX_FLAGS__) #define SEQAN_MKSTRING_(str) # str #define SEQAN_MKSTRING(str) SEQAN_MKSTRING_(str) #define SEQAN_CXX_FLAGS SEQAN_MKSTRING(SEQAN_CXX_FLAGS_) //#undef SEQAN_MKSTRING //#undef SEQAN_MKSTRING_ /** .Function.printDebugLevel ..cat:Testing & Debugging ..summary:Print the current SeqAn debug level and the compiler flags to the given stream. ..signature:printDebugLevel(stream) ..param.stream:The stream to print to, e.g. $std::cout$. ..include:seqan/basic.h */ template <typename TStream> void printDebugLevel(TStream & stream) { stream << "SEQAN_ENABLE_DEBUG == " << SEQAN_ENABLE_DEBUG << std::endl; stream << "SEQAN_ENABLE_TESTING == " << SEQAN_ENABLE_TESTING << std::endl; stream << "SEQAN_ENABLE_CHECKPOINTS == " << SEQAN_ENABLE_CHECKPOINTS << std::endl; stream << "SEQAN_CXX_FLAGS == \"" << SEQAN_CXX_FLAGS << "\"" << std::endl; } #if defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO template <typename TSize> void printStackTrace(TSize /*maxFrames*/) {} #else // print a demangled stack backtrace of the caller function template <typename TSize> void printStackTrace(TSize maxFrames) { void * addrlist[256]; char temp[4096]; char addr[20]; char offset[20]; size_t size; int status; char * symname; char * demangled; std::cerr << std::endl << "stack trace:" << std::endl; int addrlist_len = backtrace(addrlist, maxFrames); char ** symbollist = backtrace_symbols(addrlist, addrlist_len); for (int i = 1; i < addrlist_len; ++i) { offset[0] = 0; addr[0] = 0; demangled = NULL; // LINUX FORMAT: // ./sam2svg [0x473b8c] // /lib/libc.so.6 [0x7f40d2526f60] // ./sam2svg(_Z2f3v+0x10) [0x47200c] // ./sam2svg(_Z2f2v+0xd) [0x472021] // ./sam2svg(main+0x1367) [0x4735fc] // /lib/libc.so.6(__libc_start_main+0xe6) [0x7f40d25131a6] // if (3 == sscanf(symbollist[i], "%*[^(](%4095[^+]+%[^)]) %s", temp, offset, addr)) { symname = temp; if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status))) { symname = demangled; } } // MAC OS X FORMAT: // 1 sam2svg 0x0000000100003a39 _ZN5seqanL28signalHandlerPrintStackTraceEi + 21 // 2 libSystem.B.dylib 0x00007fff87a6d67a _sigtramp + 26 // 3 libSystem.B.dylib 0x00007fff87a76df7 tiny_free_do_recirc_to_depot + 980 // 4 sam2svg 0x00000001000021b9 _Z2f2v + 9 // 5 sam2svg 0x00000001000034b1 main + 4546 // 6 sam2svg 0x0000000100002190 start + 52 else if (3 == sscanf(symbollist[i], "%*d %*s %s %s %*s %s", addr, temp, offset)) { symname = temp; if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status))) { symname = demangled; } } // LINUX FORMAT: // ./sam2svg [0x473b8c] // /lib/libc.so.6 [0x7f40d2526f60] else if (2 == sscanf(symbollist[i], "%s %s", temp, addr)) { symname = temp; } // DEFAULT: else { symname = symbollist[i]; } std::cerr << std::setw(3) << i - 1; std::cerr << std::setw(20) << addr; std::cerr << " " << symname; if (offset[0] != 0) std::cerr << " + " << offset; std::cerr << std::endl; free(demangled); } std::cerr << std::endl; // Only the array must be freed according to man page, not the contents. free(symbollist); } static void signalHandlerPrintStackTrace(int signum) { std::cerr << std::endl; printStackTrace(20); signal(signum, SIG_DFL); kill(getpid(), signum); } inline int _deploySignalHandlers() { signal(SIGSEGV, signalHandlerPrintStackTrace); // segfault signal(SIGFPE, signalHandlerPrintStackTrace); // divide by zero // ... 
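// Additional traps (e.g. SIGBUS or SIGILL) could be hooked here in the same way; only SIGSEGV and SIGFPE are registered by default.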
return 0; } #if SEQAN_ENABLE_DEBUG // automatically deploy signal handlers that output the stack trace on a trap (in debug mode) template <typename T> struct SignalHandlersDummy_ { static const int i; }; template <typename T> const int SignalHandlersDummy_<T>::i = _deploySignalHandlers(); namespace { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-variable" #endif // ifdef __clang__ volatile int signalHandlersDummy_ = SignalHandlersDummy_<void>::i; #ifdef __clang__ #pragma clang diagnostic pop #endif // ifdef __clang__ } #endif // #if SEQAN_ENABLE_DEBUG #endif // defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO // Namespace for the testing infrastructure. // // This namespace contains the variables and functions that are used // in the macros below to perform the tests. namespace ClassTest { // Raised when an assertion fails in test mode. struct AssertionFailedException {}; // Container for static global data for the tests. struct StaticData { // Number of tests that were run. static int & testCount() { static int result = 0; return result; } // Number of errors that occurred. static int & errorCount() { static int result = 0; return result; } // Number of skipped tests. static int & skippedCount() { static int result = 0; return result; } // Flag whether there was an error in this test. static bool & thisTestOk() { static bool result = 0; return result; } // Flag whether this test was skipped. static bool & thisTestSkipped() { static bool result = 0; return result; } // Name of the current test. static const char * & currentTestName() { const char * defaultValue = ""; static const char * result = const_cast<char *>(defaultValue); return result; } // Base path to the binary. Extrapolated from __FILE__. static char * & basePath() { const char * defaultValue = "."; static char * result = const_cast<char *>(defaultValue); return result; } // Base path to the directory containing "core" and "extras." // Extrapolated from __FILE__. static char * & pathToRoot() { const char * defaultValue = "."; static char * result = const_cast<char *>(defaultValue); return result; } // Total number of checkpoints in header file. static int & totalCheckPointCount() { static int result = 0; return result; } // Total number of checkpoints found in binary files. static int & foundCheckPointCount() { static int result = 0; return result; } // Names of temporary files as returned by tempFileName. This // global state is used to remove any existing such files // after completing the testsuite. static::std::vector<std::string> & tempFileNames() { static::std::vector<std::string> filenames; return filenames; } }; // Open a temporary file, unlink it, return posix handle. Note: This has not been tested yet. // TODO(holtgrew): Not used yet and Windows code does not work. /* inline int openTempFile() { #ifdef PLATFORM_WINDOWS char * fileName = _tempnam(NULL, "SQN"); if (!fileName) { ::std::cerr << "Cannot create a unique temporary filename" << ::std::endl; exit(1); } int result = open(fileName, _O_RDWR | OPEN_TEMPORARY); free(fileName); return result; #else // A Unix... char filenameBuffer[100]; strcpy(filenameBuffer, "/tmp/SEQANXXXXXXXXXX"); int result = mkstemp(filenameBuffer); unlink(filenameBuffer); return result; #endif // ifdef PLATFORM_WINDOWS } */ // Return the path to a temporary file, in a static buffer in this // function. This is not thread safe! 
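// A minimal usage sketch for the function below (illustrative only, not
// compiled here): because the result lives in a single static buffer, copy
// the returned path out before requesting another name.
#if 0
char myFile[1000];
strcpy(myFile, seqan::ClassTest::tempFileName());        // copy out first
const char * other = seqan::ClassTest::tempFileName();   // reuses the buffer
// myFile and other now name paths in two distinct temporary directories;
// both directories are removed again by endTestSuite().
#endif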
inline const char * tempFileName()
{
    //IOREV _duplicate_ overlaps with some stuff in system/file_sync.h, should be moved to io-module
    static char fileNameBuffer[1000];
#ifdef PLATFORM_WINDOWS_VS
    static char filePathBuffer[1000];
    // Gets the temp path env string (no guarantee it's a valid path).
    DWORD dwRetVal = 0;
    dwRetVal = GetTempPath(1000,            // length of the buffer
                           filePathBuffer); // buffer for path
    if (dwRetVal > 1000 || (dwRetVal == 0))
    {
        std::cerr << "GetTempPath failed" << std::endl;
        exit(1);
    }
    UINT uRetVal = 0;
    uRetVal = GetTempFileName(filePathBuffer,   // directory for tmp files
                              TEXT("SEQAN."),   // temp file name prefix
                              0,                // create unique name
                              fileNameBuffer);  // buffer for name
    if (uRetVal == 0)
    {
        std::cerr << "GetTempFileName failed" << std::endl;
        exit(1);
    }
    DeleteFile(fileNameBuffer);
    CreateDirectoryA(fileNameBuffer, NULL);
    StaticData::tempFileNames().push_back(fileNameBuffer);
    strcat(fileNameBuffer, "\\test_file");
    return fileNameBuffer;

#else  // ifdef PLATFORM_WINDOWS_VS
    strcpy(fileNameBuffer, "/tmp/SEQAN.XXXXXXXXXXXXXXXXXXXX");
#ifdef PLATFORM_WINDOWS_MINGW
    // There is no mkstemp in MinGW but it does not complain about tmpnam.
    tmpnam(fileNameBuffer);
#else  // ifdef PLATFORM_WINDOWS_MINGW
    int _tmp = mkstemp(fileNameBuffer);
    (void) _tmp;
    unlink(fileNameBuffer);
    mkdir(fileNameBuffer, 0777);
    StaticData::tempFileNames().push_back(fileNameBuffer);
    strcat(fileNameBuffer, "/test_file");
#endif  // #ifdef PLATFORM_WINDOWS_MINGW
    return fileNameBuffer;
#endif  // ifdef PLATFORM_WINDOWS_VS
}

// Initialize the testing infrastructure.
//
// Used through SEQAN_BEGIN_TESTSUITE(test_name)
inline void beginTestSuite(const char * testSuiteName, const char * argv0)
{
    // First things first: Print test suite name and current debug level.
    std::cout << "TEST SUITE " << testSuiteName << std::endl;
    printDebugLevel(std::cout);
    (void)testSuiteName;
    StaticData::testCount() = 0;
    StaticData::skippedCount() = 0;
    StaticData::errorCount() = 0;
    StaticData::totalCheckPointCount() = 0;
    StaticData::foundCheckPointCount() = 0;
    // Get the directory part of argv0, i.e. everything up to the last path
    // separator.  On Windows, both \ and / can occur, so scan for either.
    const char * end = argv0;
    for (const char * ptr = argv0; *ptr != '\0'; ++ptr)
        if (*ptr == '\\' || *ptr == '/')
            end = ptr;
    int rpos = end - argv0;
    if (rpos <= 0)
    {
        StaticData::basePath() = new char[2];
        strcpy(StaticData::basePath(), ".");
    }
    else
    {
        int len = rpos;
        StaticData::basePath() = new char[len + 1];  // one extra byte for '\0'
        strncpy(StaticData::basePath(), argv0, len);
        StaticData::basePath()[len] = '\0';
    }
    // Get path to projects.
    const char * file = __FILE__;
    int pos = -1;
    for (size_t i = 0; i < strlen(file) - strlen("core"); ++i)
    {
        if (strncmp(file + i, "core", strlen("core")) == 0)
        {
            pos = i;
        }
    }
    for (; pos > 0 && *(file + pos - 1) != '/' && *(file + pos - 1) != '\\'; --pos)
        continue;
    if (pos == -1)
    {
        std::cerr << "Could not extrapolate path to repository from __FILE__ == \"" << __FILE__ << "\"" << std::endl;
        exit(1);
    }
    StaticData::pathToRoot() = new char[pos];
    strncpy(StaticData::pathToRoot(), file, pos);
    StaticData::pathToRoot()[pos - 1] = '\0';
#ifdef PLATFORM_WINDOWS_VS
    // Set CRT reporting such that everything goes to stderr and there are
    // no popups causing timeouts.
    _set_error_mode(_OUT_TO_STDERR);
    _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
    _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
    _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
#endif  // PLATFORM_WINDOWS_VS
}

// Run test suite finalization.
//
// Used through SEQAN_END_TESTSUITE
//
// Prints a bottom banner with the error count and returns the
// program's return code.
inline int endTestSuite()
{
    delete[] StaticData::basePath();
    delete[] StaticData::pathToRoot();

    std::cout << "**************************************" << std::endl;
    std::cout << " Total Check Points : " << StaticData::totalCheckPointCount() << std::endl;
    std::cout << " Found Check Points : " << StaticData::foundCheckPointCount() << std::endl;
    std::cout << " Lost Check Points : " << StaticData::totalCheckPointCount() - StaticData::foundCheckPointCount() << std::endl;
    std::cout << "--------------------------------------" << std::endl;
    std::cout << " Total Tests: " << StaticData::testCount() << std::endl;
    std::cout << " Skipped: " << StaticData::skippedCount() << std::endl;
    std::cout << " Errors: " << StaticData::errorCount() << std::endl;
    std::cout << "**************************************" << std::endl;
    // TODO(holtgrew): Re-enable that all check points have to be found for the test to return 1;
    /*
    if (StaticData::totalCheckPointCount() != StaticData::foundCheckPointCount())
        return 1;
    */
    // Delete all temporary files that still exist.
    for (unsigned i = 0; i < StaticData::tempFileNames().size(); ++i)
    {
#ifdef PLATFORM_WINDOWS
        HANDLE hFind;
        WIN32_FIND_DATA data;
        std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("\\*");
        hFind = FindFirstFile(temp.c_str(), &data);
        if (hFind != INVALID_HANDLE_VALUE)
        {
            do
            {
                std::string tempp = StaticData::tempFileNames()[i].c_str() + std::string("\\") + data.cFileName;
                DeleteFile(tempp.c_str());
            }
            while (FindNextFile(hFind, &data));
            FindClose(hFind);
        }
        RemoveDirectory(StaticData::tempFileNames()[i].c_str());
#else  // #ifdef PLATFORM_WINDOWS
        DIR * dpdf;
        struct dirent * epdf;
        dpdf = opendir(StaticData::tempFileNames()[i].c_str());
        if (dpdf != NULL)
        {
            while ((epdf = readdir(dpdf)) != NULL)
            {
                std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("/") + std::string(epdf->d_name);
                unlink(temp.c_str());
            }
            closedir(dpdf);  // release the directory handle again
        }
        rmdir(StaticData::tempFileNames()[i].c_str());
#endif  // #ifdef PLATFORM_WINDOWS
    }

    if (StaticData::errorCount() != 0)
        return 1;

    return 0;
}

// Run test initialization.
inline void beginTest(const char * testName)
{
    StaticData::currentTestName() = testName;
    StaticData::thisTestOk() = true;
    StaticData::thisTestSkipped() = false;
    StaticData::testCount() += 1;
}

// Run test finalization.
inline void endTest()
{
    if (StaticData::thisTestSkipped())
    {
        std::cout << StaticData::currentTestName() << " SKIPPED" << std::endl;
    }
    else if (StaticData::thisTestOk())
    {
        std::cout << StaticData::currentTestName() << " OK" << std::endl;
    }
    else
    {
        std::cerr << StaticData::currentTestName() << " FAILED" << std::endl;
    }
}

// Marks the current test as "skipped".
inline void skipCurrentTest()
{
    StaticData::thisTestSkipped() = true;
    StaticData::skippedCount() += 1;
}

// Called by the macro SEQAN_ASSERT_FAIL.
inline void forceFail(const char * file, int line, const char * comment, ...)
{
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " FAILED! ";
    if (comment)
    {
        std::cerr << " (";
        va_list args;
        va_start(args, comment);
        vfprintf(stderr, comment, args);
        va_end(args);
        std::cerr << ")";
    }
    std::cerr << std::endl;
}

// Similar to forceFail above, but accepting a va_list parameter.
inline void vforceFail(const char * file, int line, const char * comment, va_list argp)
{
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " FAILED! ";
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
}

// Same as forceFail above, but with comment set to 0.
inline void forceFail(const char * file, int line)
{
    forceFail(file, line, 0);
}

// Called by the macro SEQAN_ASSERT_EQ.
//
// Tests that the given two values are equal.  Returns true iff the
// two values are equal.
template <typename T1, typename T2>
bool testEqual(char const * file, int line,
               T1 const & value1, char const * expression1,
               T2 const & value2, char const * expression2,
               char const * comment, ...)
{
    if (!(value1 == value2))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " == " << expression2 << " was: "
                  << value1 << " != " << value2;
        if (comment)
        {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testEqual above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestEqual(const char * file, int line,
                const T1 & value1, const char * expression1,
                const T2 & value2, const char * expression2,
                const char * comment, va_list argp)
{
    if (!(value1 == value2))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " == " << expression2 << " was: "
                  << value1 << " != " << value2;
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testEqual above, but with comment set to 0.
template <typename T1, typename T2>
bool testEqual(const char * file, int line,
               const T1 & value1, const char * expression1,
               const T2 & value2, const char * expression2)
{
    return testEqual(file, line, value1, expression1, value2, expression2, 0);
}

// Called by the macro SEQAN_ASSERT_IN_DELTA.
//
// Tests that the first value lies within delta (the third value) of
// the second one.  Returns true iff this is the case.
template <typename T1, typename T2, typename T3>
bool testInDelta(const char * file, int line,
                 const T1 & value1, const char * expression1,
                 const T2 & value2, const char * expression2,
                 const T3 & value3, const char * expression3,
                 const char * comment, ...)
{
    if (!(value1 >= value2 - value3 && value1 <= value2 + value3))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " in [" << expression2 << " - " << expression3
                  << ", " << expression2 << " + " << expression3 << "] was: "
                  << value1 << " not in [" << value2 - value3 << ", "
                  << value2 + value3 << "]";
        if (comment)
        {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testInDelta above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2, typename T3>
bool vtestInDelta(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2,
                  const T3 & value3, const char * expression3,
                  const char * comment, va_list argp)
{
    if (!(value1 >= value2 - value3 && value1 <= value2 + value3))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " in [" << expression2 << " - " << expression3
                  << ", " << expression2 << " + " << expression3 << "] was: "
                  << value1 << " not in [" << value2 - value3 << ", "
                  << value2 + value3 << "]";
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testInDelta above, but with comment set to 0.
template <typename T1, typename T2, typename T3>
bool testInDelta(const char * file, int line,
                 const T1 & value1, const char * expression1,
                 const T2 & value2, const char * expression2,
                 const T3 & value3, const char * expression3)
{
    return testInDelta(file, line, value1, expression1, value2, expression2, value3, expression3, 0);
}

// Called by the macro SEQAN_ASSERT_NEQ.
//
// Tests that the given two values are not equal.  Returns true iff
// the two values differ.
template <typename T1, typename T2>
bool testNotEqual(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2,
                  const char * comment, ...)
{
    if (!(value1 != value2))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " != " << expression2 << " was: "
                  << value1 << " == " << value2;
        if (comment)
        {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testNotEqual above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestNotEqual(const char * file, int line,
                   const T1 & value1, const char * expression1,
                   const T2 & value2, const char * expression2,
                   const char * comment, va_list argp)
{
    if (!(value1 != value2))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " != " << expression2 << " was: "
                  << value1 << " == " << value2;
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}
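// On failure these helpers print one diagnostic line to stderr and return
// false, which makes the calling assertion macro invoke fail().  A sketch of
// the resulting behavior (illustrative only, not compiled here; file name and
// line number follow the format strings above):
#if 0
SEQAN_ASSERT_EQ(2 + 2, 5);
// stderr: demo.cpp:10 Assertion failed : 2 + 2 == 5 was: 4 != 5
SEQAN_ASSERT_IN_DELTA(3.14, 3.0, 0.2);   // passes: 3.14 lies in [2.8, 3.2]
SEQAN_ASSERT_NEQ_MSG(1, 1, "id %d", 7);
// stderr: demo.cpp:12 Assertion failed : 1 != 1 was: 1 == 1 (id 7)
#endif

// Same as testNotEqual above, but with comment set to 0.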
template <typename T1, typename T2> bool testNotEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testNotEqual(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_GEQ. // // Tests that the first value is greater than or equal to the // second one. Returns true iff the test yields true. template <typename T1, typename T2> bool testGeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 >= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " >= " << expression2 << " was: " << value1 << " < " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testGeq above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestGeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 >= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " >= " << expression2 << " was: " << value1 << " < " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testGeq above, but with comment set to 0. template <typename T1, typename T2> bool testGeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testGeq(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_GT. // // Tests that the first value is greater than the second one. // Returns true iff the test yields true. template <typename T1, typename T2> bool testGt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 > value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " > " << expression2 << " was: " << value1 << " <= " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testGt above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestGt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 > value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. 
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " > " << expression2 << " was: "
                  << value1 << " <= " << value2;
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testGt above, but with comment set to 0.
template <typename T1, typename T2>
bool testGt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2)
{
    return testGt(file, line, value1, expression1, value2, expression2, 0);
}

// Called by the macro SEQAN_ASSERT_LEQ.
//
// Tests that the first value is less than or equal to the second
// one.  Returns true iff the test yields true.
template <typename T1, typename T2>
bool testLeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, ...)
{
    if (!(value1 <= value2))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " <= " << expression2 << " was: "
                  << value1 << " > " << value2;
        if (comment)
        {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testLeq above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestLeq(const char * file, int line,
              const T1 & value1, const char * expression1,
              const T2 & value2, const char * expression2,
              const char * comment, va_list argp)
{
    if (!(value1 <= value2))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " <= " << expression2 << " was: "
                  << value1 << " > " << value2;
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testLeq above, but with comment set to 0.
template <typename T1, typename T2>
bool testLeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2)
{
    return testLeq(file, line, value1, expression1, value2, expression2, 0);
}

// Called by the macro SEQAN_ASSERT_LT.
//
// Tests that the first value is less than the second one.
// Returns true iff the test yields true.
template <typename T1, typename T2>
bool testLt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2,
            const char * comment, ...)
{
    if (!(value1 < value2))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " < " << expression2 << " was: "
                  << value1 << " >= " << value2;
        if (comment)
        {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testLt above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestLt(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, va_list argp)
{
    if (!(value1 < value2))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression1 << " < " << expression2 << " was: "
                  << value1 << " >= " << value2;
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testLt above, but with comment set to 0.
template <typename T1, typename T2>
bool testLt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2)
{
    return testLt(file, line, value1, expression1, value2, expression2, 0);
}

// Called by the macro SEQAN_ASSERT.
//
// Test that the given argument evaluates to true.
template <typename T>
bool testTrue(const char * file, int line,
              const T & value_, const char * expression_,
              const char * comment, ...)
{
    if (!(value_))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression_ << " should be true but was " << (value_);
        if (comment)
        {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testTrue above, but accepts a va_list instead of variadic
// parameters.
template <typename T>
bool vtestTrue(const char * file, int line,
               const T & value_, const char * expression_,
               const char * comment, va_list argp)
{
    if (!(value_))
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression_ << " should be true but was " << (value_);
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testTrue above, but comment will automatically be set to 0.
template <typename T>
bool testTrue(const char * file, int line,
              const T & value_, const char * expression_)
{
    return testTrue(file, line, value_, expression_, 0);
}

// Called by the macro SEQAN_ASSERT_NOT.
//
// Test that the given argument evaluates to false.
template <typename T>
bool testFalse(const char * file, int line,
               const T & value_, const char * expression_,
               const char * comment, ...)
{
    if (value_)
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression_ << " should be false but was " << (value_);
        if (comment)
        {
            std::cerr << " (";
            va_list args;
            va_start(args, comment);
            vfprintf(stderr, comment, args);
            va_end(args);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Similar to testFalse above, but accepts a va_list instead of variadic
// parameters.
template <typename T>
bool vtestFalse(const char * file, int line,
                const T & value_, const char * expression_,
                const char * comment, va_list argp)
{
    if (value_)
    {
        // Increase global error count.
        StaticData::thisTestOk() = false;
        StaticData::errorCount() += 1;
        // Print assertion failure text, with comment if any is given.
        std::cerr << file << ":" << line << " Assertion failed : "
                  << expression_ << " should be false but was " << (value_);
        if (comment)
        {
            std::cerr << " (";
            vfprintf(stderr, comment, argp);
            std::cerr << ")";
        }
        std::cerr << std::endl;
        return false;
    }
    return true;
}

// Same as testFalse above, but comment will automatically be set to 0.
template <typename T>
bool testFalse(const char * file, int line,
               const T & value_, const char * expression_)
{
    return testFalse(file, line, value_, expression_, 0);
}

// Represents a check point in a file.
struct CheckPoint
{
    // Path to the file.
    const char * file;
    // Line in the file.
    unsigned int line;

    // Less-than comparator for check points.
    bool operator<(const CheckPoint & other) const
    {
        int c = strcmp(file, other.file);
        if (c < 0)
            return true;
        if (c == 0 && line < other.line)
            return true;
        return false;
    }
};

// Wrapper for a set of check points.
// TODO(holtgrew): Simply store the set?
struct CheckPointStore
{
    static ::std::set<CheckPoint> & data()
    {
        static ::std::set<CheckPoint> result;
        return result;
    }
};

// Puts the given check point into the CheckPointStore's data.
inline bool registerCheckPoint(unsigned int line, const char * file)
{
    const char * file_name = strrchr(file, '/');
    const char * file_name_2 = strrchr(file, '\\');
    if (file_name_2 > file_name)
        file_name = file_name_2;
    if (!file_name)
        file_name = file;
    else
        ++file_name;

    CheckPoint cp = {file_name, line};
#ifdef _OMP
#endif  // #ifdef _OMP
    CheckPointStore::data().insert(cp);
    return true;
}

// Test whether the given check point exists in the check point
// store.
inline void testCheckPoint(const char * file, unsigned int line)
{
    StaticData::totalCheckPointCount() += 1;
    CheckPoint cp = {file, line};
    if (CheckPointStore::data().find(cp) == CheckPointStore::data().end())
    {
        std::cerr << file << ":" << line << " -- Check point lost." << std::endl;
        return;
    }
    StaticData::foundCheckPointCount() += 1;
}

// Verify the check points for the given file.
inline void verifyCheckPoints(const char * file)
{
    char const * file_name = strrchr(file, '/');
    char const * file_name_2 = strrchr(file, '\\');
    if (file_name_2 > file_name)
        file_name = file_name_2;
    if (!file_name)
        file_name = file;
    else
        ++file_name;

    int len = strlen(StaticData::pathToRoot()) + strlen("/") + strlen(file) + 1;
    char * absolutePath = new char[len];
    absolutePath[0] = '\0';
    strcat(absolutePath, StaticData::pathToRoot());
    strcat(absolutePath, "/");
    strcat(absolutePath, file);

    FILE * fl = ::std::fopen(absolutePath, "r");
    delete[] absolutePath;
    if (!fl)
    {
        std::cerr << file << " -- verifyCheckPoints could not find this file." << std::endl;
        return;  // Nothing to scan if the file could not be opened.
    }
    unsigned int line_number = 1;
    char buf[1 << 16];

    while (::std::fgets(buf, sizeof(buf), fl))
    {
        if (::std::strstr(buf, "SEQAN_CHECKPOINT"))
        {
            testCheckPoint(file_name, line_number);
        }
        ++line_number;
    }

    ::std::fclose(fl);
}

#if SEQAN_ENABLE_TESTING
// If in testing mode then raise an AssertionFailedException.
inline void fail()
{
    StaticData::thisTestOk() = false;
    printStackTrace(20);
    throw AssertionFailedException();
}

#else
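// fail() is the single funnel for all assertion macros.  In testing mode
// (the branch above) it throws, so that SEQAN_CALL_TEST can catch the
// AssertionFailedException, record the failure, and continue with the next
// test; in the branch below it aborts the whole process instead.  A sketch of
// the observable difference (illustrative only, not compiled here):
#if 0
// With SEQAN_ENABLE_TESTING == 1 the suite keeps running:
SEQAN_CALL_TEST(test_that_fails);   // test reported as FAILED, suite goes on
SEQAN_CALL_TEST(test_that_passes);  // still executed afterwards
// With SEQAN_ENABLE_TESTING == 0 a failing SEQAN_ASSERT calls abort().
#endif

// If not in testing mode then quit with an abort.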
inline void fail() { printStackTrace(20); abort(); } #endif // #if SEQAN_ENABLE_TESTING } // namespace ClassTest /** .Macro.SEQAN_DEFINE_TEST ..summary:Expand to test definition. ..cat:Testing & Debugging ..signature:SEQAN_DEFINE_TEST(test_name) ..param.test_name:The name of the test. ..remarks:This macro expands to the definition of a $void$ function with $SEQAN_TEST_ + test_name$ as its name. ..example.code: SEQAN_DEFINE_TEST(test_name) { SEQAN_ASSERT_LT(0, 3); } ..see:Macro.SEQAN_SKIP_TEST ..see:Macro.SEQAN_CALL_TEST ..see:Macro.SEQAN_BEGIN_TESTSUITE ..see:Macro.SEQAN_END_TESTSUITE */ // This macro expands to function header for one test. #define SEQAN_DEFINE_TEST(test_name) \ template <bool speed_up_dummy_to_prevent_compilation_of_unused_tests_> \ void SEQAN_TEST_ ## test_name() /** .Macro.SEQAN_BEGIN_TESTSUITE ..summary:Expand to a test suite beginning. ..cat:Testing & Debugging ..signature:SEQAN_BEGIN_TESTSUITE(name) ..param.name:The name of the test suite. ..remarks:This macro expands to a $main()$ function and some initialization code that sets up the test system. ..example.code: #include <seqan/basic.h> SEQAN_BEGIN_TESTSUITE(test_foo) { SEQAN_CALL_TEST(test_foo_my_test); } SEQAN_END_TESTSUITE ..see:Macro.SEQAN_SKIP_TEST ..see:Macro.SEQAN_DEFINE_TEST ..see:Macro.SEQAN_CALL_TEST ..see:Macro.SEQAN_END_TESTSUITE */ #if SEQAN_ENABLE_TESTING // This macro expands to startup code for a test file. #define SEQAN_BEGIN_TESTSUITE(suite_name) \ int main(int argc, char ** argv) { \ (void) argc; \ ::seqan::ClassTest::beginTestSuite(# suite_name, argv[0]); /** .Macro.SEQAN_END_TESTSUITE ..summary:Expand to a test suite ending. ..cat:Testing & Debugging ..signature:SEQAN_END_TESTSUITE ..remarks:This macro expands to finalization code for a test suite. ..example.code: #include <seqan/basic.h> SEQAN_BEGIN_TESTSUITE(test_foo) { SEQAN_CALL_TEST(test_foo_my_test); } SEQAN_END_TESTSUITE ..see:Macro.SEQAN_SKIP_TEST ..see:Macro.SEQAN_DEFINE_TEST ..see:Macro.SEQAN_CALL_TEST ..see:Macro.SEQAN_BEGIN_TESTSUITE */ // This macro expands to shutdown code for a test file. #define SEQAN_END_TESTSUITE \ return ::seqan::ClassTest::endTestSuite(); \ } /** .Macro.SEQAN_CALL_TEST ..summary:Expand to calling a test. ..cat:Testing & Debugging ..signature:SEQAN_CALL_TEST(test_name) ..param.test_name:The name of the test. ..remarks:This expects the test to be defined with @Macro.SEQAN_DEFINE_TEST@. This macro will expand to code that calls the code inside a try/catch block. Use this macro within a test suite, only. ..example.code: // Within a test suite. SEQAN_CALL_TEST(test_name); ..see:Macro.SEQAN_SKIP_TEST ..see:Macro.SEQAN_DEFINE_TEST ..see:Macro.SEQAN_BEGIN_TESTSUITE ..see:Macro.SEQAN_END_TESTSUITE */ // This macro expands to code to call a given test. #define SEQAN_CALL_TEST(test_name) \ do { \ ::seqan::ClassTest::beginTest(# test_name); \ try { \ SEQAN_TEST_ ## test_name<true>(); \ } catch (::seqan::ClassTest::AssertionFailedException e) { \ /* Swallow exception, go on with next test. */ \ (void) e; /* Get rid of unused variable warning. */ \ } \ ::seqan::ClassTest::endTest(); \ } while (false) /** .Macro.SEQAN_SKIP_TEST ..cat:Testing & Debugging ..summary:Force the test to return without failing and mark it as skipped. 
..signature:SEQAN_SKIP_TEST ..example.code: SEQAN_DEFINE_TEST(test_skipped) { SEQAN_SKIP_TEST; } ..see:Macro.SEQAN_DEFINE_TEST ..see:Macro.SEQAN_CALL_TEST ..see:Macro.SEQAN_BEGIN_TESTSUITE ..see:Macro.SEQAN_END_TESTSUITE */ // This macro returns from the current function and logs a "skipped" // event for the current test. #define SEQAN_SKIP_TEST \ do { \ ::seqan::ClassTest::skipCurrentTest(); \ return; \ } while (false) #endif // #if SEQAN_ENABLE_TESTING // variadic macros are not supported by VS 2003 and before #if !defined(_MSC_VER) || (_MSC_VER >= 1400) #if SEQAN_ENABLE_DEBUG /** .Macro.SEQAN_ASSERT ..cat:Assertions ..summary:Test that the given expression can be coerced to $true$. ..signature:SEQAN_ASSERT(expression) ..signature:SEQAN_ASSERT_MSG(expression, message[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT(0); // will fail SEQAN_ASSERT(1); // will run through SEQAN_ASSERT_MSG(0, "message %d", 2); // Will fail with message. ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_NOT ..cat:Assertions ..summary:Test that the given expression can be coerced to $false$. ..signature:SEQAN_ASSERT(expression) ..signature:SEQAN_ASSERT_MSG(expression, message[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_NOT(0); // will run through SEQAN_ASSERT_NOT(1); // will fail SEQAN_ASSERT_NOT_MSG(0, "msg %s", "test"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_EQ ..cat:Assertions ..summary:Test that two given expressions are equal, as defined by the matching call to the $operator=(,)$. ..signature:SEQAN_ASSERT_EQ(expression1, expression2) ..signature:SEQAN_ASSERT_EQ_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. 
..example.code: SEQAN_ASSERT_EQ(0, false); // will run through SEQAN_ASSERT_EQ(1, false); // will fail SEQAN_ASSERT_EQ(1, "foo"); // will not compile SEQAN_ASSERT_EQ_MSG(1, false, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_NEQ ..cat:Assertions ..summary:Test that two given expressions are not equal, as defined by the matching call to the $operator!=(,)$. ..signature:SEQAN_ASSERT_NEQ(expression) ..signature:SEQAN_ASSERT_NEQ_MSG(expression, message[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_NEQ(0, false); // will fail SEQAN_ASSERT_NEQ(1, false); // will run through SEQAN_ASSERT_NEQ(1, "foo"); // will not compile SEQAN_ASSERT_NEQ_MSG(1, false, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_LT ..cat:Assertions ..summary:Test that the two given expressions are in the less-than relation as defined by the matching call to operator<(,). ..signature:SEQAN_ASSERT_LT(expression1, expression2) ..signature:SEQAN_ASSERT_LT(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_LT(0, 1); // will run through SEQAN_ASSERT_LT(1, 1); // will not run through SEQAN_ASSERT_LT_MSG(1, 1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_LEQ ..cat:Assertions ..summary:Test that the two given expressions are in the less-than-or-equal relation as defined by the matching call to operator<=(,). ..signature:SEQAN_ASSERT_LEQ(expression1, expression2) ..signature:SEQAN_ASSERT_LEQ_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. 
..example.code: SEQAN_ASSERT_LEQ(1, 1); // will run through SEQAN_ASSERT_LEQ(1, 2); // will not run through SEQAN_ASSERT_LEQ_MSG(1, 2, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_GT ..cat:Assertions ..summary:Test that the two given expressions are in the greather-than relation as defined by the matching call to operator>(,). ..signature:SEQAN_ASSERT_GT(expression1, expression2) ..signature:SEQAN_ASSERT_GT_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_GT(2, 1); // will run through SEQAN_ASSERT_GT(1, 1); // will not run through SEQAN_ASSERT_GT_MSG(1, 1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_GEQ ..cat:Assertions ..summary:Test that the two given expressions are in the greater-than-or-equal relation as defined by the matching call to operator>=(,). ..signature:SEQAN_ASSERT_GEQ(expression1, expression2) ..signature:SEQAN_ASSERT_GEQ_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_GEQ(1, 1); // will run through SEQAN_ASSERT_GEQ(0, 1); // will not run through SEQAN_ASSERT_GEQ_MSG(0, 1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_IN_DELTA ..cat:Assertions ..summary:Test that the given expression can be coerced to $true$. ..signature:SEQAN_ASSERT_IN_DELTA(x, y, delta) ..signature:SEQAN_ASSERT_IN_DELTA_MSG(x, y, delta, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. 
..example.code: SEQAN_ASSERT_IN_DELTA(0, 0, 0.1); // will run through SEQAN_ASSERT_IN_DELTA(1, -2, 1); // will fail SEQAN_ASSERT_IN_DELTA(1, "foo"); // will not compile SEQAN_ASSERT_IN_DELTA_MSG(1, 0, 0.1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL */ // Force a test failure. // // Usage: SEQAN_ASSERT_FAIL("Failed at position %d", pos); #define SEQAN_ASSERT_FAIL(...) \ do { \ ::seqan::ClassTest::forceFail(__FILE__, __LINE__, \ __VA_ARGS__); \ ::seqan::ClassTest::fail(); \ } while (false) // Equality assertion without a comment. // // Usage: SEQAN_ASSERT_EQ(4, 4); #define SEQAN_ASSERT_EQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Equality assertion with a comment. // // Usage: SEQAN_ASSERT_EQ(4, 4); #define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // In-delta-environment assertion without a comment. // // Usage: SEQAN_ASSERT_IN_DELTA(4.1, 4, 0.1); #define SEQAN_ASSERT_IN_DELTA(_arg1, _arg2, _arg3) \ do { \ if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ (_arg3), # _arg3)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // In-delta-environment assertion witha comment. // // Usage: SEQAN_ASSERT_IN_DELTA_MSG(4.1, 4, 0.1, "3.9 <= 4.1 <= 4.1"); #define SEQAN_ASSERT_IN_DELTA_MSG(_arg1, _arg2, _arg3, ...) \ do { \ if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ (_arg3), # _arg3, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Inequality assertion without a comment. // // Usage: SEQAN_ASSERT_NEQ(4, 5); #define SEQAN_ASSERT_NEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Inequality assertion with a comment. // // Usage: SEQAN_ASSERT_NEQ(4, 5); #define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than-or-equal assertion without a comment. #define SEQAN_ASSERT_LEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than-or-equal assertion with a comment. #define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than assertion without a comment. #define SEQAN_ASSERT_LT(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than assertion with a comment. #define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) 
\ do { \ if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than-or-equal assertion without a comment. #define SEQAN_ASSERT_GEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than-or-equal assertion with a comment. #define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than assertion without a comment. #define SEQAN_ASSERT_GT(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than assertion with a comment. #define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.; // Trueness assertion with a comment. // // Usage: SEQAN_ASSERT(false); #define SEQAN_ASSERT(_arg1) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), # _arg1)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.; // Trueness assertion with a comment. #define SEQAN_ASSERT_MSG(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), # _arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Falseness assertion without a comment. // // Usage: SEQAN_ASSERT_NOT(false); #define SEQAN_ASSERT_NOT(_arg1) \ do { \ if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \ (_arg1), # _arg1)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Falseness assertion with a comment. #define SEQAN_ASSERT_NOT_MSG(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \ (_arg1), # _arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) #else // #if SEQAN_ENABLE_DEBUG #define SEQAN_ASSERT_EQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_NEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_LEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_LT(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_GEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_GT(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT(_arg1) do {} while (false) #define SEQAN_ASSERT_MSG(_arg1, ...) do {} while (false) #define SEQAN_ASSERT_NOT(_arg1) do {} while (false) #define SEQAN_ASSERT_NOT_MSG(_arg1, ...) do {} while (false) #define SEQAN_ASSERT_FAIL(...) do {} while (false) #endif // #if SEQAN_ENABLE_DEBUG #else // no variadic macros #if SEQAN_ENABLE_DEBUG inline void SEQAN_ASSERT_FAIL(const char * comment, ...) 
{ va_list args; va_start(args, comment); ::seqan::ClassTest::vforceFail("", 0, comment, args); ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3) { if (!::seqan::ClassTest::testInDelta("", 0, _arg1, "", _arg2, "", _arg3, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestInDelta("", 0, _arg1, "", _arg2, "", _arg3, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testEqual("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestEqual("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testNotEqual("", _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestNotEqual("", _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testLeq("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestLeq("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testLt("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestLt("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testGeq("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestGeq("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testGt("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) 
{ va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestGt("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1> void SEQAN_ASSERT(T1 const & _arg1) { if (!::seqan::ClassTest::testTrue("", 0, _arg1, "")) ::seqan::ClassTest::fail(); } template <typename T1> void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestTrue("", 0, _arg1, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1> void SEQAN_ASSERT_NOT(T1 const & _arg1) { if (!::seqan::ClassTest::testFalse("", 0, _arg1, "")) ::seqan::ClassTest::fail(); } template <typename T1> void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestFalse("", 0, _arg1, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } #else // #if SEQAN_ENABLE_DEBUG inline void SEQAN_ASSERT_FAIL(const char * comment, ...) {} template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3) {} template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1> void SEQAN_ASSERT(T1 const & _arg1) {} template <typename T1> void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...) {} template <typename T1> void SEQAN_ASSERT_NOT(T1 const & _arg1) {} template <typename T1> void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...) {} #endif // #if SEQAN_ENABLE_DEBUG #endif // no variadic macros // Returns a string (of type char*) with the path to the called binary. // // Use this to locate files relative to the test binary. #define SEQAN_PROGRAM_PATH \ ::seqan::ClassTest::StaticData::basePath() // TODO(holtgrew): Subject to change wiht restructuring. /** .Macro.SEQAN_PATH_TO_ROOT ..cat:Testing & Debugging ..summary:Return path to the checkout root directory (i.e. containing core/extras). 
..returns:$char const *$, string with the path to the parent directory of the tests directory. ..signature:SEQAN_PATH_TO_ROOT() ..remarks:The pointed-to string is initialized on program startup by the code generated by @Macro.SEQAN_BEGIN_TESTSUITE@. ..example.code: const char *p = SEQAN_PATH_TO_ROOT(); char buffer[1000]; strcpy(buffer, p); strcat(buffer, "/tests/files/example.txt"); FILE *f = fopen(buffer, "w"); fprintf(f, "Test Data"); fclose(f); ..see:Macro.SEQAN_TEMP_FILENAME */ // Returns a const char * string with the path to the projects directory. #define SEQAN_PATH_TO_ROOT() \ ::seqan::ClassTest::StaticData::pathToRoot() // Returns the POSIX int file handle to an open file. // TODO(holtgrewe): Uncomment if openTempFile has been implemented. // #define SEQAN_OPEN_TEMP_FILE() (::seqan::ClassTest::openTempFile()) /** .Macro.SEQAN_TEMP_FILENAME ..cat:Testing & Debugging ..summary:Generates the name of a temporary file. ..returns:$char const *$, string with the path to a temporary file. ..signature:SEQAN_TEMP_FILENAME() ..remarks:The pointed-to string is stored in a buffer and is overwritten by the next call to this macro. Copy it out if you need it. ..example.code: const char *p = SEQAN_TEMP_FILENAME(); char tempFilename[1000]; strcpy(tempFilename, p); FILE *f = fopen(tempFilename, "w"); fprintf(f, "Test Data"); fclose(f); ..see:Macro.SEQAN_PATH_TO_ROOT */ // Returns a temporary filename. #define SEQAN_TEMP_FILENAME() (::seqan::ClassTest::tempFileName()) /** .Macro.SEQAN_VERIFY_CHECKPOINTS ..cat:Testing & Debugging ..summary:Verify check points for the given file name. ..signature:SEQAN_VERIFY_CHECKPOINTS(path) ..param.path:Path to the file to verify check points for. Relative to parent directory of tests. ..example.code: SEQAN_VERIFY_CHECKPOINTS("core/include/seqan/basic_alphabet.h"); ..see:Macro.SEQAN_CHECKPOINT .Macro.SEQAN_CHECKPOINT ..cat:Testing & Debugging ..summary:Generate a check point. ..signature:SEQAN_CHECKPOINT ..remarks:Whenever the code executes the instructions generated by this macro, the check point for this line will be set in global testing state. Use @Macro.SEQAN_VERIFY_CHECKPOINTS@ to verify whether all checkpoints have been reached in a file up to this point. ..example.code: SEQAN_CHECKPOINT; ..see:Macro.SEQAN_VERIFY_CHECKPOINTS */ #if SEQAN_ENABLE_CHECKPOINTS // Create a check point at the point where the macro is placed. // TODO(holtgrew): Should be called SEQAN_CHECK_POINT to be consistent. #define SEQAN_CHECKPOINT \ ::seqan::ClassTest::registerCheckPoint(__LINE__, __FILE__); // Call the check point verification code for the given file. #define SEQAN_VERIFY_CHECKPOINTS(filename) \ ::seqan::ClassTest::verifyCheckPoints(filename) #else // #if SEQAN_ENABLE_CHECKPOINTS #define SEQAN_CHECKPOINT // If check point verification is requested although check points are disabled then print // a warning. #define SEQAN_VERIFY_CHECKPOINTS(filename) \ do { \ fprintf(stderr, ("WARNING: Check point verification is " \ "disabled. 
Trying to verify %s from %s:%d.\n"), \ filename, __FILE__, __LINE__); \ } while (false) #endif // #if SEQAN_ENABLE_CHECKPOINTS #if !SEQAN_ENABLE_TESTING #define SEQAN_BEGIN_TESTSUITE(suite_name) \ int main(int argc, char ** argv) { \ (void) argc; \ (void) argv; \ fprintf(stderr, "Warning: SEQAN_ENABLE_TESTING is wrong and you used the macro SEQAN_BEGIN_TESTSUITE!\n"); #define SEQAN_END_TESTSUITE \ return 0; \ } #define SEQAN_CALL_TEST(test_name) do { SEQAN_TEST_ ## test_name(); } while (false) #define SEQAN_SKIP_TEST do {} while (false) #endif // #if !SEQAN_ENABLE_TESTING } // namespace seqan #endif // SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
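// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original header): how the testsuite macros
// above fit together in a test program. The suite name test_example and the
// test name test_example_arithmetic are placeholders.
#if 0
#include <seqan/basic.h>

SEQAN_DEFINE_TEST(test_example_arithmetic)
{
    SEQAN_ASSERT_EQ(1 + 1, 2);   // passes
    SEQAN_ASSERT_LT(1, 2);       // passes
}

SEQAN_BEGIN_TESTSUITE(test_example)
{
    SEQAN_CALL_TEST(test_example_arithmetic);
}
SEQAN_END_TESTSUITE
#endif
// ---------------------------------------------------------------------------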
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2013, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // // ========================================================================== // Author: Manuel Holtgrewe <[email protected]> // ========================================================================== // The SeqAn testing infrastructure. Based on ideas from the OpenMS // "ClassTest.h". // ========================================================================== // TODO(holtgrew): This could use some cleanup. // SEQAN_NO_GENERATED_FORWARDS #ifndef SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_ #define SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_ #include <iostream> // stdout, stderr #include <iomanip> #include <cstring> // strrpos #include <cstdlib> // exit() #include <cstdio> #include <cstdarg> // va_start, va_list, va_end #include <set> #include <vector> #include <string> #ifdef PLATFORM_WINDOWS #include <Windows.h> // DeleteFile() #else // #ifdef PLATFORM_WINDOWS #include <unistd.h> // unlink() #include <sys/stat.h> // mkdir() #include <dirent.h> // DIR #if SEQAN_HAS_EXECINFO #include <execinfo.h> // backtrace(), backtrace_symbols() #endif // #if SEQAN_HAS_EXECINFO #include <cxxabi.h> // __cxa_demangle() #include <signal.h> #endif // #ifdef PLATFORM_WINDOWS /** .Macro.SEQAN_FAIL ..cat:Assertions ..summary:Force abortion of program, regardless of debugging settings. ..signature:SEQAN_FAIL(msg[, args]) ..param.msg:A format string. ..param.args:An optional list of arguments. ..remarks:Use this if something really unexpected happens inside your functions and there is no way to report this through the API. A good example would be logic errors, e.g. invalid values. ..example.text:In the following example, the $SEQAN_FAIL$ is there if a possible value is added to $MyEnum$ but the function $foo$ is not updated accordingly. 
..example.code: enum MyEnum { VALUE_ONE, VALUE_TWO }; bool foo(MyEnum x) { switch (x) { case VALUE_ONE: // do something return true; case VALUE_TWO: // do something return true; } SEQAN_FAIL("Logic error. Should never reach here. x == %d.", x); return false; } ..include:seqan/basic.h ..see:Macro.SEQAN_CHECK */ #define SEQAN_FAIL(...) \ do { \ ::seqan::ClassTest::forceFail(__FILE__, __LINE__, \ __VA_ARGS__); \ ::seqan::ClassTest::fail(); \ } while (false) /** .Macro.SEQAN_CHECK ..cat:Assertions ..summary:Force abortion of program if a condition is not met, regardless of debugging settings. ..signature:SEQAN_CHECK(condition, msg[, args]) ..param.msg:A format string. ..param.args:An optional list of arguments. ..remarks:Use this if something really unexpected happens inside your functions and there is no way to report this through the API. A good example would be logic errors, e.g. invalid values. ..example.text:In the following example, the $SEQAN_CHECK$ stops program execution if a value is added to $MyEnum$ but the function $foo$ is not updated accordingly. ..example.code: enum MyEnum { VALUE_ONE, VALUE_TWO }; bool foo(MyEnum x) { SEQAN_CHECK((x == VALUE_ONE || x == VALUE_TWO), "Invalid value for x == %d.", x); switch (x) { case VALUE_ONE: // do something return true; case VALUE_TWO: // do something return true; } return false; // Should never reach here, checked above with SEQAN_CHECK. } ..include:seqan/basic.h ..see:Macro.SEQAN_FAIL */ #define SEQAN_CHECK(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), # _arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // SeqAn has three global debug/testing levels: testing, debug and // release. Depending on the level, the SEQAN_ASSERT_* and // SEQAN_CHECKPOINT macros will be enabled. // // Note that this is independent of the <cassert> assertions and // NDEBUG being defined. // // The levels are enabled by the values of the macros // SEQAN_ENABLE_TESTING and SEQAN_ENABLE_DEBUG. By setting a macro to // 0, one disables the level and by setting the macro to 1, one // enables a level. Enabling testing also enables debug, overriding a // value of 0 for SEQAN_ENABLE_DEBUG. // // If the level is release (both the macros for debug and testing are // 0), the assertions will be disabled. If the level is debug then // the assertions will be enabled. If the level is testing then the // checkpoint macros will also be enabled. // // The default is to enable debugging but disable testing. // // You can print the current level using the function seqan::printDebugLevel(). /** .Macro.SEQAN_ENABLE_TESTING ..cat:Testing & Debugging ..summary:Indicates whether testing is enabled. ..signature:SEQAN_ENABLE_TESTING ..remarks:When enabled (set to 1), testing is enabled. This means the macros for the tests (@Macro.SEQAN_BEGIN_TESTSUITE@, @Macro.SEQAN_DEFINE_TEST@, @Macro.SEQAN_CALL_TEST@, and @Macro.SEQAN_END_TESTSUITE@) will be enabled. This makes failing assertions raise exceptions instead of calling $abort()$ and enables checkpoints. ..remarks:By default, this is set to 0. ..remarks:If @Macro.SEQAN_ENABLE_CHECKPOINTS@ is not defined before including $<seqan/basic.h>$, then @Macro.SEQAN_ENABLE_CHECKPOINTS@ will be set to the value of @Macro.SEQAN_ENABLE_TESTING@ (after the default initialization to 0). ..remarks:If you want to change this value, you have to define this value before including any SeqAn header. ..remarks:If set to 1 then @Macro.SEQAN_ENABLE_DEBUG@ is force-set to 1 as well.
..see:Macro.SEQAN_ENABLE_DEBUG ..see:Macro.SEQAN_ENABLE_CHECKPOINTS */ // Set default for SEQAN_ENABLE_TESTING. #ifndef SEQAN_ENABLE_TESTING #define SEQAN_ENABLE_TESTING 0 #endif // #ifndef SEQAN_ENABLE_TESTING /** .Macro.SEQAN_ENABLE_DEBUG ..cat:Testing & Debugging ..summary:Indicates whether debugging is enabled. ..signature:SEQAN_ENABLE_DEBUG ..remarks:When enabled (set to 1), debugging is enabled. This means the assertion macros are expanded to actual code and not to nothing. ..remarks:By default, this is set to 0 if $NDEBUG$ is defined and to 1 if $NDEBUG$ is not defined. ..remarks:If you want to change this value, you have to define this value before including any SeqAn header. ..remarks:Force-enabled if @Macro.SEQAN_ENABLE_TESTING@ is set to 1. ..see:Macro.SEQAN_ENABLE_TESTING ..see:Macro.SEQAN_ENABLE_CHECKPOINTS */ // Set default for SEQAN_ENABLE_DEBUG. #ifndef SEQAN_ENABLE_DEBUG #ifdef NDEBUG #define SEQAN_ENABLE_DEBUG 0 #else // #ifdef NDEBUG #define SEQAN_ENABLE_DEBUG 1 #endif // #ifdef NDEBUG #endif // #ifndef SEQAN_ENABLE_DEBUG // Force-enable debugging if testing is enabled. #if SEQAN_ENABLE_TESTING #undef SEQAN_ENABLE_DEBUG #define SEQAN_ENABLE_DEBUG 1 #endif // #if SEQAN_ENABLE_TESTING /** .Macro.SEQAN_ENABLE_CHECKPOINTS ..cat:Testing & Debugging ..summary:Indicates whether checkpoints are enabled. ..signature:SEQAN_ENABLE_CHECKPOINTS ..remarks:When enabled (set to 1), checkpoints are enabled. This means the $SEQAN_CHECKPOINT$ macros are expanded to actual code and not to nothing. ..remarks:By default, this is set to $SEQAN_ENABLE_TESTING$. ..remarks:Checkpoints can come at large increases of running time in your tests. Disable them when your test run too slow. ..remarks:If you want to change this value, you have to define this value before including any SeqAn header. ..example.text:Disable checkpoints in a program. ..example.code: // Disable SeqAn checkpoints in this program. #define SEQAN_ENABLE_CHECKPOINTS 0 // Any SeqAn headers or headers including SeqAn headers have to come AFTER the // definition of SEQAN_ENABLE_CHECKPOINT above. #include <seqan/base.h> int main(int argc, char const ** argv) { // Any call to SeqAn functions will NOT log any checkpoints. return 0; } ..see:Macro.SEQAN_ENABLE_DEBUG ..see:Macro.SEQAN_ENABLE_TESTING */ // Allow disabling checkpoints independent of testing. #ifndef SEQAN_ENABLE_CHECKPOINTS #define SEQAN_ENABLE_CHECKPOINTS 0 // SEQAN_ENABLE_TESTING #endif // #ifndef SEQAN_ENABLE_CHECKPOINTS /** .Macro.SEQAN_TYPEDEF_FOR_DEBUG ..cat:Testing & Debugging ..summary: When using typedefs that are only used in debug mode then they have to be marked with macro. ..signature:SEQAN_TYPEDEF_FOR_DEBUG ..example.code: typedef int TInt SEQAN_TYPEDEF_FOR_DEBUG; */ #if !SEQAN_ENABLE_DEBUG # if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7))) # define SEQAN_TYPEDEF_FOR_DEBUG __attribute__((unused)) # else # define SEQAN_TYPEDEF_FOR_DEBUG # endif #else # define SEQAN_TYPEDEF_FOR_DEBUG #endif // TODO(holtgrew): This one is for profiling and in tests. #if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7))) # define SEQAN_UNUSED_TYPEDEF __attribute__((unused)) #else # define SEQAN_UNUSED_TYPEDEF #endif namespace seqan { // SEQAN_CXX_FLAGS_ contains the compiler flags, SEQAN_CXX_FLAGS is a string // literal with this value. 
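// Editor's note (added, illustrative): SEQAN_MKSTRING below uses the classic
// two-level stringification idiom; the extra level of indirection makes the
// preprocessor expand its argument before stringifying it. Sketch, assuming
// the build system defined SEQAN_CXX_FLAGS_=-O3:
//
//   SEQAN_MKSTRING(SEQAN_CXX_FLAGS_)    // yields "-O3"
//   SEQAN_MKSTRING_(SEQAN_CXX_FLAGS_)   // would yield "SEQAN_CXX_FLAGS_"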
#if !defined(SEQAN_CXX_FLAGS_) #define SEQAN_CXX_FLAGS_ SEQAN_CXX_FLAGS_NOT_SET #endif // !defined(SEQAN_CXX_FLAGS__) #define SEQAN_MKSTRING_(str) # str #define SEQAN_MKSTRING(str) SEQAN_MKSTRING_(str) #define SEQAN_CXX_FLAGS SEQAN_MKSTRING(SEQAN_CXX_FLAGS_) //#undef SEQAN_MKSTRING //#undef SEQAN_MKSTRING_ /** .Function.printDebugLevel ..cat:Testing & Debugging ..summary:Print the current SeqAn debug level and the compiler flags to the given stream. ..signature:printDebugLevel(stream) ..param.stream:The stream to print to, e.g. $std::cout$. ..include:seqan/basic.h */ template <typename TStream> void printDebugLevel(TStream & stream) { stream << "SEQAN_ENABLE_DEBUG == " << SEQAN_ENABLE_DEBUG << std::endl; stream << "SEQAN_ENABLE_TESTING == " << SEQAN_ENABLE_TESTING << std::endl; stream << "SEQAN_ENABLE_CHECKPOINTS == " << SEQAN_ENABLE_CHECKPOINTS << std::endl; stream << "SEQAN_CXX_FLAGS == \"" << SEQAN_CXX_FLAGS << "\"" << std::endl; } #if defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO template <typename TSize> void printStackTrace(TSize /*maxFrames*/) {} #else // print a demangled stack backtrace of the caller function template <typename TSize> void printStackTrace(TSize maxFrames) { void * addrlist[256]; char temp[4096]; char addr[20]; char offset[20]; size_t size; int status; char * symname; char * demangled; std::cerr << std::endl << "stack trace:" << std::endl; int addrlist_len = backtrace(addrlist, maxFrames); char ** symbollist = backtrace_symbols(addrlist, addrlist_len); for (int i = 1; i < addrlist_len; ++i) { offset[0] = 0; addr[0] = 0; demangled = NULL; // LINUX FORMAT: // ./sam2svg [0x473b8c] // /lib/libc.so.6 [0x7f40d2526f60] // ./sam2svg(_Z2f3v+0x10) [0x47200c] // ./sam2svg(_Z2f2v+0xd) [0x472021] // ./sam2svg(main+0x1367) [0x4735fc] // /lib/libc.so.6(__libc_start_main+0xe6) [0x7f40d25131a6] // if (3 == sscanf(symbollist[i], "%*[^(](%4095[^+]+%[^)]) %s", temp, offset, addr)) { symname = temp; if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status))) { symname = demangled; } } // MAC OS X FORMAT: // 1 sam2svg 0x0000000100003a39 _ZN5seqanL28signalHandlerPrintStackTraceEi + 21 // 2 libSystem.B.dylib 0x00007fff87a6d67a _sigtramp + 26 // 3 libSystem.B.dylib 0x00007fff87a76df7 tiny_free_do_recirc_to_depot + 980 // 4 sam2svg 0x00000001000021b9 _Z2f2v + 9 // 5 sam2svg 0x00000001000034b1 main + 4546 // 6 sam2svg 0x0000000100002190 start + 52 else if (3 == sscanf(symbollist[i], "%*d %*s %s %s %*s %s", addr, temp, offset)) { symname = temp; if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status))) { symname = demangled; } } // LINUX FORMAT: // ./sam2svg [0x473b8c] // /lib/libc.so.6 [0x7f40d2526f60] else if (2 == sscanf(symbollist[i], "%s %s", temp, addr)) { symname = temp; } // DEFAULT: else { symname = symbollist[i]; } std::cerr << std::setw(3) << i - 1; std::cerr << std::setw(20) << addr; std::cerr << " " << symname; if (offset[0] != 0) std::cerr << " + " << offset; std::cerr << std::endl; free(demangled); } std::cerr << std::endl; // Only the array must be freed according to man page, not the contents. free(symbollist); } static void signalHandlerPrintStackTrace(int signum) { std::cerr << std::endl; printStackTrace(20); signal(signum, SIG_DFL); kill(getpid(), signum); } inline int _deploySignalHandlers() { signal(SIGSEGV, signalHandlerPrintStackTrace); // segfault signal(SIGFPE, signalHandlerPrintStackTrace); // divide by zero // ... 
return 0; } #if SEQAN_ENABLE_DEBUG // automatically deploy signal handlers that output the stack trace on a trap (in debug mode) template <typename T> struct SignalHandlersDummy_ { static const int i; }; template <typename T> const int SignalHandlersDummy_<T>::i = _deploySignalHandlers(); namespace { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-variable" #endif // ifdef __clang__ volatile int signalHandlersDummy_ = SignalHandlersDummy_<void>::i; #ifdef __clang__ #pragma clang diagnostic pop #endif // ifdef __clang__ } #endif // #if SEQAN_ENABLE_DEBUG #endif // defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO // Namespace for the testing infrastructure. // // This namespace contains the variables and functions that are used // in the macros below to perform the tests. namespace ClassTest { // Raised when an assertion fails in test mode. struct AssertionFailedException {}; // Container for static global data for the tests. struct StaticData { // Number of tests that were run. static int & testCount() { static int result = 0; return result; } // Number of errors that occurred. static int & errorCount() { static int result = 0; return result; } // Number of skipped tests. static int & skippedCount() { static int result = 0; return result; } // Flag whether there was an error in this test. static bool & thisTestOk() { static bool result = 0; return result; } // Flag whether this test was skipped. static bool & thisTestSkipped() { static bool result = 0; return result; } // Name of the current test. static const char * & currentTestName() { const char * defaultValue = ""; static const char * result = const_cast<char *>(defaultValue); return result; } // Base path to the binary. Extrapolated from __FILE__. static char * & basePath() { const char * defaultValue = "."; static char * result = const_cast<char *>(defaultValue); return result; } // Base path to the directory containing "core" and "extras." // Extrapolated from __FILE__. static char * & pathToRoot() { const char * defaultValue = "."; static char * result = const_cast<char *>(defaultValue); return result; } // Total number of checkpoints in header file. static int & totalCheckPointCount() { static int result = 0; return result; } // Total number of checkpoints found in binary files. static int & foundCheckPointCount() { static int result = 0; return result; } // Names of temporary files as returned by tempFileName. This // global state is used to remove any existing such files // after completing the testsuite. static::std::vector<std::string> & tempFileNames() { static::std::vector<std::string> filenames; return filenames; } }; // Open a temporary file, unlink it, return posix handle. Note: This has not been tested yet. // TODO(holtgrew): Not used yet and Windows code does not work. /* inline int openTempFile() { #ifdef PLATFORM_WINDOWS char * fileName = _tempnam(NULL, "SQN"); if (!fileName) { ::std::cerr << "Cannot create a unique temporary filename" << ::std::endl; exit(1); } int result = open(fileName, _O_RDWR | OPEN_TEMPORARY); free(fileName); return result; #else // A Unix... char filenameBuffer[100]; strcpy(filenameBuffer, "/tmp/SEQANXXXXXXXXXX"); int result = mkstemp(filenameBuffer); unlink(filenameBuffer); return result; #endif // ifdef PLATFORM_WINDOWS } */ // Return the path to a temporary file, in a static buffer in this // function. This is not thread safe! 
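// Editor's sketch of typical usage (the file name shown is illustrative only):
//
//   const char * p = tempFileName();  // e.g. "/tmp/SEQAN.XXXXXXXXXXXXXXXXXXXX/test_file"
//   char path[1000];
//   strcpy(path, p);                  // copy it out; the static buffer is
//                                     // reused by the next tempFileName() call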
inline const char * tempFileName() { //IOREV _duplicate_ overlaps with some stuff in system/file_sync.h, should be moved to io-module static char fileNameBuffer[1000]; #ifdef PLATFORM_WINDOWS_VS static char filePathBuffer[1000]; // Gets the temp path env string (no guarantee it's a valid path). DWORD dwRetVal = 0; dwRetVal = GetTempPath(1000, // length of the buffer filePathBuffer); // buffer for path if (dwRetVal > 1000 || (dwRetVal == 0)) { std::cerr << "GetTempPath failed" << std::endl; exit(1); } UINT uRetVal = 0; uRetVal = GetTempFileName(filePathBuffer, // directory for tmp files TEXT("SEQAN."), // temp file name prefix 0, // create unique name fileNameBuffer); // buffer for name if (uRetVal == 0) { std::cerr << "GetTempFileName failed" << std::endl; exit(1); } DeleteFile(fileNameBuffer); CreateDirectoryA(fileNameBuffer, NULL); StaticData::tempFileNames().push_back(fileNameBuffer); strcat(fileNameBuffer, "\\test_file"); return fileNameBuffer; #else // ifdef PLATFORM_WINDOWS_VS strcpy(fileNameBuffer, "/tmp/SEQAN.XXXXXXXXXXXXXXXXXXXX"); #ifdef PLATFORM_WINDOWS_MINGW // There is no mkstemp in MinGW but it does not complain about tmpnam. tmpnam(fileNameBuffer); #else // ifdef PLATFORM_WINDOWS_MINGW int _tmp = mkstemp(fileNameBuffer); (void) _tmp; unlink(fileNameBuffer); mkdir(fileNameBuffer, 0777); StaticData::tempFileNames().push_back(fileNameBuffer); strcat(fileNameBuffer, "/test_file"); #endif // #ifdef PLATFORM_WINDOWS_MINGW return fileNameBuffer; #endif // ifdef PLATFORM_WINDOWS_VS } // Initialize the testing infrastructure. // // Used through SEQAN_BEGIN_TESTSUITE(test_name) inline void beginTestSuite(const char * testSuiteName, const char * argv0) { // First things first: Print test suite name and current debug level. std::cout << "TEST SUITE " << testSuiteName << std::endl; printDebugLevel(std::cout); (void)testSuiteName; StaticData::testCount() = 0; StaticData::skippedCount() = 0; StaticData::errorCount() = 0; StaticData::totalCheckPointCount() = 0; StaticData::foundCheckPointCount() = 0; // Get path to argv0. const char * end = argv0; const char * ptr = std::min(strchr(argv0, '\\'), strchr(argv0, '/')); // On Windows, we can have both \ and /. for (; ptr != 0; ptr = std::min(strchr(ptr + 1, '\\'), strchr(ptr + 1, '/'))) end = ptr; int rpos = end - argv0; if (rpos <= 0) { StaticData::basePath() = new char[2]; strcpy(StaticData::basePath(), "."); } else { int len = rpos; StaticData::basePath() = new char[len]; strncpy(StaticData::basePath(), argv0, len); } // Get path to projects. const char * file = __FILE__; int pos = -1; for (size_t i = 0; i < strlen(file) - strlen("core"); ++i) { if (strncmp(file + i, "core", strlen("core")) == 0) { pos = i; } } for (; pos > 0 && *(file + pos - 1) != '/' && *(file + pos - 1) != '\\'; --pos) continue; if (pos == -1) { std::cerr << "Could not extrapolate path to repository from __FILE__ == \"" << __FILE__ << "\"" << std::endl; exit(1); } StaticData::pathToRoot() = new char[pos]; strncpy(StaticData::pathToRoot(), file, pos); StaticData::pathToRoot()[pos - 1] = '\0'; #ifdef PLATFORM_WINDOWS_VS // Set CRT reporting such that everything goes to stderr and there are // no popups causing timeouts. 
_set_error_mode(_OUT_TO_STDERR); _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR); _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR); _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR); #endif // PLATFORM_WINDOWS_VS } // Run test suite finalization. // // Used through SEQAN_END_TESTSUITE // // Prints a bottom banner with the error count and returns the // program's return code. inline int endTestSuite() { delete[] StaticData::basePath(); delete[] StaticData::pathToRoot(); std::cout << "**************************************" << std::endl; std::cout << " Total Check Points : " << StaticData::totalCheckPointCount() << std::endl; std::cout << " Found Check Points : " << StaticData::foundCheckPointCount() << std::endl; std::cout << " Lost Check Points : " << StaticData::totalCheckPointCount() - StaticData::foundCheckPointCount() << std::endl; std::cout << "--------------------------------------" << std::endl; std::cout << " Total Tests: " << StaticData::testCount() << std::endl; std::cout << " Skipped: " << StaticData::skippedCount() << std::endl; std::cout << " Errors: " << StaticData::errorCount() << std::endl; std::cout << "**************************************" << std::endl; // TODO(holtgrew): Re-enable that all check points have to be found for the test to return 1; /* if (StaticData::totalCheckPointCount() != StaticData::foundCheckPointCount()) return 1; */ // Delete all temporary files that still exist. for (unsigned i = 0; i < StaticData::tempFileNames().size(); ++i) { #ifdef PLATFORM_WINDOWS HANDLE hFind; WIN32_FIND_DATA data; std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("\\*"); hFind = FindFirstFile(temp.c_str(), &data); if (hFind != INVALID_HANDLE_VALUE) { do { std::string tempp = StaticData::tempFileNames()[i].c_str() + std::string("\\") + data.cFileName; DeleteFile(tempp.c_str()); } while (FindNextFile(hFind, &data)); FindClose(hFind); } RemoveDirectory(StaticData::tempFileNames()[i].c_str()); #else // #ifdef PLATFORM_WINDOWS DIR * dpdf; struct dirent * epdf; dpdf = opendir(StaticData::tempFileNames()[i].c_str()); if (dpdf != NULL) { while ((epdf = readdir(dpdf)) != NULL) { std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("/") + std::string(epdf->d_name); unlink(temp.c_str()); } } rmdir(StaticData::tempFileNames()[i].c_str()); #endif // #ifdef PLATFORM_WINDOWS } if (StaticData::errorCount() != 0) return 1; return 0; } // Run test initialization. inline void beginTest(const char * testName) { StaticData::currentTestName() = testName; StaticData::thisTestOk() = true; StaticData::thisTestSkipped() = false; StaticData::testCount() += 1; } // Run test finalization. inline void endTest() { if (StaticData::thisTestSkipped()) { std::cout << StaticData::currentTestName() << " SKIPPED" << std::endl; } else if (StaticData::thisTestOk()) { std::cout << StaticData::currentTestName() << " OK" << std::endl; } else { std::cerr << StaticData::currentTestName() << " FAILED" << std::endl; } } // Marks the current test as "skipped". inline void skipCurrentTest() { StaticData::thisTestSkipped() = true; StaticData::skippedCount() += 1; } // Called by the macro SEQAN_ASSERT_FAIL. inline void forceFail(const char * file, int line, const char * comment, ...) { StaticData::errorCount() += 1; std::cerr << file << ":" << line << " FAILED! 
"; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; } // Similar to forceFail above, but accepting a va_list parameter. inline void vforceFail(const char * file, int line, const char * comment, va_list argp) { StaticData::errorCount() += 1; std::cerr << file << ":" << line << " FAILED! "; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; } // Same as forceFail above, but with comment set to 0. inline void forceFail(const char * file, int line) { forceFail(file, line, 0); } // Called by the macro SEQAN_ASSERT_EQ. // // Tests that the given two value are equal. Returns true iff the // two values are equal. template <typename T1, typename T2> bool testEqual(char const * file, int line, T1 const & value1, char const * expression1, T2 const & value2, char const * expression2, char const * comment, ...) { if (!(value1 == value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " == " << expression2 << " was: " << value1 << " != " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testEqual above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 == value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " == " << expression2 << " was: " << value1 << " != " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testEqual above, but with comment set to 0. template <typename T1, typename T2> bool testEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testEqual(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_IN_DELTA. // // Tests that the given two value are equal. Returns true iff the // two values are equal. template <typename T1, typename T2, typename T3> bool testInDelta(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const T3 & value3, const char * expression3, const char * comment, ...) { if (!(value1 >= value2 - value3 && value1 <= value2 + value3)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. 
std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " in [" << expression2 << " - " << expression3 << ", " << expression2 << " + " << expression3 << "] was: " << value1 << " not in [" << value2 - value3 << ", " << value2 + value3 << "]"; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testInDelta above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2, typename T3> bool vtestInDelta(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const T3 & value3, const char * expression3, const char * comment, va_list argp) { if (!(value1 >= value2 - value3 && value1 <= value2 + value3)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " in [" << expression2 << " - " << expression3 << ", " << expression2 << " + " << expression3 << "] was: " << value1 << " not in [" << value2 - value3 << ", " << value2 + value3 << "]"; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testInDelta above, but with comment set to 0. template <typename T1, typename T2, typename T3> bool testInDelta(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const T3 & value3, const char * expression3) { return testInDelta(file, line, value1, expression1, value2, expression2, value3, expression3, 0); } // Called by the macro SEQAN_ASSERT_NEQ. // // Tests that the given two value are not equal. Returns true iff // the two values are equal. template <typename T1, typename T2> bool testNotEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 != value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " != " << expression2 << " was: " << value1 << " == " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testNotEqual above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestNotEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 != value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " != " << expression2 << " was: " << value1 << " == " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testNotEqual above, but with comment set to 0. 
template <typename T1, typename T2> bool testNotEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testNotEqual(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_GEQ. // // Tests that the first value is greater than or equal to the // second one. Returns true iff the test yields true. template <typename T1, typename T2> bool testGeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 >= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " >= " << expression2 << " was: " << value1 << " < " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testGeq above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestGeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 >= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " >= " << expression2 << " was: " << value1 << " < " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testGeq above, but with comment set to 0. template <typename T1, typename T2> bool testGeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testGeq(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_GT. // // Tests that the first value is greater than the second one. // Returns true iff the test yields true. template <typename T1, typename T2> bool testGt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 > value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " > " << expression2 << " was: " << value1 << " <= " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testGt above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestGt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 > value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. 
std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " > " << expression2 << " was: " << value1 << " <= " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testGt above, but with comment set to 0. template <typename T1, typename T2> bool testGt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testGt(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_LEQ. // // Tests that the first value is less than or equal to the second // one. Returns true iff the test yields true. template <typename T1, typename T2> bool testLeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 <= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " <= " << expression2 << " was: " << value1 << " > " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testLeq above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestLeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 <= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " <= " << expression2 << " was: " << value1 << " > " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testLeq above, but with comment set to 0. template <typename T1, typename T2> bool testLeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testLeq(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_LT. // // Tests that the first value is greater than the second one. // Returns true iff the test yields true. template <typename T1, typename T2> bool testLt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 < value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " < " << expression2 << " was: " << value1 << " >= " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testLt above, but accepts a va_list instead of variadic // parameters. 
template <typename T1, typename T2> bool vtestLt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 < value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " < " << expression2 << " was: " << value1 << " >= " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testLt above, but comment is 0. template <typename T1, typename T2> bool testLt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testLt(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT. // // Test that the given argument evaluates to true. template <typename T> bool testTrue(const char * file, int line, const T & value_, const char * expression_, const char * comment, ...) { if (!(value_)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression_ << " should be true but was " << (value_); if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testTrue above, but accepts a va_list instead of variadic // parameters. template <typename T> bool vtestTrue(const char * file, int line, const T & value_, const char * expression_, const char * comment, va_list argp) { if (!(value_)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression_ << " should be true but was " << (value_); if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testTrue above, but comment will automatically be set to 0. template <typename T> bool testTrue(const char * file, int line, const T & value_, const char * expression_) { return testTrue(file, line, value_, expression_, 0); } // Called by the macro SEQAN_ASSERT. // // Test that the given argument evaluates to false. template <typename T> bool testFalse(const char * file, int line, const T & value_, const char * expression_, const char * comment, ...) { if (value_) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression_ << " should be false but was " << (value_); if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testFalse above, but accepts a va_list instead of variadic // parameters. 
template <typename T> bool vtestFalse(const char * file, int line, const T & value_, const char * expression_, const char * comment, va_list argp) { if (value_) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression_ << " should be false but was " << (value_); if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testFalse above, but comment will automatically be set to 0. template <typename T> bool testFalse(const char * file, int line, const T & value_, const char * expression_) { return testFalse(file, line, value_, expression_, 0); } // Represents a check point in a file. struct CheckPoint { // Path to the file. const char * file; // Line in the file. unsigned int line; // Less-than comparator for check points. bool operator<(const CheckPoint & other) const { int c = strcmp(file, other.file); if (c < 0) return true; if (c == 0 && line < other.line) return true; return false; } }; // Wrapper for a set of check points. // TODO(holtgrew): Simply store the set? struct CheckPointStore { static ::std::set<CheckPoint> & data() { static ::std::set<CheckPoint> result; return result; } }; // Puts the given check point into the CheckPointStore's data. inline bool registerCheckPoint(unsigned int line, const char * file) { const char * file_name = strrchr(file, '/'); const char * file_name_2 = strrchr(file, '\\'); if (file_name_2 > file_name) file_name = file_name_2; if (!file_name) file_name = file; else ++file_name; CheckPoint cp = {file_name, line}; #ifdef _OPENMP #pragma omp critical #endif // #ifdef _OPENMP CheckPointStore::data().insert(cp); return true; } // Test whether the given check point exists in the check point // store. inline void testCheckPoint(const char * file, unsigned int line) { StaticData::totalCheckPointCount() += 1; CheckPoint cp = {file, line}; if (CheckPointStore::data().find(cp) == CheckPointStore::data().end()) { std::cerr << file << ":" << line << " -- Check point lost." << std::endl; return; } StaticData::foundCheckPointCount() += 1; } // Verify the check points for the given file. inline void verifyCheckPoints(const char * file) { char const * file_name = strrchr(file, '/'); char const * file_name_2 = strrchr(file, '\\'); if (file_name_2 > file_name) file_name = file_name_2; if (!file_name) file_name = file; else ++file_name; int len = strlen(StaticData::pathToRoot()) + strlen("/") + strlen(file) + 1; char * absolutePath = new char[len]; absolutePath[0] = '\0'; strcat(absolutePath, StaticData::pathToRoot()); strcat(absolutePath, "/"); strcat(absolutePath, file); FILE * fl = ::std::fopen(absolutePath, "r"); delete[] absolutePath; if (!fl) { std::cerr << file << " -- verifyCheckPoints could not find this file." << std::endl; return; } unsigned int line_number = 1; char buf[1 << 16]; while (::std::fgets(buf, sizeof(buf), fl)) { if (::std::strstr(buf, "SEQAN_CHECKPOINT")) { testCheckPoint(file_name, line_number); } ++line_number; } ::std::fclose(fl); } #if SEQAN_ENABLE_TESTING // If in testing mode then raise an AssertionFailedException. inline void fail() { StaticData::thisTestOk() = false; printStackTrace(20); throw AssertionFailedException(); } #else // If not in testing mode then quit with an abort. 
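// (Editor's note, added: the throwing variant above lets SEQAN_CALL_TEST,
// defined further below, catch the AssertionFailedException and continue with
// the next test; the abort() variant below terminates the program at once.)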
inline void fail() { printStackTrace(20); abort(); } #endif // #if SEQAN_ENABLE_TESTING } // namespace ClassTest /** .Macro.SEQAN_DEFINE_TEST ..summary:Expand to test definition. ..cat:Testing & Debugging ..signature:SEQAN_DEFINE_TEST(test_name) ..param.test_name:The name of the test. ..remarks:This macro expands to the definition of a $void$ function with $SEQAN_TEST_ + test_name$ as its name. ..example.code: SEQAN_DEFINE_TEST(test_name) { SEQAN_ASSERT_LT(0, 3); } ..see:Macro.SEQAN_SKIP_TEST ..see:Macro.SEQAN_CALL_TEST ..see:Macro.SEQAN_BEGIN_TESTSUITE ..see:Macro.SEQAN_END_TESTSUITE */ // This macro expands to function header for one test. #define SEQAN_DEFINE_TEST(test_name) \ template <bool speed_up_dummy_to_prevent_compilation_of_unused_tests_> \ void SEQAN_TEST_ ## test_name() /** .Macro.SEQAN_BEGIN_TESTSUITE ..summary:Expand to a test suite beginning. ..cat:Testing & Debugging ..signature:SEQAN_BEGIN_TESTSUITE(name) ..param.name:The name of the test suite. ..remarks:This macro expands to a $main()$ function and some initialization code that sets up the test system. ..example.code: #include <seqan/basic.h> SEQAN_BEGIN_TESTSUITE(test_foo) { SEQAN_CALL_TEST(test_foo_my_test); } SEQAN_END_TESTSUITE ..see:Macro.SEQAN_SKIP_TEST ..see:Macro.SEQAN_DEFINE_TEST ..see:Macro.SEQAN_CALL_TEST ..see:Macro.SEQAN_END_TESTSUITE */ #if SEQAN_ENABLE_TESTING // This macro expands to startup code for a test file. #define SEQAN_BEGIN_TESTSUITE(suite_name) \ int main(int argc, char ** argv) { \ (void) argc; \ ::seqan::ClassTest::beginTestSuite(# suite_name, argv[0]); /** .Macro.SEQAN_END_TESTSUITE ..summary:Expand to a test suite ending. ..cat:Testing & Debugging ..signature:SEQAN_END_TESTSUITE ..remarks:This macro expands to finalization code for a test suite. ..example.code: #include <seqan/basic.h> SEQAN_BEGIN_TESTSUITE(test_foo) { SEQAN_CALL_TEST(test_foo_my_test); } SEQAN_END_TESTSUITE ..see:Macro.SEQAN_SKIP_TEST ..see:Macro.SEQAN_DEFINE_TEST ..see:Macro.SEQAN_CALL_TEST ..see:Macro.SEQAN_BEGIN_TESTSUITE */ // This macro expands to shutdown code for a test file. #define SEQAN_END_TESTSUITE \ return ::seqan::ClassTest::endTestSuite(); \ } /** .Macro.SEQAN_CALL_TEST ..summary:Expand to calling a test. ..cat:Testing & Debugging ..signature:SEQAN_CALL_TEST(test_name) ..param.test_name:The name of the test. ..remarks:This expects the test to be defined with @Macro.SEQAN_DEFINE_TEST@. This macro will expand to code that calls the code inside a try/catch block. Use this macro within a test suite, only. ..example.code: // Within a test suite. SEQAN_CALL_TEST(test_name); ..see:Macro.SEQAN_SKIP_TEST ..see:Macro.SEQAN_DEFINE_TEST ..see:Macro.SEQAN_BEGIN_TESTSUITE ..see:Macro.SEQAN_END_TESTSUITE */ // This macro expands to code to call a given test. #define SEQAN_CALL_TEST(test_name) \ do { \ ::seqan::ClassTest::beginTest(# test_name); \ try { \ SEQAN_TEST_ ## test_name<true>(); \ } catch (::seqan::ClassTest::AssertionFailedException e) { \ /* Swallow exception, go on with next test. */ \ (void) e; /* Get rid of unused variable warning. */ \ } \ ::seqan::ClassTest::endTest(); \ } while (false) /** .Macro.SEQAN_SKIP_TEST ..cat:Testing & Debugging ..summary:Force the test to return without failing and mark it as skipped. 
..signature:SEQAN_SKIP_TEST ..example.code: SEQAN_DEFINE_TEST(test_skipped) { SEQAN_SKIP_TEST; } ..see:Macro.SEQAN_DEFINE_TEST ..see:Macro.SEQAN_CALL_TEST ..see:Macro.SEQAN_BEGIN_TESTSUITE ..see:Macro.SEQAN_END_TESTSUITE */ // This macro returns from the current function and logs a "skipped" // event for the current test. #define SEQAN_SKIP_TEST \ do { \ ::seqan::ClassTest::skipCurrentTest(); \ return; \ } while (false) #endif // #if SEQAN_ENABLE_TESTING // variadic macros are not supported by VS 2003 and before #if !defined(_MSC_VER) || (_MSC_VER >= 1400) #if SEQAN_ENABLE_DEBUG /** .Macro.SEQAN_ASSERT ..cat:Assertions ..summary:Test that the given expression can be coerced to $true$. ..signature:SEQAN_ASSERT(expression) ..signature:SEQAN_ASSERT_MSG(expression, message[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT(0); // will fail SEQAN_ASSERT(1); // will run through SEQAN_ASSERT_MSG(0, "message %d", 2); // Will fail with message. ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_NOT ..cat:Assertions ..summary:Test that the given expression can be coerced to $false$. ..signature:SEQAN_ASSERT(expression) ..signature:SEQAN_ASSERT_MSG(expression, message[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_NOT(0); // will run through SEQAN_ASSERT_NOT(1); // will fail SEQAN_ASSERT_NOT_MSG(0, "msg %s", "test"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_EQ ..cat:Assertions ..summary:Test that two given expressions are equal, as defined by the matching call to the $operator=(,)$. ..signature:SEQAN_ASSERT_EQ(expression1, expression2) ..signature:SEQAN_ASSERT_EQ_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. 
..example.code: SEQAN_ASSERT_EQ(0, false); // will run through SEQAN_ASSERT_EQ(1, false); // will fail SEQAN_ASSERT_EQ(1, "foo"); // will not compile SEQAN_ASSERT_EQ_MSG(1, false, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_NEQ ..cat:Assertions ..summary:Test that two given expressions are not equal, as defined by the matching call to the $operator!=(,)$. ..signature:SEQAN_ASSERT_NEQ(expression) ..signature:SEQAN_ASSERT_NEQ_MSG(expression, message[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_NEQ(0, false); // will fail SEQAN_ASSERT_NEQ(1, false); // will run through SEQAN_ASSERT_NEQ(1, "foo"); // will not compile SEQAN_ASSERT_NEQ_MSG(1, false, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_LT ..cat:Assertions ..summary:Test that the two given expressions are in the less-than relation as defined by the matching call to operator<(,). ..signature:SEQAN_ASSERT_LT(expression1, expression2) ..signature:SEQAN_ASSERT_LT(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_LT(0, 1); // will run through SEQAN_ASSERT_LT(1, 1); // will not run through SEQAN_ASSERT_LT_MSG(1, 1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_LEQ ..cat:Assertions ..summary:Test that the two given expressions are in the less-than-or-equal relation as defined by the matching call to operator<=(,). ..signature:SEQAN_ASSERT_LEQ(expression1, expression2) ..signature:SEQAN_ASSERT_LEQ_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. 
..example.code: SEQAN_ASSERT_LEQ(1, 1); // will run through SEQAN_ASSERT_LEQ(2, 1); // will not run through SEQAN_ASSERT_LEQ_MSG(2, 1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_GT ..cat:Assertions ..summary:Test that the two given expressions are in the greater-than relation as defined by the matching call to $operator>(,)$. ..signature:SEQAN_ASSERT_GT(expression1, expression2) ..signature:SEQAN_ASSERT_GT_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its arguments on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_GT(2, 1); // will run through SEQAN_ASSERT_GT(1, 1); // will not run through SEQAN_ASSERT_GT_MSG(1, 1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_GEQ ..cat:Assertions ..summary:Test that the two given expressions are in the greater-than-or-equal relation as defined by the matching call to $operator>=(,)$. ..signature:SEQAN_ASSERT_GEQ(expression1, expression2) ..signature:SEQAN_ASSERT_GEQ_MSG(expression1, expression2, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its arguments on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings. ..example.code: SEQAN_ASSERT_GEQ(1, 1); // will run through SEQAN_ASSERT_GEQ(0, 1); // will not run through SEQAN_ASSERT_GEQ_MSG(0, 1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_ASSERT_IN_DELTA ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL .Macro.SEQAN_ASSERT_IN_DELTA ..cat:Assertions ..summary:Test that the values of the two given expressions are within $delta$ of each other. ..signature:SEQAN_ASSERT_IN_DELTA(x, y, delta) ..signature:SEQAN_ASSERT_IN_DELTA_MSG(x, y, delta, comment[, parameters]) ..remarks:The main advantage of this macro is that it prints the values of its arguments on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call. ..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code: SEQAN_ASSERT_IN_DELTA(0, 0, 0.1); // will run through SEQAN_ASSERT_IN_DELTA(1, -2, 1); // will fail SEQAN_ASSERT_IN_DELTA(1, "foo", 0.1); // will not compile SEQAN_ASSERT_IN_DELTA_MSG(1, 0, 0.1, "msg"); // will fail with message ..see:Macro.SEQAN_ASSERT ..see:Macro.SEQAN_ASSERT_NOT ..see:Macro.SEQAN_ASSERT_EQ ..see:Macro.SEQAN_ASSERT_NEQ ..see:Macro.SEQAN_ASSERT_LEQ ..see:Macro.SEQAN_ASSERT_GEQ ..see:Macro.SEQAN_ASSERT_LT ..see:Macro.SEQAN_ASSERT_GT ..see:Macro.SEQAN_CHECK ..see:Macro.SEQAN_FAIL */ // Force a test failure. // // Usage: SEQAN_ASSERT_FAIL("Failed at position %d", pos); #define SEQAN_ASSERT_FAIL(...) \ do { \ ::seqan::ClassTest::forceFail(__FILE__, __LINE__, \ __VA_ARGS__); \ ::seqan::ClassTest::fail(); \ } while (false) // Equality assertion without a comment. // // Usage: SEQAN_ASSERT_EQ(4, 4); #define SEQAN_ASSERT_EQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Equality assertion with a comment. // // Usage: SEQAN_ASSERT_EQ_MSG(4, 4, "msg"); #define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // In-delta-environment assertion without a comment. // // Usage: SEQAN_ASSERT_IN_DELTA(4.1, 4, 0.1); #define SEQAN_ASSERT_IN_DELTA(_arg1, _arg2, _arg3) \ do { \ if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ (_arg3), # _arg3)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // In-delta-environment assertion with a comment. // // Usage: SEQAN_ASSERT_IN_DELTA_MSG(4.1, 4, 0.1, "3.9 <= 4.1 <= 4.1"); #define SEQAN_ASSERT_IN_DELTA_MSG(_arg1, _arg2, _arg3, ...) \ do { \ if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ (_arg3), # _arg3, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Inequality assertion without a comment. // // Usage: SEQAN_ASSERT_NEQ(4, 5); #define SEQAN_ASSERT_NEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Inequality assertion with a comment. // // Usage: SEQAN_ASSERT_NEQ_MSG(4, 5, "msg"); #define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than-or-equal assertion without a comment. #define SEQAN_ASSERT_LEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than-or-equal assertion with a comment. #define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than assertion without a comment. #define SEQAN_ASSERT_LT(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than assertion with a comment. #define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...)
\ do { \ if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than-or-equal assertion without a comment. #define SEQAN_ASSERT_GEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than-or-equal assertion with a comment. #define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than assertion without a comment. #define SEQAN_ASSERT_GT(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than assertion with a comment. #define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free. // Trueness assertion without a comment. // // Usage: SEQAN_ASSERT(false); #define SEQAN_ASSERT(_arg1) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), # _arg1)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free. // Trueness assertion with a comment. #define SEQAN_ASSERT_MSG(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), # _arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Falseness assertion without a comment. // // Usage: SEQAN_ASSERT_NOT(false); #define SEQAN_ASSERT_NOT(_arg1) \ do { \ if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \ (_arg1), # _arg1)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Falseness assertion with a comment. #define SEQAN_ASSERT_NOT_MSG(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \ (_arg1), # _arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) #else // #if SEQAN_ENABLE_DEBUG #define SEQAN_ASSERT_EQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_IN_DELTA(_arg1, _arg2, _arg3) do {} while (false) #define SEQAN_ASSERT_IN_DELTA_MSG(_arg1, _arg2, _arg3, ...) do {} while (false) #define SEQAN_ASSERT_NEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_LEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_LT(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_GEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_GT(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT(_arg1) do {} while (false) #define SEQAN_ASSERT_MSG(_arg1, ...) do {} while (false) #define SEQAN_ASSERT_NOT(_arg1) do {} while (false) #define SEQAN_ASSERT_NOT_MSG(_arg1, ...) do {} while (false) #define SEQAN_ASSERT_FAIL(...) do {} while (false) #endif // #if SEQAN_ENABLE_DEBUG #else // no variadic macros #if SEQAN_ENABLE_DEBUG inline void SEQAN_ASSERT_FAIL(const char * comment, ...)
{ va_list args; va_start(args, comment); ::seqan::ClassTest::vforceFail("", 0, comment, args); ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3) { if (!::seqan::ClassTest::testInDelta("", 0, _arg1, "", _arg2, "", _arg3, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestInDelta("", 0, _arg1, "", _arg2, "", _arg3, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testEqual("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestEqual("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testNotEqual("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestNotEqual("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testLeq("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestLeq("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testLt("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestLt("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testGeq("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestGeq("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testGt("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{ va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestGt("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1> void SEQAN_ASSERT(T1 const & _arg1) { if (!::seqan::ClassTest::testTrue("", 0, _arg1, "")) ::seqan::ClassTest::fail(); } template <typename T1> void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestTrue("", 0, _arg1, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1> void SEQAN_ASSERT_NOT(T1 const & _arg1) { if (!::seqan::ClassTest::testFalse("", 0, _arg1, "")) ::seqan::ClassTest::fail(); } template <typename T1> void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestFalse("", 0, _arg1, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } #else // #if SEQAN_ENABLE_DEBUG inline void SEQAN_ASSERT_FAIL(const char * comment, ...) {} template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3) {} template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {} template <typename T1> void SEQAN_ASSERT(T1 const & _arg1) {} template <typename T1> void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...) {} template <typename T1> void SEQAN_ASSERT_NOT(T1 const & _arg1) {} template <typename T1> void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...) {} #endif // #if SEQAN_ENABLE_DEBUG #endif // no variadic macros // Returns a string (of type char*) with the path to the called binary. // // Use this to locate files relative to the test binary. #define SEQAN_PROGRAM_PATH \ ::seqan::ClassTest::StaticData::basePath() // TODO(holtgrew): Subject to change with restructuring. /** .Macro.SEQAN_PATH_TO_ROOT ..cat:Testing & Debugging ..summary:Return path to the checkout root directory (i.e. containing core/extras).
..returns:$char const *$, string with the path to the parent directory of the tests directory. ..signature:SEQAN_PATH_TO_ROOT() ..remarks:The pointed to string is initialized on program startup by the code generated by @Macro.SEQAN_BEGIN_TESTSUITE@. ..example.code: const char *p = SEQAN_PATH_TO_ROOT(); char buffer[1000]; strcpy(buffer, p); strcat(buffer, "/tests/files/example.txt"); FILE *f = fopen(buffer, "w"); fprintf(f, "Test Data"); fclose(f); ..see:Macro.SEQAN_TEMP_FILENAME */ // Returns a const char * string with the path to the projects directory. #define SEQAN_PATH_TO_ROOT() \ ::seqan::ClassTest::StaticData::pathToRoot() // Returns the POSIX int file handle to an open file. // TODO(holtgrew): Uncomment if openTempFile has been implemented. // #define SEQAN_OPEN_TEMP_FILE() (::seqan::ClassTest::openTempFile()) /** .Macro.SEQAN_TEMP_FILENAME ..cat:Testing & Debugging ..summary:Generates the name of a temporary file. ..returns:$char const *$, string with the path to a temporary file. ..signature:SEQAN_TEMP_FILENAME() ..remarks:The pointed to string is stored in a buffer and is overwritten by the next call to this macro. Copy it out if you need it. ..example.code: const char *p = SEQAN_TEMP_FILENAME(); char tempFilename[1000]; strcpy(tempFilename, p); FILE *f = fopen(tempFilename, "w"); fprintf(f, "Test Data"); fclose(f); ..see:Macro.SEQAN_PATH_TO_ROOT */ // Returns a temporary filename. #define SEQAN_TEMP_FILENAME() (::seqan::ClassTest::tempFileName()) /** .Macro.SEQAN_VERIFY_CHECKPOINTS ..cat:Testing & Debugging ..summary:Verify check points for the given file name. ..signature:SEQAN_VERIFY_CHECKPOINTS(path) ..param.path:Path to the file to verify check points for. Relative to parent directory of tests. ..example.code: SEQAN_VERIFY_CHECKPOINTS("core/include/seqan/basic_alphabet.h"); ..see:Macro.SEQAN_CHECKPOINT .Macro.SEQAN_CHECKPOINT ..cat:Testing & Debugging ..summary:Generate a check point. ..signature:SEQAN_CHECKPOINT ..remarks:Whenever the code executes the instructions generated by this macro, the check point for this line will be set in the global testing state. Use @Macro.SEQAN_VERIFY_CHECKPOINTS@ to verify whether all checkpoints have been reached in a file up to this point. ..example.code: SEQAN_CHECKPOINT; ..see:Macro.SEQAN_VERIFY_CHECKPOINTS */ #if SEQAN_ENABLE_CHECKPOINTS // Create a check point at the point where the macro is placed. // TODO(holtgrew): Should be called SEQAN_CHECK_POINT to be consistent. #define SEQAN_CHECKPOINT \ ::seqan::ClassTest::registerCheckPoint(__LINE__, __FILE__); // Call the check point verification code for the given file. #define SEQAN_VERIFY_CHECKPOINTS(filename) \ ::seqan::ClassTest::verifyCheckPoints(filename) #else // #if SEQAN_ENABLE_CHECKPOINTS #define SEQAN_CHECKPOINT // If check point verification is requested while checkpoints are disabled, print // a warning. #define SEQAN_VERIFY_CHECKPOINTS(filename) \ do { \ fprintf(stderr, ("WARNING: Check point verification is " \ "disabled.
Trying to verify %s from %s:%d.\n"), \ filename, __FILE__, __LINE__); \ } while (false) #endif // #if SEQAN_ENABLE_CHECKPOINTS #if !SEQAN_ENABLE_TESTING #define SEQAN_BEGIN_TESTSUITE(suite_name) \ int main(int argc, char ** argv) { \ (void) argc; \ (void) argv; \ fprintf(stderr, "Warning: SEQAN_ENABLE_TESTING is wrong and you used the macro SEQAN_BEGIN_TESTSUITE!\n"); #define SEQAN_END_TESTSUITE \ return 0; \ } #define SEQAN_CALL_TEST(test_name) do { SEQAN_TEST_ ## test_name(); } while (false) #define SEQAN_SKIP_TEST do {} while (false) #endif // #if !SEQAN_ENABLE_TESTING } // namespace seqan #endif // SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
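For orientation, a minimal sketch of how the test-system macros documented above fit together in a test driver. This is not part of the header; it assumes a SeqAn checkout on the include path and SEQAN_ENABLE_TESTING set to 1, and the test and suite names (test_demo_arithmetic, test_demo_skipped, test_demo_suite) are hypothetical.

// Must be set before including the SeqAn headers so the test system is active.
#define SEQAN_ENABLE_TESTING 1
#include <seqan/basic.h>

// A test is a named function body; failures are reported with file and line.
SEQAN_DEFINE_TEST(test_demo_arithmetic)
{
    SEQAN_ASSERT_EQ(2 + 2, 4);
    SEQAN_ASSERT_NEQ(2 + 2, 5);
    SEQAN_ASSERT_IN_DELTA(0.1 + 0.2, 0.3, 1e-9);
}

SEQAN_DEFINE_TEST(test_demo_skipped)
{
    SEQAN_SKIP_TEST;  // logs a "skipped" event and returns from the test
}

// SEQAN_BEGIN_TESTSUITE generates main() and the startup code that
// initializes SEQAN_PATH_TO_ROOT(); SEQAN_END_TESTSUITE prints the summary.
SEQAN_BEGIN_TESTSUITE(test_demo_suite)
{
    SEQAN_CALL_TEST(test_demo_arithmetic);
    SEQAN_CALL_TEST(test_demo_skipped);
}
SEQAN_END_TESTSUITE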
GB_unop__identity_int32_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int32_int32) // op(A') function: GB (_unop_tran__identity_int32_int32) // C type: int32_t // A type: int32_t // cast: int32_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int32_t z = aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 1 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int32_int32) ( int32_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; int32_t z = aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; int32_t z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int32_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int32_int32) // op(A') function: GB (_unop_tran__identity_int32_int32) // C type: int32_t // A type: int32_t // cast: int32_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int32_t z = aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 1 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int32_int32) ( int32_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ; #else for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; int32_t z = aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; int32_t z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int32_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int32_int32) // op(A') function: GB (_unop_tran__identity_int32_int32) // C type: int32_t // A type: int32_t // cast: int32_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int32_t z = aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 1 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int32_int32) ( int32_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; int32_t z = aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; int32_t z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int32_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
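The identity kernel above has two paths: when A is full (Ab == NULL) and the operator is the identity with no typecast, the whole apply collapses to a single memcpy; in the bitmap case only entries with Ab[p] != 0 are copied. A minimal standalone sketch of that control flow, not using the GraphBLAS internals; demo_identity_apply is a hypothetical stand-in for GB (_unop_apply__identity_int32_int32):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical stand-in: identity op with no typecast over int32_t. */
static void demo_identity_apply (int32_t *Cx, const int32_t *Ax,
                                 const int8_t *Ab, int64_t anz)
{
    if (Ab == NULL)
    {
        /* full case: identity with no typecast reduces to one memcpy */
        memcpy (Cx, Ax, anz * sizeof (int32_t)) ;
    }
    else
    {
        /* bitmap case: copy only the entries present in the bitmap */
        for (int64_t p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = Ax [p] ;
        }
    }
}

int main (void)
{
    int32_t Ax [4] = { 10, 20, 30, 40 } ;
    int8_t  Ab [4] = { 1, 0, 1, 1 } ;     /* entry 1 is absent */
    int32_t Cx [4] = { 0, 0, 0, 0 } ;
    demo_identity_apply (Cx, Ax, Ab, 4) ;
    for (int p = 0 ; p < 4 ; p++) printf ("%d ", Cx [p]) ;
    printf ("\n") ;                       /* prints: 10 0 30 40 */
    return 0 ;
}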
GB_binop__lor_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lor_int32 // A.*B function (eWiseMult): GB_AemultB__lor_int32 // A*D function (colscale): GB_AxD__lor_int32 // D*A function (rowscale): GB_DxB__lor_int32 // C+=B function (dense accum): GB_Cdense_accumB__lor_int32 // C+=b function (dense accum): GB_Cdense_accumb__lor_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_int32 // C=scalar+B GB_bind1st__lor_int32 // C=scalar+B' GB_bind1st_tran__lor_int32 // C=A+scalar GB_bind2nd__lor_int32 // C=A'+scalar GB_bind2nd_tran__lor_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) || (y != 0)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_INT32 || GxB_NO_LOR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lor_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lor_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lor_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lor_int32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lor_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__lor_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, 
const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lor_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lor_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lor_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB_bind1st_tran__lor_int32 ( GrB_Matrix C, 
const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB_bind2nd_tran__lor_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
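The bind1st/bind2nd kernels in this file apply z = ((x != 0) || (y != 0)) with one operand bound to a scalar. A standalone sketch of the bind1st pattern under the same LOR semantics; demo_bind1st_lor is hypothetical, and the bitmap test that GBB (Bb, p) performs is written out as (Bb == NULL || Bb [p]) here:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for GB_bind1st__lor_int32:
   Cx [p] = (x || Bx [p]), skipping entries absent from the bitmap Bb
   (Bb == NULL means all entries are present). */
static void demo_bind1st_lor (int32_t *Cx, int32_t x, const int32_t *Bx,
                              const int8_t *Bb, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (Bb != NULL && !Bb [p]) continue ;   /* same role as GBB (Bb, p) */
        int32_t bij = Bx [p] ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
}

int main (void)
{
    int32_t Bx [4] = { 0, 7, 0, -3 } ;
    int32_t Cx [4] = { 9, 9, 9, 9 } ;
    demo_bind1st_lor (Cx, 0, Bx, NULL, 4) ;     /* scalar x bound to 0 */
    for (int p = 0 ; p < 4 ; p++) printf ("%d ", Cx [p]) ;
    printf ("\n") ;                             /* prints: 0 1 0 1 */
    return 0 ;
}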
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lor_int32 // A.*B function (eWiseMult): GB_AemultB__lor_int32 // A*D function (colscale): GB_AxD__lor_int32 // D*A function (rowscale): GB_DxB__lor_int32 // C+=B function (dense accum): GB_Cdense_accumB__lor_int32 // C+=b function (dense accum): GB_Cdense_accumb__lor_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_int32 // C=scalar+B GB_bind1st__lor_int32 // C=scalar+B' GB_bind1st_tran__lor_int32 // C=A+scalar GB_bind2nd__lor_int32 // C=A'+scalar GB_bind2nd_tran__lor_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) || (y != 0)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_INT32 || GxB_NO_LOR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lor_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lor_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lor_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lor_int32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lor_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__lor_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, 
const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lor_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lor_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lor_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB_bind1st_tran__lor_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB_bind2nd_tran__lor_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
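The next copy of the file restores the OpenMP pragmas that the variant above strips. The parallelization pattern is uniform across these generated kernels: a static-schedule parallel for over the entry index p, with nthreads chosen by the caller, and no synchronization needed because each iteration writes only Cx [p]. A minimal sketch of that pattern; fill_squares is hypothetical, and the pragma requires compiling with OpenMP enabled (e.g. -fopenmp), otherwise the loop simply runs serially, which is exactly the no-OMP variant:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical illustration of the generated kernels' OpenMP loop shape. */
static void fill_squares (int64_t *Cx, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = p * p ;    /* independent per-entry work, no races */
    }
}

int main (void)
{
    int64_t Cx [8] ;
    fill_squares (Cx, 8, 4) ;
    for (int p = 0 ; p < 8 ; p++) printf ("%lld ", (long long) Cx [p]) ;
    printf ("\n") ;          /* prints: 0 1 4 9 16 25 36 49 */
    return 0 ;
}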
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lor_int32 // A.*B function (eWiseMult): GB_AemultB__lor_int32 // A*D function (colscale): GB_AxD__lor_int32 // D*A function (rowscale): GB_DxB__lor_int32 // C+=B function (dense accum): GB_Cdense_accumB__lor_int32 // C+=b function (dense accum): GB_Cdense_accumb__lor_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_int32 // C=scalar+B GB_bind1st__lor_int32 // C=scalar+B' GB_bind1st_tran__lor_int32 // C=A+scalar GB_bind2nd__lor_int32 // C=A'+scalar GB_bind2nd_tran__lor_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) || (y != 0)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_INT32 || GxB_NO_LOR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lor_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lor_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lor_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lor_int32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lor_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__lor_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, 
const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lor_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lor_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lor_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB_bind1st_tran__lor_int32 ( GrB_Matrix C, 
const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB_bind2nd_tran__lor_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
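All of the generated kernels above reduce to the one scalar rule cij = ((aij != 0) || (bij != 0)). Below is a minimal standalone sketch of what GB_bind1st__lor_int32 computes, stripped of the GraphBLAS scaffolding (the GBB bitmap test, the GB_DISABLE guard, and the OpenMP loop); the helper name lor_bind1st is hypothetical, not part of SuiteSparse:GraphBLAS.

#include <stdio.h>
#include <stdint.h>

/* Cx [p] = (x LOR Bx [p]) for all p, the core of GB_bind1st__lor_int32.
   The name lor_bind1st is hypothetical. */
static void lor_bind1st (int32_t *Cx, int32_t x, const int32_t *Bx, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int32_t bij = Bx [p] ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
}

int main (void)
{
    int32_t Bx [4] = { 0, 7, 0, -3 } ;
    int32_t Cx [4] ;
    lor_bind1st (Cx, 0, Bx, 4) ;     /* expect: 0 1 0 1 */
    for (int p = 0 ; p < 4 ; p++) printf ("%d ", (int) Cx [p]) ;
    printf ("\n") ;
    return (0) ;
}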
omp-unique-threadid.c
/*****************************************************************************************
 Example 1.1 : omp-unique-threadid.c

 Objective   : Write a simple OpenMP program to print a unique number for each thread
               started by #pragma omp parallel. This example demonstrates the use of
               the OpenMP PARALLEL directive and the omp_get_thread_num() call.

 Input       : Set the number of threads to use by means of the OMP_NUM_THREADS
               environment variable.
               For C shell use command    : setenv OMP_NUM_THREADS 4
               For bash shell use command : export OMP_NUM_THREADS=4

 Output      : Each thread prints its thread id.

 Created     : Aug 2011
 Author      : RarchK
******************************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Main Program */
int main(int argc, char **argv)
{
    int threadid, Noofthreads;

    printf("\n\t\t---------------------------------------------------------------------------");
    printf("\n\t\t Email : RarchK");
    printf("\n\t\t---------------------------------------------------------------------------");
    printf("\n\t\t Objective : OpenMP program to print unique thread identifier for ");
    printf("\n\t\t each thread using OpenMP PARALLEL directive.");
    printf("\n\t\t..........................................................................\n");

    printf("\n\n\t\t Master thread prints this before forking the team of threads \n");

    /* Set the number of threads */
    /* omp_set_num_threads(4); */

    /* OpenMP Parallel Construct : Fork a team of threads */
    #pragma omp parallel private(threadid)
    {
        /* Obtain the thread id */
        threadid = omp_get_thread_num();

        /* Each thread prints its thread id */
        printf("\n\t\t My thread id is : %d\n", threadid);
    }

    /* All threads join the master thread */
    printf("\n\t\t Master thread prints this after the end of the parallel region \n \n");

    return 0;
}
/*****************************************************************************************
 Example 1.1 : omp-unique-threadid.c

 Objective   : Write a simple OpenMP program to print a unique number for each thread
               started by #pragma omp parallel. This example demonstrates the use of
               the OpenMP PARALLEL directive and the omp_get_thread_num() call.

 Input       : Set the number of threads to use by means of the OMP_NUM_THREADS
               environment variable.
               For C shell use command    : setenv OMP_NUM_THREADS 4
               For bash shell use command : export OMP_NUM_THREADS=4

 Output      : Each thread prints its thread id.

 Created     : Aug 2011
 Author      : RarchK
******************************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Main Program */
int main(int argc, char **argv)
{
    int threadid, Noofthreads;

    printf("\n\t\t---------------------------------------------------------------------------");
    printf("\n\t\t Email : RarchK");
    printf("\n\t\t---------------------------------------------------------------------------");
    printf("\n\t\t Objective : OpenMP program to print unique thread identifier for ");
    printf("\n\t\t each thread using OpenMP PARALLEL directive.");
    printf("\n\t\t..........................................................................\n");

    printf("\n\n\t\t Master thread prints this before forking the team of threads \n");

    /* Set the number of threads */
    /* omp_set_num_threads(4); */

    /* OpenMP Parallel Construct : Fork a team of threads */

    /* Obtain the thread id */
    threadid = omp_get_thread_num();

    /* Each thread prints its thread id */
    printf("\n\t\t My thread id is : %d\n", threadid);

    /* All threads join the master thread */
    printf("\n\t\t Master thread prints this after the end of the parallel region \n \n");

    return 0;
}
/*****************************************************************************************
 Example 1.1 : omp-unique-threadid.c

 Objective   : Write a simple OpenMP program to print a unique number for each thread
               started by #pragma omp parallel. This example demonstrates the use of
               the OpenMP PARALLEL directive and the omp_get_thread_num() call.

 Input       : Set the number of threads to use by means of the OMP_NUM_THREADS
               environment variable.
               For C shell use command    : setenv OMP_NUM_THREADS 4
               For bash shell use command : export OMP_NUM_THREADS=4

 Output      : Each thread prints its thread id.

 Created     : Aug 2011
 Author      : RarchK
******************************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Main Program */
int main(int argc, char **argv)
{
    int threadid, Noofthreads;

    printf("\n\t\t---------------------------------------------------------------------------");
    printf("\n\t\t Email : RarchK");
    printf("\n\t\t---------------------------------------------------------------------------");
    printf("\n\t\t Objective : OpenMP program to print unique thread identifier for ");
    printf("\n\t\t each thread using OpenMP PARALLEL directive.");
    printf("\n\t\t..........................................................................\n");

    printf("\n\n\t\t Master thread prints this before forking the team of threads \n");

    /* Set the number of threads */
    /* omp_set_num_threads(4); */

    /* OpenMP Parallel Construct : Fork a team of threads */
    #pragma omp parallel private(threadid)
    {
        /* Obtain the thread id */
        threadid = omp_get_thread_num();

        /* Each thread prints its thread id */
        printf("\n\t\t My thread id is : %d\n", threadid);
    }

    /* All threads join the master thread */
    printf("\n\t\t Master thread prints this after the end of the parallel region \n \n");

    return 0;
}
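The unused Noofthreads variable above hints at a natural extension: have one thread also report the team size. A hedged sketch follows (build with, e.g., gcc -fopenmp); note that omp_get_num_threads() returns the team size only when called inside the parallel region, otherwise it returns 1.

#include <stdio.h>
#include <omp.h>

int main(void)
{
    #pragma omp parallel
    {
        int threadid = omp_get_thread_num();   /* unique id in 0 .. team size - 1 */

        /* Only the master thread (id 0) reports the team size. */
        #pragma omp master
        printf("Number of threads : %d\n", omp_get_num_threads());

        printf("My thread id is : %d\n", threadid);
    }
    return 0;
}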
gol.h
#ifndef GoL_H #define GoL_H #include <stdlib.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> // Enable OpenMP support #endif #ifdef GoL_MPI #include <mpi.h> // Enable MPI support #endif // Custom includes #include "../../include/globals.h" #include "../../include/utils/log.h" #include "../../include/utils/func.h" #include "../../include/utils/parse.h" #include "../../include/life/init.h" /** * Swap the memory pointers between two 2D matrices. */ void swap_grids(bool ***old, bool ***new) { bool **temp = *old; *old = *new; *new = temp; } /** * Print to console the status of the current GoL board: the number of ALIVE and DEAD cells. */ void get_grid_status(life_t life) { int i, j; int ncols = life.ncols; int nrows = life.nrows; int n_alive = 0; int n_dead = 0; #ifdef _OPENMP #pragma omp parallel for private(j) \ reduction(+:n_alive, n_dead) #endif for (i = 0; i < nrows; i++) for (j = 0; j < ncols; j++) life.grid[i][j] == ALIVE ? n_alive++ : n_dead++; printf("Number of ALIVE cells: %d\n", n_alive); printf("Number of DEAD cells: %d\n\n", n_dead); fflush(stdout); usleep(320000); } #ifdef GoL_MPI #include "../../include/chunk/init.h" /** * Initialize all variables and structures required by a single GoL chunk. */ void initialize_chunk(chunk_t *chunk, life_t life, FILE *input_ptr, int from, int to) { srand(life.seed); // 1. Allocate memory for the chunk malloc_chunk(chunk); // 2. Initialize the chunk with DEAD cells init_empty_chunk(chunk); // 3. Initialize the chunk with ALIVE cells... if (input_ptr != NULL) { // ...from file, if present... init_chunk_from_file(chunk, life.nrows, life.ncols, input_ptr, from, to); } else { // ...or randomly, otherwise. init_random_chunk(chunk, life, from, to); } #ifdef GoL_DEBUG debug_chunk(*chunk); usleep(1000000); #endif } /** * Perform GoL evolution on a single chunk for a given amount of generations. * * @return tot_gene_time The total time devolved to GoL evolution */ double game_chunk(chunk_t *chunk, life_t life) { int i; MPI_Status status; int timesteps = life.timesteps; int tot_rows = life.nrows; char *outfile = life.outfile; bool big = is_big(life); struct timeval gstart, gend; double cur_gene_time = 0.0; double tot_gene_time = 0.0; display_chunk(chunk, big, tot_rows, outfile, false); /* * Only one process (rank 0) will be allowed to track evolution timings. * * TODO: Track the average evolution timings across all processes. 
*/ for (i = 0; i < timesteps; i++) { MPI_Barrier(MPI_COMM_WORLD); if (chunk->rank == 0) // Track the start time gettimeofday(&gstart, NULL); // Evolve the current chunk evolve_chunk(chunk); // Identify top/bottom neighbours ranks int prev_rank = (chunk->rank - 1 + chunk->size) % chunk->size; int next_rank = (chunk->rank + 1) % chunk->size; // Share ghost rows with top/bottom neighbours MPI_Sendrecv(&chunk->slice[1][0], chunk->ncols, MPI_C_BOOL, prev_rank, TOP, &chunk->slice[chunk->nrows + 1][0], chunk->ncols, MPI_C_BOOL, next_rank, TOP, MPI_COMM_WORLD, &status); MPI_Sendrecv(&chunk->slice[chunk->nrows][0], chunk->ncols, MPI_C_BOOL, next_rank, BOTTOM, &chunk->slice[0][0], chunk->ncols, MPI_C_BOOL, prev_rank, BOTTOM, MPI_COMM_WORLD, &status); MPI_Barrier(MPI_COMM_WORLD); if (chunk->rank == 0) { // Track the end time gettimeofday(&gend, NULL); cur_gene_time = elapsed_wtime(gstart, gend); tot_gene_time += cur_gene_time; } if(big) { if (chunk->rank == 0) printf("Generation #%d took %.5f ms on process 0\n", i, cur_gene_time); // If the GoL grid is large, print it (to file) // only at the end of the last generation if (i == timesteps - 1) { display_chunk(chunk, big, tot_rows, outfile, true); } } else { display_chunk(chunk, big, tot_rows, outfile, true); } } if (chunk->rank == 0) printf("\nEvolved GoL's grid for %d generations - ETA: %.5f ms\n", timesteps, tot_gene_time); return tot_gene_time; } void evolve_chunk(chunk_t *chunk) { int x, y, i, j, r, c; int alive_neighbs; // # of alive neighbours int ncols = chunk->ncols; int nrows = chunk->nrows; // 1. Evolve every cell in the chunk #ifdef _OPENMP #pragma omp parallel for private(alive_neighbs, y, i, j, r, c) #endif for (x = 1; x < nrows + 1; x++) // Skip ghost rows: (1, ..., nrows + 1) for (y = 0; y < ncols; y++) { alive_neighbs = 0; // 1.a Check the 3x3 neighbourhood for (i = x - 1; i <= x + 1; i++) for (j = y - 1; j <= y + 1; j++) { /* Compute the actual row/col coordinates in the GoL chunk. */ c = (j + ncols) % ncols; if (!(i == x && j == y) // Skip the current cell (x, y) && chunk->slice[i][c] == ALIVE) alive_neighbs++; } // 1.b Apply GoL rules to determine the cell's next state chunk->next_slice[x][y] = (alive_neighbs == 3 || (alive_neighbs == 2 && chunk->slice[x][y] == ALIVE)) \ ? ALIVE : DEAD; } // 2. Replace the old grid with the updated one swap_grids(&chunk->slice, &chunk->next_slice); } void cleanup_chunk(chunk_t *chunk) { int i; free(chunk->slice); free(chunk->next_slice); } #endif #endif
#ifndef GoL_H #define GoL_H #include <stdlib.h> #include <unistd.h> #ifdef GoL_MPI #include <mpi.h> // Enable MPI support #endif // Custom includes #include "../../include/globals.h" #include "../../include/utils/log.h" #include "../../include/utils/func.h" #include "../../include/utils/parse.h" #include "../../include/life/init.h" /** * Swap the memory pointers between two 2D matrices. */ void swap_grids(bool ***old, bool ***new) { bool **temp = *old; *old = *new; *new = temp; } /** * Print to console the status of the current GoL board: the number of ALIVE and DEAD cells. */ void get_grid_status(life_t life) { int i, j; int ncols = life.ncols; int nrows = life.nrows; int n_alive = 0; int n_dead = 0; for (i = 0; i < nrows; i++) for (j = 0; j < ncols; j++) life.grid[i][j] == ALIVE ? n_alive++ : n_dead++; printf("Number of ALIVE cells: %d\n", n_alive); printf("Number of DEAD cells: %d\n\n", n_dead); fflush(stdout); usleep(320000); } #ifdef GoL_MPI #include "../../include/chunk/init.h" /** * Initialize all variables and structures required by a single GoL chunk. */ void initialize_chunk(chunk_t *chunk, life_t life, FILE *input_ptr, int from, int to) { srand(life.seed); // 1. Allocate memory for the chunk malloc_chunk(chunk); // 2. Initialize the chunk with DEAD cells init_empty_chunk(chunk); // 3. Initialize the chunk with ALIVE cells... if (input_ptr != NULL) { // ...from file, if present... init_chunk_from_file(chunk, life.nrows, life.ncols, input_ptr, from, to); } else { // ...or randomly, otherwise. init_random_chunk(chunk, life, from, to); } #ifdef GoL_DEBUG debug_chunk(*chunk); usleep(1000000); #endif } /** * Perform GoL evolution on a single chunk for a given amount of generations. * * @return tot_gene_time The total time devolved to GoL evolution */ double game_chunk(chunk_t *chunk, life_t life) { int i; MPI_Status status; int timesteps = life.timesteps; int tot_rows = life.nrows; char *outfile = life.outfile; bool big = is_big(life); struct timeval gstart, gend; double cur_gene_time = 0.0; double tot_gene_time = 0.0; display_chunk(chunk, big, tot_rows, outfile, false); /* * Only one process (rank 0) will be allowed to track evolution timings. * * TODO: Track the average evolution timings across all processes. 
*/ for (i = 0; i < timesteps; i++) { MPI_Barrier(MPI_COMM_WORLD); if (chunk->rank == 0) // Track the start time gettimeofday(&gstart, NULL); // Evolve the current chunk evolve_chunk(chunk); // Identify top/bottom neighbours ranks int prev_rank = (chunk->rank - 1 + chunk->size) % chunk->size; int next_rank = (chunk->rank + 1) % chunk->size; // Share ghost rows with top/bottom neighbours MPI_Sendrecv(&chunk->slice[1][0], chunk->ncols, MPI_C_BOOL, prev_rank, TOP, &chunk->slice[chunk->nrows + 1][0], chunk->ncols, MPI_C_BOOL, next_rank, TOP, MPI_COMM_WORLD, &status); MPI_Sendrecv(&chunk->slice[chunk->nrows][0], chunk->ncols, MPI_C_BOOL, next_rank, BOTTOM, &chunk->slice[0][0], chunk->ncols, MPI_C_BOOL, prev_rank, BOTTOM, MPI_COMM_WORLD, &status); MPI_Barrier(MPI_COMM_WORLD); if (chunk->rank == 0) { // Track the end time gettimeofday(&gend, NULL); cur_gene_time = elapsed_wtime(gstart, gend); tot_gene_time += cur_gene_time; } if(big) { if (chunk->rank == 0) printf("Generation #%d took %.5f ms on process 0\n", i, cur_gene_time); // If the GoL grid is large, print it (to file) // only at the end of the last generation if (i == timesteps - 1) { display_chunk(chunk, big, tot_rows, outfile, true); } } else { display_chunk(chunk, big, tot_rows, outfile, true); } } if (chunk->rank == 0) printf("\nEvolved GoL's grid for %d generations - ETA: %.5f ms\n", timesteps, tot_gene_time); return tot_gene_time; } void evolve_chunk(chunk_t *chunk) { int x, y, i, j, r, c; int alive_neighbs; // # of alive neighbours int ncols = chunk->ncols; int nrows = chunk->nrows; // 1. Evolve every cell in the chunk for (x = 1; x < nrows + 1; x++) // Skip ghost rows: (1, ..., nrows + 1) for (y = 0; y < ncols; y++) { alive_neighbs = 0; // 1.a Check the 3x3 neighbourhood for (i = x - 1; i <= x + 1; i++) for (j = y - 1; j <= y + 1; j++) { /* Compute the actual row/col coordinates in the GoL chunk. */ c = (j + ncols) % ncols; if (!(i == x && j == y) // Skip the current cell (x, y) && chunk->slice[i][c] == ALIVE) alive_neighbs++; } // 1.b Apply GoL rules to determine the cell's next state chunk->next_slice[x][y] = (alive_neighbs == 3 || (alive_neighbs == 2 && chunk->slice[x][y] == ALIVE)) \ ? ALIVE : DEAD; } // 2. Replace the old grid with the updated one swap_grids(&chunk->slice, &chunk->next_slice); } void cleanup_chunk(chunk_t *chunk) { int i; free(chunk->slice); free(chunk->next_slice); } #endif #endif
#ifndef GoL_H #define GoL_H #include <stdlib.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> // Enable OpenMP support #endif #ifdef GoL_MPI #include <mpi.h> // Enable MPI support #endif // Custom includes #include "../../include/globals.h" #include "../../include/utils/log.h" #include "../../include/utils/func.h" #include "../../include/utils/parse.h" #include "../../include/life/init.h" /** * Swap the memory pointers between two 2D matrices. */ void swap_grids(bool ***old, bool ***new) { bool **temp = *old; *old = *new; *new = temp; } /** * Print to console the status of the current GoL board: the number of ALIVE and DEAD cells. */ void get_grid_status(life_t life) { int i, j; int ncols = life.ncols; int nrows = life.nrows; int n_alive = 0; int n_dead = 0; #ifdef _OPENMP #pragma omp parallel for private(j) \ reduction(+:n_alive, n_dead) #endif for (i = 0; i < nrows; i++) for (j = 0; j < ncols; j++) life.grid[i][j] == ALIVE ? n_alive++ : n_dead++; printf("Number of ALIVE cells: %d\n", n_alive); printf("Number of DEAD cells: %d\n\n", n_dead); fflush(stdout); usleep(320000); } #ifdef GoL_MPI #include "../../include/chunk/init.h" /** * Initialize all variables and structures required by a single GoL chunk. */ void initialize_chunk(chunk_t *chunk, life_t life, FILE *input_ptr, int from, int to) { srand(life.seed); // 1. Allocate memory for the chunk malloc_chunk(chunk); // 2. Initialize the chunk with DEAD cells init_empty_chunk(chunk); // 3. Initialize the chunk with ALIVE cells... if (input_ptr != NULL) { // ...from file, if present... init_chunk_from_file(chunk, life.nrows, life.ncols, input_ptr, from, to); } else { // ...or randomly, otherwise. init_random_chunk(chunk, life, from, to); } #ifdef GoL_DEBUG debug_chunk(*chunk); usleep(1000000); #endif } /** * Perform GoL evolution on a single chunk for a given amount of generations. * * @return tot_gene_time The total time devolved to GoL evolution */ double game_chunk(chunk_t *chunk, life_t life) { int i; MPI_Status status; int timesteps = life.timesteps; int tot_rows = life.nrows; char *outfile = life.outfile; bool big = is_big(life); struct timeval gstart, gend; double cur_gene_time = 0.0; double tot_gene_time = 0.0; display_chunk(chunk, big, tot_rows, outfile, false); /* * Only one process (rank 0) will be allowed to track evolution timings. * * TODO: Track the average evolution timings across all processes. 
*/ for (i = 0; i < timesteps; i++) { MPI_Barrier(MPI_COMM_WORLD); if (chunk->rank == 0) // Track the start time gettimeofday(&gstart, NULL); // Evolve the current chunk evolve_chunk(chunk); // Identify top/bottom neighbours ranks int prev_rank = (chunk->rank - 1 + chunk->size) % chunk->size; int next_rank = (chunk->rank + 1) % chunk->size; // Share ghost rows with top/bottom neighbours MPI_Sendrecv(&chunk->slice[1][0], chunk->ncols, MPI_C_BOOL, prev_rank, TOP, &chunk->slice[chunk->nrows + 1][0], chunk->ncols, MPI_C_BOOL, next_rank, TOP, MPI_COMM_WORLD, &status); MPI_Sendrecv(&chunk->slice[chunk->nrows][0], chunk->ncols, MPI_C_BOOL, next_rank, BOTTOM, &chunk->slice[0][0], chunk->ncols, MPI_C_BOOL, prev_rank, BOTTOM, MPI_COMM_WORLD, &status); MPI_Barrier(MPI_COMM_WORLD); if (chunk->rank == 0) { // Track the end time gettimeofday(&gend, NULL); cur_gene_time = elapsed_wtime(gstart, gend); tot_gene_time += cur_gene_time; } if(big) { if (chunk->rank == 0) printf("Generation #%d took %.5f ms on process 0\n", i, cur_gene_time); // If the GoL grid is large, print it (to file) // only at the end of the last generation if (i == timesteps - 1) { display_chunk(chunk, big, tot_rows, outfile, true); } } else { display_chunk(chunk, big, tot_rows, outfile, true); } } if (chunk->rank == 0) printf("\nEvolved GoL's grid for %d generations - ETA: %.5f ms\n", timesteps, tot_gene_time); return tot_gene_time; } void evolve_chunk(chunk_t *chunk) { int x, y, i, j, r, c; int alive_neighbs; // # of alive neighbours int ncols = chunk->ncols; int nrows = chunk->nrows; // 1. Evolve every cell in the chunk #ifdef _OPENMP #pragma omp parallel for private(alive_neighbs, y, i, j, r, c) #endif for (x = 1; x < nrows + 1; x++) // Skip ghost rows: (1, ..., nrows + 1) for (y = 0; y < ncols; y++) { alive_neighbs = 0; // 1.a Check the 3x3 neighbourhood for (i = x - 1; i <= x + 1; i++) for (j = y - 1; j <= y + 1; j++) { /* Compute the actual row/col coordinates in the GoL chunk. */ c = (j + ncols) % ncols; if (!(i == x && j == y) // Skip the current cell (x, y) && chunk->slice[i][c] == ALIVE) alive_neighbs++; } // 1.b Apply GoL rules to determine the cell's next state chunk->next_slice[x][y] = (alive_neighbs == 3 || (alive_neighbs == 2 && chunk->slice[x][y] == ALIVE)) \ ? ALIVE : DEAD; } // 2. Replace the old grid with the updated one swap_grids(&chunk->slice, &chunk->next_slice); } void cleanup_chunk(chunk_t *chunk) { int i; free(chunk->slice); free(chunk->next_slice); } #endif #endif
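The ghost-row exchange in game_chunk depends on periodic rank arithmetic: (rank - 1 + size) % size wraps rank 0's upper neighbour around to size - 1, and (rank + 1) % size wraps the last rank's lower neighbour back to 0, so the chunks form a ring. A tiny self-contained check of that wrap-around logic (no MPI needed; the rank count of 4 is just an example):

#include <stdio.h>

int main(void)
{
    int size = 4;                                   /* number of MPI ranks (assumed) */
    for (int rank = 0; rank < size; rank++) {
        int prev_rank = (rank - 1 + size) % size;   /* wraps 0 -> size-1 */
        int next_rank = (rank + 1) % size;          /* wraps size-1 -> 0 */
        printf("rank %d: prev=%d next=%d\n", rank, prev_rank, next_rank);
    }
    return 0;
}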
omp_bug1.c
/****************************************************************************** * FILE: omp_bug1.c * DESCRIPTION: * This example attempts to show use of the parallel for construct. However * it will generate errors at compile time. Try to determine what is causing * the error. See omp_bug1fix.c for a corrected version. * AUTHOR: Blaise Barney 5/99 * LAST REVISED: 04/06/05 ******************************************************************************/ #include <omp.h> #include <stdio.h> #include <stdlib.h> #define N 50 #define CHUNKSIZE 5 int main (int argc, char *argv[]) { int i, chunk, tid; float a[N], b[N], c[N]; /* Some initializations */ for (i=0; i < N; i++) a[i] = b[i] = i * 1.0; chunk = CHUNKSIZE; #pragma omp parallel for \ shared(a,b,c,chunk) \ private(i,tid) \ schedule(static,chunk) { tid = omp_get_thread_num(); for (i=0; i < N; i++) { c[i] = a[i] + b[i]; printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]); } } /* end of parallel for construct */ }
/******************************************************************************
 * FILE: omp_bug1.c
 * DESCRIPTION:
 *   This example attempts to show use of the parallel for construct. However
 *   it will generate errors at compile time. Try to determine what is causing
 *   the error. See omp_bug1fix.c for a corrected version.
 * AUTHOR: Blaise Barney  5/99
 * LAST REVISED: 04/06/05
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50
#define CHUNKSIZE 5

int main(int argc, char *argv[])
{
    int i, chunk, tid;
    float a[N], b[N], c[N];

    /* Some initializations */
    for (i = 0; i < N; i++)
        a[i] = b[i] = i * 1.0;
    chunk = CHUNKSIZE;

    {
        tid = omp_get_thread_num();
        for (i = 0; i < N; i++) {
            c[i] = a[i] + b[i];
            printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
        }
    } /* end of parallel for construct */
}
/****************************************************************************** * FILE: omp_bug1.c * DESCRIPTION: * This example attempts to show use of the parallel for construct. However * it will generate errors at compile time. Try to determine what is causing * the error. See omp_bug1fix.c for a corrected version. * AUTHOR: Blaise Barney 5/99 * LAST REVISED: 04/06/05 ******************************************************************************/ #include <omp.h> #include <stdio.h> #include <stdlib.h> #define N 50 #define CHUNKSIZE 5 int main(int argc, char *argv[]) { int i, chunk, tid; float a[N], b[N], c[N]; /* Some initializations */ for (i = 0; i < N; i++) a[i] = b[i] = i * 1.0; chunk = CHUNKSIZE; #pragma omp parallel for \ shared(a,b,c,chunk) \ private(i,tid) \ schedule(static,chunk) { tid = omp_get_thread_num(); for (i = 0; i < N; i++) { c[i] = a[i] + b[i]; printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]); } } /* end of parallel for construct */ }
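The compile-time error in omp_bug1.c is that a combined "#pragma omp parallel for" must be followed immediately by a for loop, not by a compound block. One possible correction, sketched here (the actual omp_bug1fix.c may differ), splits the construct into a parallel region containing a work-sharing "omp for":

#include <omp.h>
#include <stdio.h>

#define N 50
#define CHUNKSIZE 5

int main(void)
{
    int i, tid;
    float a[N], b[N], c[N];

    /* Some initializations */
    for (i = 0; i < N; i++)
        a[i] = b[i] = i * 1.0;

    /* The per-thread setup (tid) needs a structured block, so use a plain
       parallel region and put the work-sharing directive on the loop itself. */
    #pragma omp parallel shared(a, b, c) private(i, tid)
    {
        tid = omp_get_thread_num();

        #pragma omp for schedule(static, CHUNKSIZE)
        for (i = 0; i < N; i++) {
            c[i] = a[i] + b[i];
            printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
        }
    } /* end of parallel region */

    return 0;
}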